add new 0.5.0 content

Jeremy Francis Reizenstein 2021-08-10 08:17:14 -07:00
parent 3ccabda50d
commit a4725d2421
45 changed files with 5739 additions and 43 deletions

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
+…<h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li>…
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

@@ -6,7 +6,63 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
-…<li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li>…<div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer docsContainer"><div class="wrapper"><div class="post">…<h1>…File IO</h1>
+…<li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li>…<div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc">…<ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/docs/io">File IO</a></li>…</section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
var links = coll[i].nextElementSibling.getElementsByTagName('*');
if (checkActiveCategory){
for (var j = 0; j < links.length; j++) {
if (links[j].classList.contains('navListItemActive')){
coll[i].nextElementSibling.classList.toggle('hide');
coll[i].childNodes[1].classList.toggle('rotate');
checkActiveCategory = false;
break;
}
}
}
coll[i].addEventListener('click', function() {
var arrow = this.childNodes[1];
arrow.classList.toggle('rotate');
var content = this.nextElementSibling;
content.classList.toggle('hide');
});
}
document.addEventListener('DOMContentLoaded', function() {
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
createToggler('#tocToggler', 'body', 'tocActive');
var headings = document.querySelector('.toc-headings');
headings && headings.addEventListener('click', function(event) {
var el = event.target;
while(el !== headings){
if (el.tagName === 'A') {
document.body.classList.remove('tocActive');
break;
} else{
el = el.parentNode;
}
}
}, false);
function createToggler(togglerSelector, targetSelector, className) {
var toggler = document.querySelector(togglerSelector);
var target = document.querySelector(targetSelector);
if (!toggler) {
return;
}
toggler.onclick = function(event) {
event.preventDefault();
target.classList.toggle(className);
};
}
});
</script></nav></div><div class="container mainContainer docsContainer"><div class="wrapper"><div class="post"><header class="postHeader"></header><article><div><span><h1>…File IO</h1>
<p>There is a flexible interface for loading and saving point clouds and meshes from different formats.</p>
<p>The main usage is via the <code>pytorch3d.io.IO</code> object and its methods
<code>load_mesh</code>, <code>save_mesh</code>, <code>load_pointcloud</code> and <code>save_pointcloud</code>.</p>
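<p>A minimal sketch of that interface (the file paths here are placeholders; the format is inferred from the file suffix):</p>

from pytorch3d.io import IO

io = IO()

# Load a mesh (e.g. .obj or .ply) onto the chosen device.
mesh = io.load_mesh("model.obj", device="cpu")  # placeholder path
io.save_mesh(mesh, "model_copy.obj")

# Point clouds work the same way.
pointcloud = io.load_pointcloud("scene.ply", device="cpu")  # placeholder path
io.save_pointcloud(pointcloud, "scene_copy.ply")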
@@ -27,4 +83,4 @@ mesh = IO().load…
stored either in a GLB container file or a glTF JSON file with embedded binary data.
This must be enabled explicitly, as described in
<code>pytorch3d/io/experimental_gltf_io.py</code>.</p>
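<p>A sketch of enabling it (assuming a local <code>mesh.glb</code> file; <code>MeshGlbFormat</code> is the handler defined in that module):</p>

from pytorch3d.io import IO
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat

io = IO()
# glTF/GLB support is experimental and not registered by default.
io.register_meshes_format(MeshGlbFormat())
mesh = io.load_mesh("mesh.glb", include_textures=True)  # placeholder path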
-…<div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"></div>…
+…<div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"><a class="docs-prev button" href="/docs/why_pytorch3d"><span class="arrow-prev"></span><span class="function-name-prevnext">Why PyTorch3D</span></a><a class="docs-next button" href="/docs/meshes_io"><span>Loading from file</span><span class="arrow-next"></span></a></div>…

View File

@ -6,7 +6,63 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer docsContainer"><div class="wrapper"><div class="post"><header class="postHeader"></header><article><div><span><h1><a class="anchor" aria-hidden="true" id="file-io"></a><a href="#file-io" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"></path></svg></a>File IO</h1>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Data</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
var links = coll[i].nextElementSibling.getElementsByTagName('*');
if (checkActiveCategory){
for (var j = 0; j < links.length; j++) {
if (links[j].classList.contains('navListItemActive')){
coll[i].nextElementSibling.classList.toggle('hide');
coll[i].childNodes[1].classList.toggle('rotate');
checkActiveCategory = false;
break;
}
}
}
coll[i].addEventListener('click', function() {
var arrow = this.childNodes[1];
arrow.classList.toggle('rotate');
var content = this.nextElementSibling;
content.classList.toggle('hide');
});
}
document.addEventListener('DOMContentLoaded', function() {
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
createToggler('#tocToggler', 'body', 'tocActive');
var headings = document.querySelector('.toc-headings');
headings && headings.addEventListener('click', function(event) {
var el = event.target;
while(el !== headings){
if (el.tagName === 'A') {
document.body.classList.remove('tocActive');
break;
} else{
el = el.parentNode;
}
}
}, false);
function createToggler(togglerSelector, targetSelector, className) {
var toggler = document.querySelector(togglerSelector);
var target = document.querySelector(targetSelector);
if (!toggler) {
return;
}
toggler.onclick = function(event) {
event.preventDefault();
target.classList.toggle(className);
};
}
});
</script></nav></div><div class="container mainContainer docsContainer"><div class="wrapper"><div class="post"><header class="postHeader"></header><article><div><span><h1><a class="anchor" aria-hidden="true" id="file-io"></a><a href="#file-io" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"></path></svg></a>File IO</h1>
<p>There is a flexible interface for loading and saving point clouds and meshes from different formats.</p>
<p>The main usage is via the <code>pytorch3d.io.IO</code> object, and its methods
<code>load_mesh</code>, <code>save_mesh</code>, <code>load_point_cloud</code> and <code>save_point_cloud</code>.</p>
@ -27,4 +83,4 @@ mesh = <span class="hljs-constructor">IO()</span>.load<span class="hljs-construc
stored either in a GLB container file or a glTF JSON file with embedded binary data.
This must be enabled explicitly, as described in
<code>pytorch3d/io/experimental_gltf_io.ply</code>.</p>
</span></div></article></div><div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"></div></div></div><nav class="onPageNav"></nav></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
</span></div></article></div><div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"><a class="docs-prev button" href="/docs/why_pytorch3d"><span class="arrow-prev"></span><span class="function-name-prevnext">Why PyTorch3D</span></a><a class="docs-next button" href="/docs/meshes_io"><span>Loading from file</span><span class="arrow-next"></span></a></div></div></div><nav class="onPageNav"></nav></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Data</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Data</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@ -116,4 +116,4 @@ are not triangles will be split into triangles. A Meshes object containing a
single mesh can be created from this data using</p>
<pre><code class="hljs"> meshes = <span class="hljs-constructor">Meshes(<span class="hljs-params">verts</span>=[<span class="hljs-params">verts</span>], <span class="hljs-params">faces</span>=[<span class="hljs-params">faces</span>])</span>
</code></pre>
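<p>Putting this together with the loader, here is a sketch of wrapping a single OBJ file in a Meshes object (<code>model.obj</code> is a placeholder path):</p>
<pre><code class="hljs">from pytorch3d.io import load_obj
from pytorch3d.structures import Meshes

# load_obj returns the vertices, a namedtuple of face data and auxiliary
# information; faces.verts_idx holds the (F, 3) triangle vertex indices.
verts, faces, aux = load_obj("model.obj")
meshes = Meshes(verts=[verts], faces=[faces.verts_idx])
</code></pre>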
</span></div></article></div><div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"><a class="docs-prev button" href="/docs/why_pytorch3d"><span class="arrow-prev"></span><span class="function-name-prevnext">Why PyTorch3D</span></a><a class="docs-next button" href="/docs/datasets"><span>Data loaders</span><span class="arrow-next"></span></a></div></div></div><nav class="onPageNav"><ul class="toc-headings"><li><a href="#obj">OBJ</a></li><li><a href="#ply">PLY</a></li></ul></nav></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
</span></div></article></div><div class="docLastUpdate"><em>Last updated by Jeremy Reizenstein</em></div><div class="docs-prevnext"><a class="docs-prev button" href="/docs/io"><span class="arrow-prev"></span><span>File IO</span></a><a class="docs-next button" href="/docs/datasets"><span>Data loaders</span><span class="arrow-next"></span></a></div></div></div><nav class="onPageNav"><ul class="toc-headings"><li><a href="#obj">OBJ</a></li><li><a href="#ply">PLY</a></li></ul></nav></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>


View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Renderer</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Renderer</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {


View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Renderer</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body class="sideNavVisible separateOnPageNav"><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class="siteNavGroupActive"><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span>Renderer</span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Introduction</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/why_pytorch3d">Why PyTorch3D</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Data</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/io">File IO</a></li><li class="navListItem"><a class="navItem" href="/docs/meshes_io">Loading from file</a></li><li class="navListItem"><a class="navItem" href="/docs/datasets">Data loaders</a></li><li class="navListItem"><a class="navItem" href="/docs/batching">Batching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Ops</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/cubify">Cubify</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Visualization</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/visualization">Plotly Visualization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Renderer</h3><ul class=""><li class="navListItem"><a class="navItem" href="/docs/renderer">Overview</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/docs/renderer_getting_started">Getting Started</a></li><li class="navListItem"><a class="navItem" href="/docs/cameras">Cameras</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {


View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
View File

@ -0,0 +1,901 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Fit a simple Neural Radiance Field via raymarching\n",
"\n",
"This tutorial shows how to fit Neural Radiance Field given a set of views of a scene using differentiable implicit function rendering.\n",
"\n",
"More specifically, this tutorial will explain how to:\n",
"1. Create a differentiable implicit function renderer with either image-grid or Monte Carlo ray sampling.\n",
"2. Create an Implicit model of a scene.\n",
"3. Fit the implicit function (Neural Radiance Field) based on input images using the differentiable implicit renderer. \n",
"4. Visualize the learnt implicit function.\n",
"\n",
"Note that the presented implicit model is a simplified version of NeRF:<br>\n",
"_Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, Ren Ng: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis, ECCV 2020._\n",
"\n",
"The simplifications include:\n",
"* *Ray sampling*: This notebook does not perform stratified ray sampling but rather ray sampling at equidistant depths.\n",
"* *Rendering*: We do a single rendering pass, as opposed to the original implementation that does a coarse and fine rendering pass.\n",
"* *Architecture*: Our network is shallower which allows for faster optimization possibly at the cost of surface details.\n",
"* *Mask loss*: Since our observations include segmentation masks, we also optimize a silhouette loss that forces rays to either get fully absorbed inside the volume, or to completely pass through it.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 0. Install and Import modules\n",
"Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import torch\n",
"need_pytorch3d=False\n",
"try:\n",
" import pytorch3d\n",
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
" ])\n",
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# %matplotlib inline\n",
"# %matplotlib notebook\n",
"import os\n",
"import sys\n",
"import time\n",
"import json\n",
"import glob\n",
"import torch\n",
"import math\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from PIL import Image\n",
"from IPython import display\n",
"from tqdm.notebook import tqdm\n",
"\n",
"# Data structures and functions for rendering\n",
"from pytorch3d.structures import Volumes\n",
"from pytorch3d.transforms import so3_exp_map\n",
"from pytorch3d.renderer import (\n",
" FoVPerspectiveCameras, \n",
" NDCGridRaysampler,\n",
" MonteCarloRaysampler,\n",
" EmissionAbsorptionRaymarcher,\n",
" ImplicitRenderer,\n",
" RayBundle,\n",
" ray_bundle_to_ray_points,\n",
")\n",
"\n",
"# obtain the utilized device\n",
"if torch.cuda.is_available():\n",
" device = torch.device(\"cuda:0\")\n",
" torch.cuda.set_device(device)\n",
"else:\n",
" print(\n",
" 'Please note that NeRF is a resource-demanding method.'\n",
" + ' Running this notebook on CPU will be extremely slow.'\n",
" + ' We recommend running the example on a GPU'\n",
" + ' with at least 10 GB of memory.'\n",
" )\n",
" device = torch.device(\"cpu\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py\n",
"from plot_image_grid import image_grid\n",
"from generate_cow_renders import generate_cow_renders"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"OR if running locally uncomment and run the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from utils.generate_cow_renders import generate_cow_renders\n",
"# from utils import image_grid"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Generate images of the scene and masks\n",
"\n",
"The following cell generates our training data.\n",
"It renders the cow mesh from the `fit_textured_mesh.ipynb` tutorial from several viewpoints and returns:\n",
"1. A batch of image and silhouette tensors that are produced by the cow mesh renderer.\n",
"2. A set of cameras corresponding to each render.\n",
"\n",
"Note: For the purpose of this tutorial, which aims at explaining the details of implicit rendering, we do not explain how the mesh rendering, implemented in the `generate_cow_renders` function, works. Please refer to `fit_textured_mesh.ipynb` for a detailed explanation of mesh rendering."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_cameras, target_images, target_silhouettes = generate_cow_renders(num_views=40, azimuth_range=180)\n",
"print(f'Generated {len(target_images)} images/silhouettes/cameras.')"
]
},
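{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (our addition, not part of the original tutorial flow), the following cell previews a few of the generated views with the `image_grid` utility downloaded above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check (our addition): preview the first few target images\n",
"# produced by generate_cow_renders with the image_grid utility.\n",
"image_grid(target_images[:4].cpu().numpy(), rows=1, cols=4, rgb=True)\n",
"plt.show()"
]
},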
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Initialize the implicit renderer\n",
"\n",
"The following initializes an implicit renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the neural model of the scene (the model is described & instantiated in a later cell).\n",
"\n",
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:\n",
" - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.\n",
" - `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCGridRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n",
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# render_size describes the size of both sides of the \n",
"# rendered images in pixels. Since an advantage of \n",
"# Neural Radiance Fields are high quality renders\n",
"# with a significant amount of details, we render\n",
"# the implicit function at double the size of \n",
"# target images.\n",
"render_size = target_images.shape[1] * 2\n",
"\n",
"# Our rendered scene is centered around (0,0,0) \n",
"# and is enclosed inside a bounding box\n",
"# whose side is roughly equal to 3.0 (world units).\n",
"volume_extent_world = 3.0\n",
"\n",
"# 1) Instantiate the raysamplers.\n",
"\n",
"# Here, NDCGridRaysampler generates a rectangular image\n",
"# grid of rays whose coordinates follow the PyTorch3D\n",
"# coordinate conventions.\n",
"raysampler_grid = NDCGridRaysampler(\n",
" image_height=render_size,\n",
" image_width=render_size,\n",
" n_pts_per_ray=128,\n",
" min_depth=0.1,\n",
" max_depth=volume_extent_world,\n",
")\n",
"\n",
"# MonteCarloRaysampler generates a random subset \n",
"# of `n_rays_per_image` rays emitted from the image plane.\n",
"raysampler_mc = MonteCarloRaysampler(\n",
" min_x = -1.0,\n",
" max_x = 1.0,\n",
" min_y = -1.0,\n",
" max_y = 1.0,\n",
" n_rays_per_image=750,\n",
" n_pts_per_ray=128,\n",
" min_depth=0.1,\n",
" max_depth=volume_extent_world,\n",
")\n",
"\n",
"# 2) Instantiate the raymarcher.\n",
"# Here, we use the standard EmissionAbsorptionRaymarcher \n",
"# which marches along each ray in order to render\n",
"# the ray into a single 3D color vector \n",
"# and an opacity scalar.\n",
"raymarcher = EmissionAbsorptionRaymarcher()\n",
"\n",
"# Finally, instantiate the implicit renders\n",
"# for both raysamplers.\n",
"renderer_grid = ImplicitRenderer(\n",
" raysampler=raysampler_grid, raymarcher=raymarcher,\n",
")\n",
"renderer_mc = ImplicitRenderer(\n",
" raysampler=raysampler_mc, raymarcher=raymarcher,\n",
")"
]
},
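{
"cell_type": "markdown",
"metadata": {},
"source": [
"To build intuition for what the raymarcher computes, the following cell (our sketch in plain PyTorch, not the PyTorch3D implementation) composites a single toy ray with the standard emission-absorption rule: each point contributes its color weighted by its opacity times the transmittance, i.e. the product of (1 - opacity) over all points closer to the camera."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Toy emission-absorption compositing along one ray (our sketch).\n",
"# toy_densities are per-point opacities in [0, 1], ordered from\n",
"# the camera outwards; toy_colors are the per-point RGB values.\n",
"toy_densities = torch.tensor([0.0, 0.1, 0.8, 0.9])\n",
"toy_colors = torch.tensor([\n",
"    [0.0, 0.0, 0.0],\n",
"    [0.2, 0.2, 0.2],\n",
"    [1.0, 0.0, 0.0],\n",
"    [0.0, 1.0, 0.0],\n",
"])\n",
"# Transmittance: fraction of the ray reaching each point unabsorbed.\n",
"transmittance = torch.cumprod(\n",
"    torch.cat([torch.ones(1), 1.0 - toy_densities[:-1]]), dim=0\n",
")\n",
"weights = transmittance * toy_densities\n",
"pixel_color = (weights[:, None] * toy_colors).sum(dim=0)\n",
"pixel_opacity = weights.sum()\n",
"print(pixel_color, pixel_opacity)"
]
},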
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Define the neural radiance field model\n",
"\n",
"In this cell we define the `NeuralRadianceField` module, which specifies a continuous field of colors and opacities over the 3D domain of the scene.\n",
"\n",
"The `forward` function of `NeuralRadianceField` (NeRF) receives as input a set of tensors that parametrize a bundle of rendering rays. The ray bundle is later converted to 3D ray points in the world coordinates of the scene. Each 3D point is then mapped to a harmonic representation using the `HarmonicEmbedding` layer (defined in the next cell). The harmonic embeddings then enter the _color_ and _opacity_ branches of the NeRF model in order to label each ray point with a 3D vector and a 1D scalar ranging in [0-1] which define the point's RGB color and opacity respectively.\n",
"\n",
"Since NeRF has a large memory footprint, we also implement the `NeuralRadianceField.forward_batched` method. The method splits the input rays into batches and executes the `forward` function for each batch separately in a for loop. This lets us render a large set of rays without running out of GPU memory. Standardly, `forward_batched` would be used to render rays emitted from all pixels of an image in order to produce a full-sized render of a scene.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class HarmonicEmbedding(torch.nn.Module):\n",
" def __init__(self, n_harmonic_functions=60, omega0=0.1):\n",
" \"\"\"\n",
" Given an input tensor `x` of shape [minibatch, ... , dim],\n",
" the harmonic embedding layer converts each feature\n",
" in `x` into a series of harmonic features `embedding`\n",
" as follows:\n",
" embedding[..., i*dim:(i+1)*dim] = [\n",
" sin(x[..., i]),\n",
" sin(2*x[..., i]),\n",
" sin(4*x[..., i]),\n",
" ...\n",
" sin(2**(self.n_harmonic_functions-1) * x[..., i]),\n",
" cos(x[..., i]),\n",
" cos(2*x[..., i]),\n",
" cos(4*x[..., i]),\n",
" ...\n",
" cos(2**(self.n_harmonic_functions-1) * x[..., i])\n",
" ]\n",
" \n",
" Note that `x` is also premultiplied by `omega0` before\n",
" evaluating the harmonic functions.\n",
" \"\"\"\n",
" super().__init__()\n",
" self.register_buffer(\n",
" 'frequencies',\n",
" omega0 * (2.0 ** torch.arange(n_harmonic_functions)),\n",
" )\n",
" def forward(self, x):\n",
" \"\"\"\n",
" Args:\n",
" x: tensor of shape [..., dim]\n",
" Returns:\n",
" embedding: a harmonic embedding of `x`\n",
" of shape [..., n_harmonic_functions * dim * 2]\n",
" \"\"\"\n",
" embed = (x[..., None] * self.frequencies).view(*x.shape[:-1], -1)\n",
" return torch.cat((embed.sin(), embed.cos()), dim=-1)\n",
"\n",
"\n",
"class NeuralRadianceField(torch.nn.Module):\n",
" def __init__(self, n_harmonic_functions=60, n_hidden_neurons=256):\n",
" super().__init__()\n",
" \"\"\"\n",
" Args:\n",
" n_harmonic_functions: The number of harmonic functions\n",
" used to form the harmonic embedding of each point.\n",
" n_hidden_neurons: The number of hidden units in the\n",
" fully connected layers of the MLPs of the model.\n",
" \"\"\"\n",
" \n",
" # The harmonic embedding layer converts input 3D coordinates\n",
" # to a representation that is more suitable for\n",
" # processing with a deep neural network.\n",
" self.harmonic_embedding = HarmonicEmbedding(n_harmonic_functions)\n",
" \n",
" # The dimension of the harmonic embedding.\n",
" embedding_dim = n_harmonic_functions * 2 * 3\n",
" \n",
" # self.mlp is a simple 2-layer multi-layer perceptron\n",
" # which converts the input per-point harmonic embeddings\n",
" # to a latent representation.\n",
" # Not that we use Softplus activations instead of ReLU.\n",
" self.mlp = torch.nn.Sequential(\n",
" torch.nn.Linear(embedding_dim, n_hidden_neurons),\n",
" torch.nn.Softplus(beta=10.0),\n",
" torch.nn.Linear(n_hidden_neurons, n_hidden_neurons),\n",
" torch.nn.Softplus(beta=10.0),\n",
" ) \n",
" \n",
" # Given features predicted by self.mlp, self.color_layer\n",
" # is responsible for predicting a 3-D per-point vector\n",
" # that represents the RGB color of the point.\n",
" self.color_layer = torch.nn.Sequential(\n",
" torch.nn.Linear(n_hidden_neurons + embedding_dim, n_hidden_neurons),\n",
" torch.nn.Softplus(beta=10.0),\n",
" torch.nn.Linear(n_hidden_neurons, 3),\n",
" torch.nn.Sigmoid(),\n",
" # To ensure that the colors correctly range between [0-1],\n",
" # the layer is terminated with a sigmoid layer.\n",
" ) \n",
" \n",
" # The density layer converts the features of self.mlp\n",
" # to a 1D density value representing the raw opacity\n",
" # of each point.\n",
" self.density_layer = torch.nn.Sequential(\n",
" torch.nn.Linear(n_hidden_neurons, 1),\n",
" torch.nn.Softplus(beta=10.0),\n",
" # Sofplus activation ensures that the raw opacity\n",
" # is a non-negative number.\n",
" )\n",
" \n",
" # We set the bias of the density layer to -1.5\n",
" # in order to initialize the opacities of the\n",
" # ray points to values close to 0. \n",
" # This is a crucial detail for ensuring convergence\n",
" # of the model.\n",
" self.density_layer[0].bias.data[0] = -1.5 \n",
" \n",
" def _get_densities(self, features):\n",
" \"\"\"\n",
" This function takes `features` predicted by `self.mlp`\n",
" and converts them to `raw_densities` with `self.density_layer`.\n",
" `raw_densities` are later mapped to [0-1] range with\n",
" 1 - inverse exponential of `raw_densities`.\n",
" \"\"\"\n",
" raw_densities = self.density_layer(features)\n",
" return 1 - (-raw_densities).exp()\n",
" \n",
" def _get_colors(self, features, rays_directions):\n",
" \"\"\"\n",
" This function takes per-point `features` predicted by `self.mlp`\n",
" and evaluates the color model in order to attach to each\n",
" point a 3D vector of its RGB color.\n",
" \n",
" In order to represent viewpoint dependent effects,\n",
" before evaluating `self.color_layer`, `NeuralRadianceField`\n",
" concatenates to the `features` a harmonic embedding\n",
" of `ray_directions`, which are per-point directions \n",
" of point rays expressed as 3D l2-normalized vectors\n",
" in world coordinates.\n",
" \"\"\"\n",
" spatial_size = features.shape[:-1]\n",
" \n",
" # Normalize the ray_directions to unit l2 norm.\n",
" rays_directions_normed = torch.nn.functional.normalize(\n",
" rays_directions, dim=-1\n",
" )\n",
" \n",
" # Obtain the harmonic embedding of the normalized ray directions.\n",
" rays_embedding = self.harmonic_embedding(\n",
" rays_directions_normed\n",
" )\n",
" \n",
" # Expand the ray directions tensor so that its spatial size\n",
" # is equal to the size of features.\n",
" rays_embedding_expand = rays_embedding[..., None, :].expand(\n",
" *spatial_size, rays_embedding.shape[-1]\n",
" )\n",
" \n",
" # Concatenate ray direction embeddings with \n",
" # features and evaluate the color model.\n",
" color_layer_input = torch.cat(\n",
" (features, rays_embedding_expand),\n",
" dim=-1\n",
" )\n",
" return self.color_layer(color_layer_input)\n",
" \n",
" \n",
" def forward(\n",
" self, \n",
" ray_bundle: RayBundle,\n",
" **kwargs,\n",
" ):\n",
" \"\"\"\n",
" The forward function accepts the parametrizations of\n",
" 3D points sampled along projection rays. The forward\n",
" pass is responsible for attaching a 3D vector\n",
" and a 1D scalar representing the point's \n",
" RGB color and opacity respectively.\n",
" \n",
" Args:\n",
" ray_bundle: A RayBundle object containing the following variables:\n",
" origins: A tensor of shape `(minibatch, ..., 3)` denoting the\n",
" origins of the sampling rays in world coords.\n",
" directions: A tensor of shape `(minibatch, ..., 3)`\n",
" containing the direction vectors of sampling rays in world coords.\n",
" lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`\n",
" containing the lengths at which the rays are sampled.\n",
"\n",
" Returns:\n",
" rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`\n",
" denoting the opacity of each ray point.\n",
" rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`\n",
" denoting the color of each ray point.\n",
" \"\"\"\n",
" # We first convert the ray parametrizations to world\n",
" # coordinates with `ray_bundle_to_ray_points`.\n",
" rays_points_world = ray_bundle_to_ray_points(ray_bundle)\n",
" # rays_points_world.shape = [minibatch x ... x 3]\n",
" \n",
" # For each 3D world coordinate, we obtain its harmonic embedding.\n",
" embeds = self.harmonic_embedding(\n",
" rays_points_world\n",
" )\n",
" # embeds.shape = [minibatch x ... x self.n_harmonic_functions*6]\n",
" \n",
" # self.mlp maps each harmonic embedding to a latent feature space.\n",
" features = self.mlp(embeds)\n",
" # features.shape = [minibatch x ... x n_hidden_neurons]\n",
" \n",
" # Finally, given the per-point features, \n",
" # execute the density and color branches.\n",
" \n",
" rays_densities = self._get_densities(features)\n",
" # rays_densities.shape = [minibatch x ... x 1]\n",
"\n",
" rays_colors = self._get_colors(features, ray_bundle.directions)\n",
" # rays_colors.shape = [minibatch x ... x 3]\n",
" \n",
" return rays_densities, rays_colors\n",
" \n",
" def batched_forward(\n",
" self, \n",
" ray_bundle: RayBundle,\n",
" n_batches: int = 16,\n",
" **kwargs, \n",
" ):\n",
" \"\"\"\n",
" This function is used to allow for memory efficient processing\n",
" of input rays. The input rays are first split to `n_batches`\n",
" chunks and passed through the `self.forward` function one at a time\n",
" in a for loop. Combined with disabling PyTorch gradient caching\n",
" (`torch.no_grad()`), this allows for rendering large batches\n",
" of rays that do not all fit into GPU memory in a single forward pass.\n",
" In our case, batched_forward is used to export a fully-sized render\n",
" of the radiance field for visualization purposes.\n",
" \n",
" Args:\n",
" ray_bundle: A RayBundle object containing the following variables:\n",
" origins: A tensor of shape `(minibatch, ..., 3)` denoting the\n",
" origins of the sampling rays in world coords.\n",
" directions: A tensor of shape `(minibatch, ..., 3)`\n",
" containing the direction vectors of sampling rays in world coords.\n",
" lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`\n",
" containing the lengths at which the rays are sampled.\n",
" n_batches: Specifies the number of batches the input rays are split into.\n",
" The larger the number of batches, the smaller the memory footprint\n",
" and the lower the processing speed.\n",
"\n",
" Returns:\n",
" rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`\n",
" denoting the opacity of each ray point.\n",
" rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`\n",
" denoting the color of each ray point.\n",
"\n",
" \"\"\"\n",
"\n",
" # Parse out shapes needed for tensor reshaping in this function.\n",
" n_pts_per_ray = ray_bundle.lengths.shape[-1] \n",
" spatial_size = [*ray_bundle.origins.shape[:-1], n_pts_per_ray]\n",
"\n",
" # Split the rays to `n_batches` batches.\n",
" tot_samples = ray_bundle.origins.shape[:-1].numel()\n",
" batches = torch.chunk(torch.arange(tot_samples), n_batches)\n",
"\n",
" # For each batch, execute the standard forward pass.\n",
" batch_outputs = [\n",
" self.forward(\n",
" RayBundle(\n",
" origins=ray_bundle.origins.view(-1, 3)[batch_idx],\n",
" directions=ray_bundle.directions.view(-1, 3)[batch_idx],\n",
" lengths=ray_bundle.lengths.view(-1, n_pts_per_ray)[batch_idx],\n",
" xys=None,\n",
" )\n",
" ) for batch_idx in batches\n",
" ]\n",
" \n",
" # Concatenate the per-batch rays_densities and rays_colors\n",
" # and reshape according to the sizes of the inputs.\n",
" rays_densities, rays_colors = [\n",
" torch.cat(\n",
" [batch_output[output_i] for batch_output in batch_outputs], dim=0\n",
" ).view(*spatial_size, -1) for output_i in (0, 1)\n",
" ]\n",
" return rays_densities, rays_colors"
]
},
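{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick shape check (our addition; the tensors below are arbitrary toy values), we can instantiate the model and pass a dummy ray bundle through `forward` to confirm the output shapes promised by the docstrings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Shape check on a dummy ray bundle (our addition, toy values).\n",
"check_nerf = NeuralRadianceField()\n",
"check_bundle = RayBundle(\n",
"    origins=torch.zeros(2, 4, 3),\n",
"    directions=torch.randn(2, 4, 3),\n",
"    lengths=torch.linspace(0.1, 3.0, 8).expand(2, 4, 8),\n",
"    xys=torch.zeros(2, 4, 2),\n",
")\n",
"check_densities, check_colors = check_nerf(check_bundle)\n",
"# Expected: (2, 4, 8, 1) and (2, 4, 8, 3).\n",
"print(check_densities.shape, check_colors.shape)"
]
},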
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4. Helper functions\n",
"\n",
"In this function we define functions that help with the Neural Radiance Field optimization."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def huber(x, y, scaling=0.1):\n",
" \"\"\"\n",
" A helper function for evaluating the smooth L1 (huber) loss\n",
" between the rendered silhouettes and colors.\n",
" \"\"\"\n",
" diff_sq = (x - y) ** 2\n",
" loss = ((1 + diff_sq / (scaling**2)).clamp(1e-4).sqrt() - 1) * float(scaling)\n",
" return loss\n",
"\n",
"def sample_images_at_mc_locs(target_images, sampled_rays_xy):\n",
" \"\"\"\n",
" Given a set of Monte Carlo pixel locations `sampled_rays_xy`,\n",
" this method samples the tensor `target_images` at the\n",
" respective 2D locations.\n",
" \n",
" This function is used in order to extract the colors from\n",
" ground truth images that correspond to the colors\n",
" rendered using `MonteCarloRaysampler`.\n",
" \"\"\"\n",
" ba = target_images.shape[0]\n",
" dim = target_images.shape[-1]\n",
" spatial_size = sampled_rays_xy.shape[1:-1]\n",
" # In order to sample target_images, we utilize\n",
" # the grid_sample function which implements a\n",
" # bilinear image sampler.\n",
" # Note that we have to invert the sign of the \n",
" # sampled ray positions to convert the NDC xy locations\n",
" # of the MonteCarloRaysampler to the coordinate\n",
" # convention of grid_sample.\n",
" images_sampled = torch.nn.functional.grid_sample(\n",
" target_images.permute(0, 3, 1, 2), \n",
" -sampled_rays_xy.view(ba, -1, 1, 2), # note the sign inversion\n",
" align_corners=True\n",
" )\n",
" return images_sampled.permute(0, 2, 3, 1).view(\n",
" ba, *spatial_size, dim\n",
" )\n",
"\n",
"def show_full_render(\n",
" neural_radiance_field, camera,\n",
" target_image, target_silhouette,\n",
" loss_history_color, loss_history_sil,\n",
"):\n",
" \"\"\"\n",
" This is a helper function for visualizing the\n",
" intermediate results of the learning. \n",
" \n",
" Since the `NeuralRadianceField` suffers from\n",
" a large memory footprint, which does not let us\n",
" render the full image grid in a single forward pass,\n",
" we utilize the `NeuralRadianceField.batched_forward`\n",
" function in combination with disabling the gradient caching.\n",
" This chunks the set of emitted rays to batches and \n",
" evaluates the implicit function on one batch at a time\n",
" to prevent GPU memory overflow.\n",
" \"\"\"\n",
" \n",
" # Prevent gradient caching.\n",
" with torch.no_grad():\n",
" # Render using the grid renderer and the\n",
" # batched_forward function of neural_radiance_field.\n",
" rendered_image_silhouette, _ = renderer_grid(\n",
" cameras=camera, \n",
" volumetric_function=neural_radiance_field.batched_forward\n",
" )\n",
" # Split the rendering result to a silhouette render\n",
" # and the image render.\n",
" rendered_image, rendered_silhouette = (\n",
" rendered_image_silhouette[0].split([3, 1], dim=-1)\n",
" )\n",
" \n",
" # Generate plots.\n",
" fig, ax = plt.subplots(2, 3, figsize=(15, 10))\n",
" ax = ax.ravel()\n",
" clamp_and_detach = lambda x: x.clamp(0.0, 1.0).cpu().detach().numpy()\n",
" ax[0].plot(list(range(len(loss_history_color))), loss_history_color, linewidth=1)\n",
" ax[1].imshow(clamp_and_detach(rendered_image))\n",
" ax[2].imshow(clamp_and_detach(rendered_silhouette[..., 0]))\n",
" ax[3].plot(list(range(len(loss_history_sil))), loss_history_sil, linewidth=1)\n",
" ax[4].imshow(clamp_and_detach(target_image))\n",
" ax[5].imshow(clamp_and_detach(target_silhouette))\n",
" for ax_, title_ in zip(\n",
" ax,\n",
" (\n",
" \"loss color\", \"rendered image\", \"rendered silhouette\",\n",
" \"loss silhouette\", \"target image\", \"target silhouette\",\n",
" )\n",
" ):\n",
" if not title_.startswith('loss'):\n",
" ax_.grid(\"off\")\n",
" ax_.axis(\"off\")\n",
" ax_.set_title(title_)\n",
" fig.canvas.draw(); fig.show()\n",
" display.clear_output(wait=True)\n",
" display.display(fig)\n",
" return fig\n"
]
},
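{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before using it in the optimization loop, here is a tiny illustration (our addition) of the `huber` helper: for small residuals the loss grows roughly quadratically, while for large residuals it grows roughly linearly, which makes the fit robust to outliers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Tiny illustration of the huber helper (our addition).\n",
"residuals = torch.tensor([0.0, 0.05, 0.1, 0.5, 1.0])\n",
"# Near zero the loss is ~quadratic; far from zero it is ~linear.\n",
"print(huber(residuals, torch.zeros_like(residuals)))"
]
},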
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5. Fit the radiance field\n",
"\n",
"Here we carry out the radiance field fitting with differentiable rendering.\n",
"\n",
"In order to fit the radiance field, we render it from the viewpoints of the `target_cameras`\n",
"and compare the resulting renders with the observed `target_images` and `target_silhouettes`.\n",
"\n",
"The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding\n",
"pairs of `target_images`/`rendered_images` and `target_silhouettes`/`rendered_silhouettes`.\n",
"\n",
"Since we use the `MonteCarloRaysampler`, the outputs of the training renderer `renderer_mc`\n",
"are colors of pixels that are randomly sampled from the image plane, not a lattice of pixels forming\n",
"a valid image. Thus, in order to compare the rendered colors with the ground truth, we \n",
"utilize the random MonteCarlo pixel locations to sample the ground truth images/silhouettes\n",
"`target_silhouettes`/`rendered_silhouettes` at the xy locations corresponding to the render\n",
"locations. This is done with the helper function `sample_images_at_mc_locs`, which is\n",
"described in the previous cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# First move all relevant variables to the correct device.\n",
"renderer_grid = renderer_grid.to(device)\n",
"renderer_mc = renderer_mc.to(device)\n",
"target_cameras = target_cameras.to(device)\n",
"target_images = target_images.to(device)\n",
"target_silhouettes = target_silhouettes.to(device)\n",
"\n",
"# Set the seed for reproducibility\n",
"torch.manual_seed(1)\n",
"\n",
"# Instantiate the radiance field model.\n",
"neural_radiance_field = NeuralRadianceField().to(device)\n",
"\n",
"# Instantiate the Adam optimizer. We set its master learning rate to 1e-3.\n",
"lr = 1e-3\n",
"optimizer = torch.optim.Adam(neural_radiance_field.parameters(), lr=lr)\n",
"\n",
"# We sample 6 random cameras in a minibatch. Each camera\n",
"# emits raysampler_mc.n_pts_per_image rays.\n",
"batch_size = 6\n",
"\n",
"# 3000 iterations take ~20 min on a Tesla M40 and lead to\n",
"# reasonably sharp results. However, for the best possible\n",
"# results, we recommend setting n_iter=20000.\n",
"n_iter = 3000\n",
"\n",
"# Init the loss history buffers.\n",
"loss_history_color, loss_history_sil = [], []\n",
"\n",
"# The main optimization loop.\n",
"for iteration in range(n_iter): \n",
" # In case we reached the last 75% of iterations,\n",
" # decrease the learning rate of the optimizer 10-fold.\n",
" if iteration == round(n_iter * 0.75):\n",
" print('Decreasing LR 10-fold ...')\n",
" optimizer = torch.optim.Adam(\n",
" neural_radiance_field.parameters(), lr=lr * 0.1\n",
" )\n",
" \n",
" # Zero the optimizer gradient.\n",
" optimizer.zero_grad()\n",
" \n",
" # Sample random batch indices.\n",
" batch_idx = torch.randperm(len(target_cameras))[:batch_size]\n",
" \n",
" # Sample the minibatch of cameras.\n",
" batch_cameras = FoVPerspectiveCameras(\n",
" R = target_cameras.R[batch_idx], \n",
" T = target_cameras.T[batch_idx], \n",
" znear = target_cameras.znear[batch_idx],\n",
" zfar = target_cameras.zfar[batch_idx],\n",
" aspect_ratio = target_cameras.aspect_ratio[batch_idx],\n",
" fov = target_cameras.fov[batch_idx],\n",
" device = device,\n",
" )\n",
" \n",
" # Evaluate the nerf model.\n",
" rendered_images_silhouettes, sampled_rays = renderer_mc(\n",
" cameras=batch_cameras, \n",
" volumetric_function=neural_radiance_field\n",
" )\n",
" rendered_images, rendered_silhouettes = (\n",
" rendered_images_silhouettes.split([3, 1], dim=-1)\n",
" )\n",
" \n",
" # Compute the silhouette error as the mean huber\n",
" # loss between the predicted masks and the\n",
" # sampled target silhouettes.\n",
" silhouettes_at_rays = sample_images_at_mc_locs(\n",
" target_silhouettes[batch_idx, ..., None], \n",
" sampled_rays.xys\n",
" )\n",
" sil_err = huber(\n",
" rendered_silhouettes, \n",
" silhouettes_at_rays,\n",
" ).abs().mean()\n",
"\n",
" # Compute the color error as the mean huber\n",
" # loss between the rendered colors and the\n",
" # sampled target images.\n",
" colors_at_rays = sample_images_at_mc_locs(\n",
" target_images[batch_idx], \n",
" sampled_rays.xys\n",
" )\n",
" color_err = huber(\n",
" rendered_images, \n",
" colors_at_rays,\n",
" ).abs().mean()\n",
" \n",
" # The optimization loss is a simple\n",
" # sum of the color and silhouette errors.\n",
" loss = color_err + sil_err\n",
" \n",
" # Log the loss history.\n",
" loss_history_color.append(float(color_err))\n",
" loss_history_sil.append(float(sil_err))\n",
" \n",
" # Every 10 iterations, print the current values of the losses.\n",
" if iteration % 10 == 0:\n",
" print(\n",
" f'Iteration {iteration:05d}:'\n",
" + f' loss color = {float(color_err):1.2e}'\n",
" + f' loss silhouette = {float(sil_err):1.2e}'\n",
" )\n",
" \n",
" # Take the optimization step.\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" # Visualize the full renders every 100 iterations.\n",
" if iteration % 100 == 0:\n",
" show_idx = torch.randperm(len(target_cameras))[:1]\n",
" show_full_render(\n",
" neural_radiance_field,\n",
" FoVPerspectiveCameras(\n",
" R = target_cameras.R[show_idx], \n",
" T = target_cameras.T[show_idx], \n",
" znear = target_cameras.znear[show_idx],\n",
" zfar = target_cameras.zfar[show_idx],\n",
" aspect_ratio = target_cameras.aspect_ratio[show_idx],\n",
" fov = target_cameras.fov[show_idx],\n",
" device = device,\n",
" ), \n",
" target_images[show_idx][0],\n",
" target_silhouettes[show_idx][0],\n",
" loss_history_color,\n",
" loss_history_sil,\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5. Visualizing the optimized neural radiance field\n",
"\n",
"Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def generate_rotating_nerf(neural_radiance_field, n_frames = 50):\n",
" logRs = torch.zeros(n_frames, 3, device=device)\n",
" logRs[:, 1] = torch.linspace(-3.14, 3.14, n_frames, device=device)\n",
" Rs = so3_exp_map(logRs)\n",
" Ts = torch.zeros(n_frames, 3, device=device)\n",
" Ts[:, 2] = 2.7\n",
" frames = []\n",
" print('Rendering rotating NeRF ...')\n",
" for R, T in zip(tqdm(Rs), Ts):\n",
" camera = FoVPerspectiveCameras(\n",
" R=R[None], \n",
" T=T[None], \n",
" znear=target_cameras.znear[0],\n",
" zfar=target_cameras.zfar[0],\n",
" aspect_ratio=target_cameras.aspect_ratio[0],\n",
" fov=target_cameras.fov[0],\n",
" device=device,\n",
" )\n",
" # Note that we again render with `NDCGridRaySampler`\n",
" # and the batched_forward function of neural_radiance_field.\n",
" frames.append(\n",
" renderer_grid(\n",
" cameras=camera, \n",
" volumetric_function=neural_radiance_field.batched_forward,\n",
" )[0][..., :3]\n",
" )\n",
" return torch.cat(frames)\n",
" \n",
"with torch.no_grad():\n",
" rotating_nerf_frames = generate_rotating_nerf(neural_radiance_field, n_frames=3*5)\n",
" \n",
"image_grid(rotating_nerf_frames.clamp(0., 1.).cpu().numpy(), rows=3, cols=5, rgb=True, fill=True)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6. Conclusion\n",
"\n",
"In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCGridRaysampler`, and an `EmissionAbsorptionRaymarcher`."
]
}
],
"metadata": {
"bento_stylesheets": {
"bento/extensions/flow/main.css": true,
"bento/extensions/kernel_selector/main.css": true,
"bento/extensions/kernel_ui/main.css": true,
"bento/extensions/new_kernel/main.css": true,
"bento/extensions/system_usage/main.css": true,
"bento/extensions/theme/main.css": true
},
"kernelspec": {
"display_name": "pytorch3d_etc (local)",
"language": "python",
"name": "pytorch3d_etc_local"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.5+"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -0,0 +1,789 @@
# coding: utf-8
# In[ ]:
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# # Fit a simple Neural Radiance Field via raymarching
#
# This tutorial shows how to fit a Neural Radiance Field given a set of views of a scene using differentiable implicit function rendering.
#
# More specifically, this tutorial will explain how to:
# 1. Create a differentiable implicit function renderer with either image-grid or Monte Carlo ray sampling.
# 2. Create an Implicit model of a scene.
# 3. Fit the implicit function (Neural Radiance Field) based on input images using the differentiable implicit renderer.
# 4. Visualize the learnt implicit function.
#
# Note that the presented implicit model is a simplified version of NeRF:<br>
# _Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, Ren Ng: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis, ECCV 2020._
#
# The simplifications include:
# * *Ray sampling*: This notebook does not perform stratified ray sampling but rather ray sampling at equidistant depths (an illustrative sketch of stratified sampling follows this list).
# * *Rendering*: We do a single rendering pass, as opposed to the original implementation that does a coarse and fine rendering pass.
# * *Architecture*: Our network is shallower, which allows for faster optimization, possibly at the cost of surface details.
# * *Mask loss*: Since our observations include segmentation masks, we also optimize a silhouette loss that forces rays to either get fully absorbed inside the volume, or to completely pass through it.
#
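# As a point of reference for the first simplification above, stratified
# sampling (used by the original NeRF, but not by this tutorial) jitters
# each equidistant depth value uniformly within its depth bin. The function
# below is a minimal, illustrative sketch of that idea and is not used
# elsewhere in this tutorial:
# In[ ]:
import torch
def stratify_ray_lengths(lengths: torch.Tensor) -> torch.Tensor:
    """
    Jitter per-ray sample depths `lengths` of shape (..., n_pts_per_ray)
    uniformly within the bins delimited by the midpoints of consecutive
    depth values.
    """
    mids = 0.5 * (lengths[..., 1:] + lengths[..., :-1])
    upper = torch.cat((mids, lengths[..., -1:]), dim=-1)
    lower = torch.cat((lengths[..., :1], mids), dim=-1)
    # Draw one uniform sample per depth bin.
    return lower + (upper - lower) * torch.rand_like(lengths)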
# ## 0. Install and Import modules
# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:
# In[ ]:
import os
import sys
import torch
need_pytorch3d=False
try:
import pytorch3d
except ModuleNotFoundError:
need_pytorch3d=True
if need_pytorch3d:
if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"):
# We try to install PyTorch3D via a released wheel.
version_str="".join([
f"py3{sys.version_info.minor}_cu",
torch.version.cuda.replace(".",""),
f"_pyt{torch.__version__[0:5:2]}"
])
get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html')
else:
# We try to install PyTorch3D from source.
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
get_ipython().system('tar xzf 1.10.0.tar.gz')
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
# In[ ]:
# %matplotlib inline
# %matplotlib notebook
import os
import sys
import time
import json
import glob
import torch
import math
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from IPython import display
from tqdm.notebook import tqdm
# Data structures and functions for rendering
from pytorch3d.structures import Volumes
from pytorch3d.transforms import so3_exp_map
from pytorch3d.renderer import (
FoVPerspectiveCameras,
NDCGridRaysampler,
MonteCarloRaysampler,
EmissionAbsorptionRaymarcher,
ImplicitRenderer,
RayBundle,
ray_bundle_to_ray_points,
)
# obtain the utilized device
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
print(
'Please note that NeRF is a resource-demanding method.'
+ ' Running this notebook on CPU will be extremely slow.'
+ ' We recommend running the example on a GPU'
+ ' with at least 10 GB of memory.'
)
device = torch.device("cpu")
# In[ ]:
get_ipython().system('wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py')
get_ipython().system('wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py')
from plot_image_grid import image_grid
from generate_cow_renders import generate_cow_renders
# OR, if running locally, uncomment and run the following cell:
# In[ ]:
# from utils.generate_cow_renders import generate_cow_renders
# from utils import image_grid
# ## 1. Generate images of the scene and masks
#
# The following cell generates our training data.
# It renders the cow mesh from the `fit_textured_mesh.ipynb` tutorial from several viewpoints and returns:
# 1. A batch of image and silhouette tensors that are produced by the cow mesh renderer.
# 2. A set of cameras corresponding to each render.
#
# Note: For the purpose of this tutorial, which aims at explaining the details of implicit rendering, we do not explain how the mesh rendering, implemented in the `generate_cow_renders` function, works. Please refer to `fit_textured_mesh.ipynb` for a detailed explanation of mesh rendering.
# In[ ]:
target_cameras, target_images, target_silhouettes = generate_cow_renders(num_views=40, azimuth_range=180)
print(f'Generated {len(target_images)} images/silhouettes/cameras.')
# ## 2. Initialize the implicit renderer
#
# The following initializes an implicit renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color values are obtained by querying the corresponding location in the neural model of the scene (the model is described & instantiated in a later cell).
#
# The renderer is composed of a *raymarcher* and a *raysampler*.
# - The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:
# - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.
# - `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCGridRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.
# - The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher`, which implements the standard Emission-Absorption raymarching algorithm (an illustrative sketch of the compositing rule follows the next cell).
# In[ ]:
# render_size describes the size of both sides of the
# rendered images in pixels. Since an advantage of
# Neural Radiance Fields is high-quality renders
# with a significant amount of detail, we render
# the implicit function at double the size of
# target images.
render_size = target_images.shape[1] * 2
# Our rendered scene is centered around (0,0,0)
# and is enclosed inside a bounding box
# whose side is roughly equal to 3.0 (world units).
volume_extent_world = 3.0
# 1) Instantiate the raysamplers.
# Here, NDCGridRaysampler generates a rectangular image
# grid of rays whose coordinates follow the PyTorch3D
# coordinate conventions.
raysampler_grid = NDCGridRaysampler(
image_height=render_size,
image_width=render_size,
n_pts_per_ray=128,
min_depth=0.1,
max_depth=volume_extent_world,
)
# MonteCarloRaysampler generates a random subset
# of `n_rays_per_image` rays emitted from the image plane.
raysampler_mc = MonteCarloRaysampler(
min_x = -1.0,
max_x = 1.0,
min_y = -1.0,
max_y = 1.0,
n_rays_per_image=750,
n_pts_per_ray=128,
min_depth=0.1,
max_depth=volume_extent_world,
)
# 2) Instantiate the raymarcher.
# Here, we use the standard EmissionAbsorptionRaymarcher
# which marches along each ray in order to render
# the ray into a single 3D color vector
# and an opacity scalar.
raymarcher = EmissionAbsorptionRaymarcher()
# Finally, instantiate the implicit renderers
# for both raysamplers.
renderer_grid = ImplicitRenderer(
raysampler=raysampler_grid, raymarcher=raymarcher,
)
renderer_mc = ImplicitRenderer(
raysampler=raysampler_mc, raymarcher=raymarcher,
)
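# For intuition, the following is a minimal, illustrative sketch of the
# emission-absorption compositing rule that EmissionAbsorptionRaymarcher
# applies (a re-implementation with our own names, not the library code):
# given per-point opacities a_i in [0, 1] and colors c_i along a ray, the
# rendered color is sum_i w_i * c_i with w_i = a_i * prod_{j<i} (1 - a_j),
# and the rendered opacity is sum_i w_i = 1 - prod_i (1 - a_i).
# In[ ]:
def emission_absorption_sketch(rays_densities, rays_colors):
    # rays_densities: (..., n_pts_per_ray, 1), values in [0, 1].
    # rays_colors:    (..., n_pts_per_ray, 3).
    # Transmittance up to, but excluding, each ray point.
    transmittance = torch.cumprod(1.0 - rays_densities + 1e-10, dim=-2)
    transmittance = torch.cat(
        (torch.ones_like(transmittance[..., :1, :]), transmittance[..., :-1, :]),
        dim=-2,
    )
    weights = rays_densities * transmittance
    color = (weights * rays_colors).sum(dim=-2)
    opacity = weights.sum(dim=-2)
    # Per ray: RGB color concatenated with the overall opacity.
    return torch.cat((color, opacity), dim=-1)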
# ## 3. Define the neural radiance field model
#
# In this cell we define the `NeuralRadianceField` module, which specifies a continuous field of colors and opacities over the 3D domain of the scene.
#
# The `forward` function of `NeuralRadianceField` (NeRF) receives as input a set of tensors that parametrize a bundle of rendering rays. The ray bundle is later converted to 3D ray points in the world coordinates of the scene. Each 3D point is then mapped to a harmonic representation using the `HarmonicEmbedding` layer (defined in the next cell). The harmonic embeddings then enter the _color_ and _opacity_ branches of the NeRF model in order to label each ray point with a 3D vector and a 1D scalar ranging in [0-1] which define the point's RGB color and opacity respectively.
#
# Since NeRF has a large memory footprint, we also implement the `NeuralRadianceField.batched_forward` method. The method splits the input rays into batches and executes the `forward` function for each batch separately in a for loop. This lets us render a large set of rays without running out of GPU memory. Typically, `batched_forward` is used to render rays emitted from all pixels of an image in order to produce a full-sized render of a scene.
#
# In[ ]:
class HarmonicEmbedding(torch.nn.Module):
def __init__(self, n_harmonic_functions=60, omega0=0.1):
"""
Given an input tensor `x` of shape [minibatch, ... , dim],
the harmonic embedding layer converts each feature
in `x` into a series of harmonic features `embedding`
as follows:
embedding[..., i*dim:(i+1)*dim] = [
sin(x[..., i]),
sin(2*x[..., i]),
sin(4*x[..., i]),
...
sin(2**(self.n_harmonic_functions-1) * x[..., i]),
cos(x[..., i]),
cos(2*x[..., i]),
cos(4*x[..., i]),
...
cos(2**(self.n_harmonic_functions-1) * x[..., i])
]
Note that `x` is also premultiplied by `omega0` before
evaluating the harmonic functions.
"""
super().__init__()
self.register_buffer(
'frequencies',
omega0 * (2.0 ** torch.arange(n_harmonic_functions)),
)
def forward(self, x):
"""
Args:
x: tensor of shape [..., dim]
Returns:
embedding: a harmonic embedding of `x`
of shape [..., n_harmonic_functions * dim * 2]
"""
embed = (x[..., None] * self.frequencies).view(*x.shape[:-1], -1)
return torch.cat((embed.sin(), embed.cos()), dim=-1)
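# For example (an illustrative shape check): embedding a (4, 3) batch of 3D
# points with n_harmonic_functions=2 yields a (4, 2 * 2 * 3) = (4, 12) tensor,
# which is why `embedding_dim = n_harmonic_functions * 2 * 3` below:
# HarmonicEmbedding(n_harmonic_functions=2)(torch.zeros(4, 3)).shape
# -> torch.Size([4, 12])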
class NeuralRadianceField(torch.nn.Module):
def __init__(self, n_harmonic_functions=60, n_hidden_neurons=256):
super().__init__()
"""
Args:
n_harmonic_functions: The number of harmonic functions
used to form the harmonic embedding of each point.
n_hidden_neurons: The number of hidden units in the
fully connected layers of the MLPs of the model.
"""
# The harmonic embedding layer converts input 3D coordinates
# to a representation that is more suitable for
# processing with a deep neural network.
self.harmonic_embedding = HarmonicEmbedding(n_harmonic_functions)
# The dimension of the harmonic embedding.
embedding_dim = n_harmonic_functions * 2 * 3
# self.mlp is a simple 2-layer multi-layer perceptron
# which converts the input per-point harmonic embeddings
# to a latent representation.
# Note that we use Softplus activations instead of ReLU.
self.mlp = torch.nn.Sequential(
torch.nn.Linear(embedding_dim, n_hidden_neurons),
torch.nn.Softplus(beta=10.0),
torch.nn.Linear(n_hidden_neurons, n_hidden_neurons),
torch.nn.Softplus(beta=10.0),
)
# Given features predicted by self.mlp, self.color_layer
# is responsible for predicting a 3-D per-point vector
# that represents the RGB color of the point.
self.color_layer = torch.nn.Sequential(
torch.nn.Linear(n_hidden_neurons + embedding_dim, n_hidden_neurons),
torch.nn.Softplus(beta=10.0),
torch.nn.Linear(n_hidden_neurons, 3),
torch.nn.Sigmoid(),
# To ensure that the colors correctly range between [0-1],
# the layer is terminated with a sigmoid layer.
)
# The density layer converts the features of self.mlp
# to a 1D density value representing the raw opacity
# of each point.
self.density_layer = torch.nn.Sequential(
torch.nn.Linear(n_hidden_neurons, 1),
torch.nn.Softplus(beta=10.0),
# Softplus activation ensures that the raw opacity
# is a non-negative number.
)
# We set the bias of the density layer to -1.5
# in order to initialize the opacities of the
# ray points to values close to 0.
# This is a crucial detail for ensuring convergence
# of the model.
self.density_layer[0].bias.data[0] = -1.5
def _get_densities(self, features):
"""
This function takes `features` predicted by `self.mlp`
and converts them to `raw_densities` with `self.density_layer`.
`raw_densities` are later mapped to the [0-1] range with
1 minus the inverse exponential of `raw_densities`.
"""
raw_densities = self.density_layer(features)
return 1 - (-raw_densities).exp()
def _get_colors(self, features, rays_directions):
"""
This function takes per-point `features` predicted by `self.mlp`
and evaluates the color model in order to attach to each
point a 3D vector of its RGB color.
In order to represent viewpoint dependent effects,
before evaluating `self.color_layer`, `NeuralRadianceField`
concatenates to the `features` a harmonic embedding
of `rays_directions`, the per-point ray directions
expressed as l2-normalized 3D vectors in world coordinates.
"""
spatial_size = features.shape[:-1]
# Normalize the ray_directions to unit l2 norm.
rays_directions_normed = torch.nn.functional.normalize(
rays_directions, dim=-1
)
# Obtain the harmonic embedding of the normalized ray directions.
rays_embedding = self.harmonic_embedding(
rays_directions_normed
)
# Expand the ray directions tensor so that its spatial size
# is equal to the size of features.
rays_embedding_expand = rays_embedding[..., None, :].expand(
*spatial_size, rays_embedding.shape[-1]
)
# Concatenate ray direction embeddings with
# features and evaluate the color model.
color_layer_input = torch.cat(
(features, rays_embedding_expand),
dim=-1
)
return self.color_layer(color_layer_input)
def forward(
self,
ray_bundle: RayBundle,
**kwargs,
):
"""
The forward function accepts the parametrizations of
3D points sampled along projection rays. The forward
pass is responsible for attaching a 3D vector
and a 1D scalar representing the point's
RGB color and opacity respectively.
Args:
ray_bundle: A RayBundle object containing the following variables:
origins: A tensor of shape `(minibatch, ..., 3)` denoting the
origins of the sampling rays in world coords.
directions: A tensor of shape `(minibatch, ..., 3)`
containing the direction vectors of sampling rays in world coords.
lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
containing the lengths at which the rays are sampled.
Returns:
rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
denoting the opacity of each ray point.
rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
denoting the color of each ray point.
"""
# We first convert the ray parametrizations to world
# coordinates with `ray_bundle_to_ray_points`.
rays_points_world = ray_bundle_to_ray_points(ray_bundle)
# rays_points_world.shape = [minibatch x ... x 3]
# For each 3D world coordinate, we obtain its harmonic embedding.
embeds = self.harmonic_embedding(
rays_points_world
)
# embeds.shape = [minibatch x ... x self.n_harmonic_functions*6]
# self.mlp maps each harmonic embedding to a latent feature space.
features = self.mlp(embeds)
# features.shape = [minibatch x ... x n_hidden_neurons]
# Finally, given the per-point features,
# execute the density and color branches.
rays_densities = self._get_densities(features)
# rays_densities.shape = [minibatch x ... x 1]
rays_colors = self._get_colors(features, ray_bundle.directions)
# rays_colors.shape = [minibatch x ... x 3]
return rays_densities, rays_colors
def batched_forward(
self,
ray_bundle: RayBundle,
n_batches: int = 16,
**kwargs,
):
"""
This function is used to allow for memory efficient processing
of input rays. The input rays are first split to `n_batches`
chunks and passed through the `self.forward` function one at a time
in a for loop. Combined with disabling PyTorch gradient caching
(`torch.no_grad()`), this allows for rendering large batches
of rays that do not all fit into GPU memory in a single forward pass.
In our case, batched_forward is used to export a full-sized render
of the radiance field for visualization purposes.
Args:
ray_bundle: A RayBundle object containing the following variables:
origins: A tensor of shape `(minibatch, ..., 3)` denoting the
origins of the sampling rays in world coords.
directions: A tensor of shape `(minibatch, ..., 3)`
containing the direction vectors of sampling rays in world coords.
lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
containing the lengths at which the rays are sampled.
n_batches: Specifies the number of batches the input rays are split into.
The larger the number of batches, the smaller the memory footprint
and the lower the processing speed.
Returns:
rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
denoting the opacity of each ray point.
rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
denoting the color of each ray point.
"""
# Parse out shapes needed for tensor reshaping in this function.
n_pts_per_ray = ray_bundle.lengths.shape[-1]
spatial_size = [*ray_bundle.origins.shape[:-1], n_pts_per_ray]
# Split the rays to `n_batches` batches.
tot_samples = ray_bundle.origins.shape[:-1].numel()
batches = torch.chunk(torch.arange(tot_samples), n_batches)
# For each batch, execute the standard forward pass.
batch_outputs = [
self.forward(
RayBundle(
origins=ray_bundle.origins.view(-1, 3)[batch_idx],
directions=ray_bundle.directions.view(-1, 3)[batch_idx],
lengths=ray_bundle.lengths.view(-1, n_pts_per_ray)[batch_idx],
xys=None,
)
) for batch_idx in batches
]
# Concatenate the per-batch rays_densities and rays_colors
# and reshape according to the sizes of the inputs.
rays_densities, rays_colors = [
torch.cat(
[batch_output[output_i] for batch_output in batch_outputs], dim=0
).view(*spatial_size, -1) for output_i in (0, 1)
]
return rays_densities, rays_colors
# ## 4. Helper functions
#
# In this cell we define helper functions for the Neural Radiance Field optimization.
# In[ ]:
def huber(x, y, scaling=0.1):
"""
A helper function for evaluating the smooth L1 (huber) loss
between the rendered silhouettes and colors.
"""
diff_sq = (x - y) ** 2
loss = ((1 + diff_sq / (scaling**2)).clamp(1e-4).sqrt() - 1) * float(scaling)
return loss
def sample_images_at_mc_locs(target_images, sampled_rays_xy):
"""
Given a set of Monte Carlo pixel locations `sampled_rays_xy`,
this method samples the tensor `target_images` at the
respective 2D locations.
This function is used in order to extract the colors from
ground truth images that correspond to the colors
rendered using `MonteCarloRaysampler`.
"""
ba = target_images.shape[0]
dim = target_images.shape[-1]
spatial_size = sampled_rays_xy.shape[1:-1]
# In order to sample target_images, we utilize
# the grid_sample function which implements a
# bilinear image sampler.
# Note that we have to invert the sign of the
# sampled ray positions to convert the NDC xy locations
# of the MonteCarloRaysampler to the coordinate
# convention of grid_sample.
images_sampled = torch.nn.functional.grid_sample(
target_images.permute(0, 3, 1, 2),
-sampled_rays_xy.view(ba, -1, 1, 2), # note the sign inversion
align_corners=True
)
return images_sampled.permute(0, 2, 3, 1).view(
ba, *spatial_size, dim
)
def show_full_render(
neural_radiance_field, camera,
target_image, target_silhouette,
loss_history_color, loss_history_sil,
):
"""
This is a helper function for visualizing the
intermediate results of the learning.
Since the `NeuralRadianceField` suffers from
a large memory footprint, which does not let us
render the full image grid in a single forward pass,
we utilize the `NeuralRadianceField.batched_forward`
function in combination with disabling the gradient caching.
This chunks the set of emitted rays to batches and
evaluates the implicit function on one batch at a time
to prevent GPU memory overflow.
"""
# Prevent gradient caching.
with torch.no_grad():
# Render using the grid renderer and the
# batched_forward function of neural_radiance_field.
rendered_image_silhouette, _ = renderer_grid(
cameras=camera,
volumetric_function=neural_radiance_field.batched_forward
)
# Split the rendering result to a silhouette render
# and the image render.
rendered_image, rendered_silhouette = (
rendered_image_silhouette[0].split([3, 1], dim=-1)
)
# Generate plots.
fig, ax = plt.subplots(2, 3, figsize=(15, 10))
ax = ax.ravel()
clamp_and_detach = lambda x: x.clamp(0.0, 1.0).cpu().detach().numpy()
ax[0].plot(list(range(len(loss_history_color))), loss_history_color, linewidth=1)
ax[1].imshow(clamp_and_detach(rendered_image))
ax[2].imshow(clamp_and_detach(rendered_silhouette[..., 0]))
ax[3].plot(list(range(len(loss_history_sil))), loss_history_sil, linewidth=1)
ax[4].imshow(clamp_and_detach(target_image))
ax[5].imshow(clamp_and_detach(target_silhouette))
for ax_, title_ in zip(
ax,
(
"loss color", "rendered image", "rendered silhouette",
"loss silhouette", "target image", "target silhouette",
)
):
if not title_.startswith('loss'):
ax_.grid("off")
ax_.axis("off")
ax_.set_title(title_)
fig.canvas.draw(); fig.show()
display.clear_output(wait=True)
display.display(fig)
return fig
# ## 5. Fit the radiance field
#
# Here we carry out the radiance field fitting with differentiable rendering.
#
# In order to fit the radiance field, we render it from the viewpoints of the `target_cameras`
# and compare the resulting renders with the observed `target_images` and `target_silhouettes`.
#
# The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding
# pairs of `target_images`/`rendered_images` and `target_silhouettes`/`rendered_silhouettes`.
#
# Since we use the `MonteCarloRaysampler`, the outputs of the training renderer `renderer_mc`
# are colors of pixels that are randomly sampled from the image plane, not a lattice of pixels forming
# a valid image. Thus, in order to compare the rendered colors with the ground truth, we
# utilize the random MonteCarlo pixel locations to sample the ground truth images/silhouettes
# `target_images`/`target_silhouettes` at the xy locations corresponding to the render
# locations. This is done with the helper function `sample_images_at_mc_locs`, which is
# described in the previous cell.
# In[ ]:
# First move all relevant variables to the correct device.
renderer_grid = renderer_grid.to(device)
renderer_mc = renderer_mc.to(device)
target_cameras = target_cameras.to(device)
target_images = target_images.to(device)
target_silhouettes = target_silhouettes.to(device)
# Set the seed for reproducibility
torch.manual_seed(1)
# Instantiate the radiance field model.
neural_radiance_field = NeuralRadianceField().to(device)
# Instantiate the Adam optimizer. We set its master learning rate to 1e-3.
lr = 1e-3
optimizer = torch.optim.Adam(neural_radiance_field.parameters(), lr=lr)
# We sample 6 random cameras in a minibatch. Each camera
# emits raysampler_mc.n_rays_per_image rays.
batch_size = 6
# 3000 iterations take ~20 min on a Tesla M40 and lead to
# reasonably sharp results. However, for the best possible
# results, we recommend setting n_iter=20000.
n_iter = 3000
# Init the loss history buffers.
loss_history_color, loss_history_sil = [], []
# The main optimization loop.
for iteration in range(n_iter):
# Once we enter the last 25% of iterations,
# decrease the learning rate of the optimizer 10-fold.
if iteration == round(n_iter * 0.75):
print('Decreasing LR 10-fold ...')
optimizer = torch.optim.Adam(
neural_radiance_field.parameters(), lr=lr * 0.1
)
# Zero the optimizer gradient.
optimizer.zero_grad()
# Sample random batch indices.
batch_idx = torch.randperm(len(target_cameras))[:batch_size]
# Sample the minibatch of cameras.
batch_cameras = FoVPerspectiveCameras(
R = target_cameras.R[batch_idx],
T = target_cameras.T[batch_idx],
znear = target_cameras.znear[batch_idx],
zfar = target_cameras.zfar[batch_idx],
aspect_ratio = target_cameras.aspect_ratio[batch_idx],
fov = target_cameras.fov[batch_idx],
device = device,
)
# Evaluate the NeRF model.
rendered_images_silhouettes, sampled_rays = renderer_mc(
cameras=batch_cameras,
volumetric_function=neural_radiance_field
)
rendered_images, rendered_silhouettes = (
rendered_images_silhouettes.split([3, 1], dim=-1)
)
# Compute the silhouette error as the mean huber
# loss between the predicted masks and the
# sampled target silhouettes.
silhouettes_at_rays = sample_images_at_mc_locs(
target_silhouettes[batch_idx, ..., None],
sampled_rays.xys
)
sil_err = huber(
rendered_silhouettes,
silhouettes_at_rays,
).abs().mean()
# Compute the color error as the mean huber
# loss between the rendered colors and the
# sampled target images.
colors_at_rays = sample_images_at_mc_locs(
target_images[batch_idx],
sampled_rays.xys
)
color_err = huber(
rendered_images,
colors_at_rays,
).abs().mean()
# The optimization loss is a simple
# sum of the color and silhouette errors.
loss = color_err + sil_err
# Log the loss history.
loss_history_color.append(float(color_err))
loss_history_sil.append(float(sil_err))
# Every 10 iterations, print the current values of the losses.
if iteration % 10 == 0:
print(
f'Iteration {iteration:05d}:'
+ f' loss color = {float(color_err):1.2e}'
+ f' loss silhouette = {float(sil_err):1.2e}'
)
# Take the optimization step.
loss.backward()
optimizer.step()
# Visualize the full renders every 100 iterations.
if iteration % 100 == 0:
show_idx = torch.randperm(len(target_cameras))[:1]
show_full_render(
neural_radiance_field,
FoVPerspectiveCameras(
R = target_cameras.R[show_idx],
T = target_cameras.T[show_idx],
znear = target_cameras.znear[show_idx],
zfar = target_cameras.zfar[show_idx],
aspect_ratio = target_cameras.aspect_ratio[show_idx],
fov = target_cameras.fov[show_idx],
device = device,
),
target_images[show_idx][0],
target_silhouettes[show_idx][0],
loss_history_color,
loss_history_sil,
)
# ## 6. Visualizing the optimized neural radiance field
#
# Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis.
# In[ ]:
def generate_rotating_nerf(neural_radiance_field, n_frames = 50):
logRs = torch.zeros(n_frames, 3, device=device)
logRs[:, 1] = torch.linspace(-3.14, 3.14, n_frames, device=device)
Rs = so3_exp_map(logRs)
Ts = torch.zeros(n_frames, 3, device=device)
Ts[:, 2] = 2.7
frames = []
print('Rendering rotating NeRF ...')
for R, T in zip(tqdm(Rs), Ts):
camera = FoVPerspectiveCameras(
R=R[None],
T=T[None],
znear=target_cameras.znear[0],
zfar=target_cameras.zfar[0],
aspect_ratio=target_cameras.aspect_ratio[0],
fov=target_cameras.fov[0],
device=device,
)
# Note that we again render with `NDCGridRaysampler`
# and the batched_forward function of neural_radiance_field.
frames.append(
renderer_grid(
cameras=camera,
volumetric_function=neural_radiance_field.batched_forward,
)[0][..., :3]
)
return torch.cat(frames)
with torch.no_grad():
rotating_nerf_frames = generate_rotating_nerf(neural_radiance_field, n_frames=3*5)
image_grid(rotating_nerf_frames.clamp(0., 1.).cpu().numpy(), rows=3, cols=5, rgb=True, fill=True)
plt.show()
# ## 7. Conclusion
#
# In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using PyTorch3D's implicit function renderer, composed of either a `MonteCarloRaysampler` or an `NDCGridRaysampler`, and an `EmissionAbsorptionRaymarcher`.


@@ -0,0 +1,496 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Fit a volume via raymarching\n",
"\n",
"This tutorial shows how to fit a volume given a set of views of a scene using differentiable volumetric rendering.\n",
"\n",
"More specifically, this tutorial will explain how to:\n",
"1. Create a differentiable volumetric renderer.\n",
"2. Create a Volumetric model (including how to use the `Volumes` class).\n",
"3. Fit the volume based on the images using the differentiable volumetric renderer. \n",
"4. Visualize the predicted volume."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 0. Install and Import modules\n",
"Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import torch\n",
"need_pytorch3d=False\n",
"try:\n",
" import pytorch3d\n",
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
" ])\n",
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import time\n",
"import json\n",
"import glob\n",
"import torch\n",
"import math\n",
"from tqdm.notebook import tqdm\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from PIL import Image\n",
"from IPython import display\n",
"\n",
"# Data structures and functions for rendering\n",
"from pytorch3d.structures import Volumes\n",
"from pytorch3d.renderer import (\n",
" FoVPerspectiveCameras, \n",
" VolumeRenderer,\n",
" NDCGridRaysampler,\n",
" EmissionAbsorptionRaymarcher\n",
")\n",
"from pytorch3d.transforms import so3_exp_map\n",
"\n",
"# obtain the utilized device\n",
"if torch.cuda.is_available():\n",
" device = torch.device(\"cuda:0\")\n",
" torch.cuda.set_device(device)\n",
"else:\n",
" device = torch.device(\"cpu\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py\n",
"from plot_image_grid import image_grid\n",
"from generate_cow_renders import generate_cow_renders"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"OR if running locally uncomment and run the following cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from utils.generate_cow_renders import generate_cow_renders\n",
"# from utils import image_grid"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Generate images of the scene and masks\n",
"\n",
"The following cell generates our training data.\n",
"It renders the cow mesh from the `fit_textured_mesh.ipynb` tutorial from several viewpoints and returns:\n",
"1. A batch of image and silhouette tensors that are produced by the cow mesh renderer.\n",
"2. A set of cameras corresponding to each render.\n",
"\n",
"Note: For the purpose of this tutorial, which aims at explaining the details of volumetric rendering, we do not explain how the mesh rendering, implemented in the `generate_cow_renders` function, works. Please refer to `fit_textured_mesh.ipynb` for a detailed explanation of mesh rendering."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_cameras, target_images, target_silhouettes = generate_cow_renders(num_views=40)\n",
"print(f'Generated {len(target_images)} images/silhouettes/cameras.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Initialize the volumetric renderer\n",
"\n",
"The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).\n",
"\n",
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n",
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# render_size describes the size of both sides of the \n",
"# rendered images in pixels. We set this to the same size\n",
"# as the target images. I.e. we render at the same\n",
"# size as the ground truth images.\n",
"render_size = target_images.shape[1]\n",
"\n",
"# Our rendered scene is centered around (0,0,0) \n",
"# and is enclosed inside a bounding box\n",
"# whose side is roughly equal to 3.0 (world units).\n",
"volume_extent_world = 3.0\n",
"\n",
"# 1) Instantiate the raysampler.\n",
"# Here, NDCGridRaysampler generates a rectangular image\n",
"# grid of rays whose coordinates follow the PyTorch3D\n",
"# coordinate conventions.\n",
"# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,\n",
"# which roughly corresponds to a one ray-point per voxel.\n",
"# We further set the min_depth=0.1 since there is no surface within\n",
"# 0.1 units of any camera plane.\n",
"raysampler = NDCGridRaysampler(\n",
" image_width=render_size,\n",
" image_height=render_size,\n",
" n_pts_per_ray=150,\n",
" min_depth=0.1,\n",
" max_depth=volume_extent_world,\n",
")\n",
"\n",
"\n",
"# 2) Instantiate the raymarcher.\n",
"# Here, we use the standard EmissionAbsorptionRaymarcher \n",
"# which marches along each ray in order to render\n",
"# each ray into a single 3D color vector \n",
"# and an opacity scalar.\n",
"raymarcher = EmissionAbsorptionRaymarcher()\n",
"\n",
"# Finally, instantiate the volumetric render\n",
"# with the raysampler and raymarcher objects.\n",
"renderer = VolumeRenderer(\n",
" raysampler=raysampler, raymarcher=raymarcher,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Initialize the volumetric model\n",
"\n",
"Next we instantiate a volumetric model of the scene. This quantizes the 3D space to cubical voxels, where each voxel is described with a 3D vector representing the voxel's RGB color and a density scalar which describes the opacity of the voxel (ranging between [0-1], the higher the more opaque).\n",
"\n",
"In order to ensure the range of densities and colors is between [0-1], we represent both volume colors and densities in the logarithmic space. During the forward function of the model, the log-space values are passed through the sigmoid function to bring the log-space values to the correct range.\n",
"\n",
"Additionally, `VolumeModel` contains the renderer object. This object stays unaltered throughout the optimization.\n",
"\n",
"In this cell we also define the `huber` loss function which computes the discrepancy between the rendered colors and masks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class VolumeModel(torch.nn.Module):\n",
" def __init__(self, renderer, volume_size=[64] * 3, voxel_size=0.1):\n",
" super().__init__()\n",
" # After evaluating torch.sigmoid(self.log_colors), we get \n",
" # densities close to zero.\n",
" self.log_densities = torch.nn.Parameter(-4.0 * torch.ones(1, *volume_size))\n",
" # After evaluating torch.sigmoid(self.log_colors), we get \n",
" # a neutral gray color everywhere.\n",
" self.log_colors = torch.nn.Parameter(torch.zeros(3, *volume_size))\n",
" self._voxel_size = voxel_size\n",
" # Store the renderer module as well.\n",
" self._renderer = renderer\n",
" \n",
" def forward(self, cameras):\n",
" batch_size = cameras.R.shape[0]\n",
"\n",
" # Convert the log-space values to the densities/colors\n",
" densities = torch.sigmoid(self.log_densities)\n",
" colors = torch.sigmoid(self.log_colors)\n",
" \n",
" # Instantiate the Volumes object, making sure\n",
" # the densities and colors are correctly\n",
" # expanded batch_size-times.\n",
" volumes = Volumes(\n",
" densities = densities[None].expand(\n",
" batch_size, *self.log_densities.shape),\n",
" features = colors[None].expand(\n",
" batch_size, *self.log_colors.shape),\n",
" voxel_size=self._voxel_size,\n",
" )\n",
" \n",
" # Given cameras and volumes, run the renderer\n",
" # and return only the first output value \n",
" # (the 2nd output is a representation of the sampled\n",
" # rays which can be omitted for our purpose).\n",
" return self._renderer(cameras=cameras, volumes=volumes)[0]\n",
" \n",
"# A helper function for evaluating the smooth L1 (huber) loss\n",
"# between the rendered silhouettes and colors.\n",
"def huber(x, y, scaling=0.1):\n",
" diff_sq = (x - y) ** 2\n",
" loss = ((1 + diff_sq / (scaling**2)).clamp(1e-4).sqrt() - 1) * float(scaling)\n",
" return loss"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4. Fit the volume\n",
"\n",
"Here we carry out the volume fitting with differentiable rendering.\n",
"\n",
"In order to fit the volume, we render it from the viewpoints of the `target_cameras`\n",
"and compare the resulting renders with the observed `target_images` and `target_silhouettes`.\n",
"\n",
"The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding\n",
"pairs of `target_images`/`rendered_images` and `target_silhouettes`/`rendered_silhouettes`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# First move all relevant variables to the correct device.\n",
"target_cameras = target_cameras.to(device)\n",
"target_images = target_images.to(device)\n",
"target_silhouettes = target_silhouettes.to(device)\n",
"\n",
"# Instantiate the volumetric model.\n",
"# We use a cubical volume with the size of \n",
"# one side = 128. The size of each voxel of the volume \n",
"# is set to volume_extent_world / volume_size s.t. the\n",
"# volume represents the space enclosed in a 3D bounding box\n",
"# centered at (0, 0, 0) with the size of each side equal to 3.\n",
"volume_size = 128\n",
"volume_model = VolumeModel(\n",
" renderer,\n",
" volume_size=[volume_size] * 3, \n",
" voxel_size = volume_extent_world / volume_size,\n",
").to(device)\n",
"\n",
"# Instantiate the Adam optimizer. We set its master learning rate to 0.1.\n",
"lr = 0.1\n",
"optimizer = torch.optim.Adam(volume_model.parameters(), lr=lr)\n",
"\n",
"# We do 300 Adam iterations and sample 10 random images in each minibatch.\n",
"batch_size = 10\n",
"n_iter = 300\n",
"for iteration in range(n_iter):\n",
"\n",
" # In case we reached the last 75% of iterations,\n",
" # decrease the learning rate of the optimizer 10-fold.\n",
" if iteration == round(n_iter * 0.75):\n",
" print('Decreasing LR 10-fold ...')\n",
" optimizer = torch.optim.Adam(\n",
" volume_model.parameters(), lr=lr * 0.1\n",
" )\n",
" \n",
" # Zero the optimizer gradient.\n",
" optimizer.zero_grad()\n",
" \n",
" # Sample random batch indices.\n",
" batch_idx = torch.randperm(len(target_cameras))[:batch_size]\n",
" \n",
" # Sample the minibatch of cameras.\n",
" batch_cameras = FoVPerspectiveCameras(\n",
" R = target_cameras.R[batch_idx], \n",
" T = target_cameras.T[batch_idx], \n",
" znear = target_cameras.znear[batch_idx],\n",
" zfar = target_cameras.zfar[batch_idx],\n",
" aspect_ratio = target_cameras.aspect_ratio[batch_idx],\n",
" fov = target_cameras.fov[batch_idx],\n",
" device = device,\n",
" )\n",
" \n",
" # Evaluate the volumetric model.\n",
" rendered_images, rendered_silhouettes = volume_model(\n",
" batch_cameras\n",
" ).split([3, 1], dim=-1)\n",
" \n",
" # Compute the silhouette error as the mean huber\n",
" # loss between the predicted masks and the\n",
" # target silhouettes.\n",
" sil_err = huber(\n",
" rendered_silhouettes[..., 0], target_silhouettes[batch_idx],\n",
" ).abs().mean()\n",
"\n",
" # Compute the color error as the mean huber\n",
" # loss between the rendered colors and the\n",
" # target ground truth images.\n",
" color_err = huber(\n",
" rendered_images, target_images[batch_idx],\n",
" ).abs().mean()\n",
" \n",
" # The optimization loss is a simple\n",
" # sum of the color and silhouette errors.\n",
" loss = color_err + sil_err \n",
" \n",
" # Print the current values of the losses.\n",
" if iteration % 10 == 0:\n",
" print(\n",
" f'Iteration {iteration:05d}:'\n",
" + f' color_err = {float(color_err):1.2e}'\n",
" + f' mask_err = {float(sil_err):1.2e}'\n",
" )\n",
" \n",
" # Take the optimization step.\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" # Visualize the renders every 40 iterations.\n",
" if iteration % 40 == 0:\n",
" # Visualize only a single randomly selected element of the batch.\n",
" im_show_idx = int(torch.randint(low=0, high=batch_size, size=(1,)))\n",
" fig, ax = plt.subplots(2, 2, figsize=(10, 10))\n",
" ax = ax.ravel()\n",
" clamp_and_detach = lambda x: x.clamp(0.0, 1.0).cpu().detach().numpy()\n",
" ax[0].imshow(clamp_and_detach(rendered_images[im_show_idx]))\n",
" ax[1].imshow(clamp_and_detach(target_images[batch_idx[im_show_idx], ..., :3]))\n",
" ax[2].imshow(clamp_and_detach(rendered_silhouettes[im_show_idx, ..., 0]))\n",
" ax[3].imshow(clamp_and_detach(target_silhouettes[batch_idx[im_show_idx]]))\n",
" for ax_, title_ in zip(\n",
" ax, \n",
" (\"rendered image\", \"target image\", \"rendered silhouette\", \"target silhouette\")\n",
" ):\n",
" ax_.grid(\"off\")\n",
" ax_.axis(\"off\")\n",
" ax_.set_title(title_)\n",
" fig.canvas.draw(); fig.show()\n",
" display.clear_output(wait=True)\n",
" display.display(fig)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5. Visualizing the optimized volume\n",
"\n",
"Finally, we visualize the optimized volume by rendering from multiple viewpoints that rotate around the volume's y-axis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def generate_rotating_volume(volume_model, n_frames = 50):\n",
" logRs = torch.zeros(n_frames, 3, device=device)\n",
" logRs[:, 1] = torch.linspace(0.0, 2.0 * 3.14, n_frames, device=device)\n",
" Rs = so3_exp_map(logRs)\n",
" Ts = torch.zeros(n_frames, 3, device=device)\n",
" Ts[:, 2] = 2.7\n",
" frames = []\n",
" print('Generating rotating volume ...')\n",
" for R, T in zip(tqdm(Rs), Ts):\n",
" camera = FoVPerspectiveCameras(\n",
" R=R[None], \n",
" T=T[None], \n",
" znear = target_cameras.znear[0],\n",
" zfar = target_cameras.zfar[0],\n",
" aspect_ratio = target_cameras.aspect_ratio[0],\n",
" fov = target_cameras.fov[0],\n",
" device=device,\n",
" )\n",
" frames.append(volume_model(camera)[..., :3].clamp(0.0, 1.0))\n",
" return torch.cat(frames)\n",
" \n",
"with torch.no_grad():\n",
" rotating_volume_frames = generate_rotating_volume(volume_model, n_frames=7*4)\n",
"\n",
"image_grid(rotating_volume_frames.clamp(0., 1.).cpu().numpy(), rows=4, cols=7, rgb=True, fill=True)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6. Conclusion\n",
"\n",
"In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCGridRaysampler` and an `EmissionAbsorptionRaymarcher`."
]
}
],
"metadata": {
"bento_stylesheets": {
"bento/extensions/flow/main.css": true,
"bento/extensions/kernel_selector/main.css": true,
"bento/extensions/kernel_ui/main.css": true,
"bento/extensions/new_kernel/main.css": true,
"bento/extensions/system_usage/main.css": true,
"bento/extensions/theme/main.css": true
},
"kernelspec": {
"display_name": "pytorch3d_etc (local)",
"language": "python",
"name": "pytorch3d_etc_local"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.5+"
}
},
"nbformat": 4,
"nbformat_minor": 2
}


@@ -0,0 +1,390 @@
# coding: utf-8
# In[ ]:
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# # Fit a volume via raymarching
#
# This tutorial shows how to fit a volume given a set of views of a scene using differentiable volumetric rendering.
#
# More specifically, this tutorial will explain how to:
# 1. Create a differentiable volumetric renderer.
# 2. Create a Volumetric model (including how to use the `Volumes` class).
# 3. Fit the volume based on the images using the differentiable volumetric renderer.
# 4. Visualize the predicted volume.
# ## 0. Install and Import modules
# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:
# In[ ]:
import os
import sys
import torch
need_pytorch3d=False
try:
import pytorch3d
except ModuleNotFoundError:
need_pytorch3d=True
if need_pytorch3d:
if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"):
# We try to install PyTorch3D via a released wheel.
version_str="".join([
f"py3{sys.version_info.minor}_cu",
torch.version.cuda.replace(".",""),
f"_pyt{torch.__version__[0:5:2]}"
])
get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html')
else:
# We try to install PyTorch3D from source.
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
get_ipython().system('tar xzf 1.10.0.tar.gz')
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
# In[ ]:
import os
import sys
import time
import json
import glob
import torch
import math
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from IPython import display
# Data structures and functions for rendering
from pytorch3d.structures import Volumes
from pytorch3d.renderer import (
FoVPerspectiveCameras,
VolumeRenderer,
NDCGridRaysampler,
EmissionAbsorptionRaymarcher
)
from pytorch3d.transforms import so3_exp_map
# obtain the utilized device
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
# In[ ]:
get_ipython().system('wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py')
get_ipython().system('wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py')
from plot_image_grid import image_grid
from generate_cow_renders import generate_cow_renders
# OR, if running locally, uncomment and run the following cell:
# In[ ]:
# from utils.generate_cow_renders import generate_cow_renders
# from utils import image_grid
# ## 1. Generate images of the scene and masks
#
# The following cell generates our training data.
# It renders the cow mesh from the `fit_textured_mesh.ipynb` tutorial from several viewpoints and returns:
# 1. A batch of image and silhouette tensors that are produced by the cow mesh renderer.
# 2. A set of cameras corresponding to each render.
#
# Note: For the purpose of this tutorial, which aims at explaining the details of volumetric rendering, we do not explain how the mesh rendering, implemented in the `generate_cow_renders` function, works. Please refer to `fit_textured_mesh.ipynb` for a detailed explanation of mesh rendering.
# In[ ]:
target_cameras, target_images, target_silhouettes = generate_cow_renders(num_views=40)
print(f'Generated {len(target_images)} images/silhouettes/cameras.')
# ## 2. Initialize the volumetric renderer
#
# The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color values are obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).
#
# The renderer is composed of a *raymarcher* and a *raysampler*.
# - The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).
# - The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm.
# In[ ]:
# render_size describes the size of both sides of the
# rendered images in pixels. We set this to the same size
# as the target images, i.e. we render at the same
# size as the ground truth images.
render_size = target_images.shape[1]
# Our rendered scene is centered around (0,0,0)
# and is enclosed inside a bounding box
# whose side is roughly equal to 3.0 (world units).
volume_extent_world = 3.0
# 1) Instantiate the raysampler.
# Here, NDCGridRaysampler generates a rectangular image
# grid of rays whose coordinates follow the PyTorch3D
# coordinate conventions.
# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,
# which roughly corresponds to one ray-point per voxel.
# We further set the min_depth=0.1 since there is no surface within
# 0.1 units of any camera plane.
raysampler = NDCGridRaysampler(
image_width=render_size,
image_height=render_size,
n_pts_per_ray=150,
min_depth=0.1,
max_depth=volume_extent_world,
)
# 2) Instantiate the raymarcher.
# Here, we use the standard EmissionAbsorptionRaymarcher
# which marches along each ray in order to render
# each ray into a single 3D color vector
# and an opacity scalar.
raymarcher = EmissionAbsorptionRaymarcher()
# Finally, instantiate the volumetric renderer
# with the raysampler and raymarcher objects.
renderer = VolumeRenderer(
raysampler=raysampler, raymarcher=raymarcher,
)
# ## 3. Initialize the volumetric model
#
# Next we instantiate a volumetric model of the scene. This quantizes the 3D space to cubical voxels, where each voxel is described with a 3D vector representing the voxel's RGB color and a density scalar which describes the voxel's opacity (in the range [0-1]; the higher, the more opaque).
#
# In order to ensure the range of densities and colors is in [0-1], we represent both volume colors and densities in log-space. During the forward function of the model, the log-space values are passed through the sigmoid function to bring them to the correct range.
#
# Additionally, `VolumeModel` contains the renderer object. This object stays unaltered throughout the optimization.
#
# In this cell we also define the `huber` loss function, which computes the discrepancy between the rendered colors and masks.
# In[ ]:
class VolumeModel(torch.nn.Module):
def __init__(self, renderer, volume_size=[64] * 3, voxel_size=0.1):
super().__init__()
# After evaluating torch.sigmoid(self.log_densities), we get
# densities close to zero.
self.log_densities = torch.nn.Parameter(-4.0 * torch.ones(1, *volume_size))
# After evaluating torch.sigmoid(self.log_colors), we get
# a neutral gray color everywhere.
self.log_colors = torch.nn.Parameter(torch.zeros(3, *volume_size))
self._voxel_size = voxel_size
# Store the renderer module as well.
self._renderer = renderer
def forward(self, cameras):
batch_size = cameras.R.shape[0]
# Convert the log-space values to the densities/colors
densities = torch.sigmoid(self.log_densities)
colors = torch.sigmoid(self.log_colors)
# Instantiate the Volumes object, making sure
# the densities and colors are correctly
# expanded batch_size-times.
volumes = Volumes(
densities = densities[None].expand(
batch_size, *self.log_densities.shape),
features = colors[None].expand(
batch_size, *self.log_colors.shape),
voxel_size=self._voxel_size,
)
# Given cameras and volumes, run the renderer
# and return only the first output value
# (the 2nd output is a representation of the sampled
# rays which can be omitted for our purpose).
return self._renderer(cameras=cameras, volumes=volumes)[0]
# A helper function for evaluating the smooth L1 (huber) loss
# between the rendered silhouettes and colors.
def huber(x, y, scaling=0.1):
diff_sq = (x - y) ** 2
loss = ((1 + diff_sq / (scaling**2)).clamp(1e-4).sqrt() - 1) * float(scaling)
return loss
# ## 4. Fit the volume
#
# Here we carry out the volume fitting with differentiable rendering.
#
# In order to fit the volume, we render it from the viewpoints of the `target_cameras`
# and compare the resulting renders with the observed `target_images` and `target_silhouettes`.
#
# The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding
# pairs of `target_images`/`rendered_images` and `target_silhouettes`/`rendered_silhouettes`.
# In[ ]:
# First move all relevant variables to the correct device.
target_cameras = target_cameras.to(device)
target_images = target_images.to(device)
target_silhouettes = target_silhouettes.to(device)
# Instantiate the volumetric model.
# We use a cubical volume with 128 voxels along each side.
# The size of each voxel is set to
# volume_extent_world / volume_size so that the volume spans
# a 3D bounding box centered at (0, 0, 0) with each side
# equal to 3 units.
volume_size = 128
volume_model = VolumeModel(
renderer,
volume_size=[volume_size] * 3,
voxel_size = volume_extent_world / volume_size,
).to(device)
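# (The model above therefore optimizes 4 * 128**3 = 8,388,608 scalar
# parameters: one log-density and three log-color values per voxel.)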
# Instantiate the Adam optimizer. We set its learning rate to 0.1.
lr = 0.1
optimizer = torch.optim.Adam(volume_model.parameters(), lr=lr)
# We do 300 Adam iterations and sample 10 random images in each minibatch.
batch_size = 10
n_iter = 300
for iteration in range(n_iter):
    # Once we reach 75% of the iterations (i.e. for the final 25%),
    # decrease the learning rate of the optimizer 10-fold.
if iteration == round(n_iter * 0.75):
print('Decreasing LR 10-fold ...')
optimizer = torch.optim.Adam(
volume_model.parameters(), lr=lr * 0.1
)
# Zero the optimizer gradient.
optimizer.zero_grad()
# Sample random batch indices.
batch_idx = torch.randperm(len(target_cameras))[:batch_size]
# Sample the minibatch of cameras.
batch_cameras = FoVPerspectiveCameras(
R = target_cameras.R[batch_idx],
T = target_cameras.T[batch_idx],
znear = target_cameras.znear[batch_idx],
zfar = target_cameras.zfar[batch_idx],
aspect_ratio = target_cameras.aspect_ratio[batch_idx],
fov = target_cameras.fov[batch_idx],
device = device,
)
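    # (Note: we re-assemble a FoVPerspectiveCameras object from the indexed
    # parameters of target_cameras. Newer PyTorch3D releases also support
    # indexing a camera batch directly, but the explicit construction is
    # kept here for clarity.)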
# Evaluate the volumetric model.
rendered_images, rendered_silhouettes = volume_model(
batch_cameras
).split([3, 1], dim=-1)
# Compute the silhouette error as the mean huber
# loss between the predicted masks and the
# target silhouettes.
sil_err = huber(
rendered_silhouettes[..., 0], target_silhouettes[batch_idx],
).abs().mean()
# Compute the color error as the mean huber
# loss between the rendered colors and the
# target ground truth images.
color_err = huber(
rendered_images, target_images[batch_idx],
).abs().mean()
# The optimization loss is a simple
# sum of the color and silhouette errors.
loss = color_err + sil_err
# Print the current values of the losses.
if iteration % 10 == 0:
print(
f'Iteration {iteration:05d}:'
+ f' color_err = {float(color_err):1.2e}'
+ f' mask_err = {float(sil_err):1.2e}'
)
# Take the optimization step.
loss.backward()
optimizer.step()
# Visualize the renders every 40 iterations.
if iteration % 40 == 0:
# Visualize only a single randomly selected element of the batch.
im_show_idx = int(torch.randint(low=0, high=batch_size, size=(1,)))
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
ax = ax.ravel()
clamp_and_detach = lambda x: x.clamp(0.0, 1.0).cpu().detach().numpy()
ax[0].imshow(clamp_and_detach(rendered_images[im_show_idx]))
ax[1].imshow(clamp_and_detach(target_images[batch_idx[im_show_idx], ..., :3]))
ax[2].imshow(clamp_and_detach(rendered_silhouettes[im_show_idx, ..., 0]))
ax[3].imshow(clamp_and_detach(target_silhouettes[batch_idx[im_show_idx]]))
for ax_, title_ in zip(
ax,
("rendered image", "target image", "rendered silhouette", "target silhouette")
):
            ax_.grid(False)
ax_.axis("off")
ax_.set_title(title_)
        fig.canvas.draw()
        fig.show()
display.clear_output(wait=True)
display.display(fig)
# ## 5. Visualizing the optimized volume
#
# Finally, we visualize the optimized volume by rendering from multiple viewpoints that rotate around the volume's y-axis.
# In[ ]:
def generate_rotating_volume(volume_model, n_frames = 50):
    # Log-rotations: rotate about the y-axis by equally spaced angles
    # covering (approximately) one full revolution.
    logRs = torch.zeros(n_frames, 3, device=device)
    logRs[:, 1] = torch.linspace(0.0, 2.0 * 3.14, n_frames, device=device)
    # Convert the axis-angle (log-rotation) vectors to rotation matrices.
    Rs = so3_exp_map(logRs)
    # Place each camera 2.7 units away from the volume center.
    Ts = torch.zeros(n_frames, 3, device=device)
    Ts[:, 2] = 2.7
frames = []
print('Generating rotating volume ...')
for R, T in zip(tqdm(Rs), Ts):
camera = FoVPerspectiveCameras(
R=R[None],
T=T[None],
znear = target_cameras.znear[0],
zfar = target_cameras.zfar[0],
aspect_ratio = target_cameras.aspect_ratio[0],
fov = target_cameras.fov[0],
device=device,
)
frames.append(volume_model(camera)[..., :3].clamp(0.0, 1.0))
return torch.cat(frames)
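# so3_exp_map converts a batch of axis-angle ("log-rotation") vectors into
# 3x3 rotation matrices. As a tiny, illustrative sanity check (not part of
# the tutorial pipeline), the zero vector maps to the identity rotation:
assert torch.allclose(
    so3_exp_map(torch.zeros(1, 3, device=device)),
    torch.eye(3, device=device)[None],
    atol=1e-5,
)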
with torch.no_grad():
rotating_volume_frames = generate_rotating_volume(volume_model, n_frames=7*4)
image_grid(rotating_volume_frames.clamp(0., 1.).cpu().numpy(), rows=4, cols=7, rgb=True, fill=True)
plt.show()
# ## 6. Conclusion
#
# In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene so that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using PyTorch3D's volumetric renderer, composed of an `NDCGridRaysampler` and an `EmissionAbsorptionRaymarcher`.

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
<div class="notebook">
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Fit-a-simple-Neural-Radiance-Field-via-raymarching">Fit a simple Neural Radiance Field via raymarching<a class="anchor-link" href="#Fit-a-simple-Neural-Radiance-Field-via-raymarching"></a></h1><p>This tutorial shows how to fit Neural Radiance Field given a set of views of a scene using differentiable implicit function rendering.</p>
<p>More specifically, this tutorial will explain how to:</p>
<ol>
<li>Create a differentiable implicit function renderer with either image-grid or Monte Carlo ray sampling.</li>
<li>Create an implicit model of a scene.</li>
<li>Fit the implicit function (Neural Radiance Field) based on input images using the differentiable implicit renderer. </li>
<li>Visualize the learnt implicit function.</li>
</ol>
<p>Note that the presented implicit model is a simplified version of NeRF:<br/>
<em>Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, Ren Ng: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis, ECCV 2020.</em></p>
<p>The simplifications include:</p>
<ul>
<li><em>Ray sampling</em>: This notebook does not perform stratified ray sampling but rather ray sampling at equidistant depths.</li>
<li><em>Rendering</em>: We do a single rendering pass, as opposed to the original implementation that does a coarse and fine rendering pass.</li>
<li><em>Architecture</em>: Our network is shallower, which allows for faster optimization, possibly at the cost of surface details.</li>
<li><em>Mask loss</em>: Since our observations include segmentation masks, we also optimize a silhouette loss that forces rays either to get fully absorbed inside the volume or to pass through it completely.</li>
</ul>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-modules">0. Install and Import modules<a class="anchor-link" href="#0.-Install-and-Import-modules"></a></h2><p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"1.9"</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">:</span><span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># %matplotlib inline</span>
<span class="c1"># %matplotlib notebook</span>
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">time</span>
<span class="kn">import</span> <span class="nn">json</span>
<span class="kn">import</span> <span class="nn">glob</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">math</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
<span class="kn">from</span> <span class="nn">IPython</span> <span class="kn">import</span> <span class="n">display</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="kn">import</span> <span class="n">tqdm</span>
<span class="c1"># Data structures and functions for rendering</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="kn">import</span> <span class="n">Volumes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.transforms</span> <span class="kn">import</span> <span class="n">so3_exp_map</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
<span class="n">NDCGridRaysampler</span><span class="p">,</span>
<span class="n">MonteCarloRaysampler</span><span class="p">,</span>
<span class="n">EmissionAbsorptionRaymarcher</span><span class="p">,</span>
<span class="n">ImplicitRenderer</span><span class="p">,</span>
<span class="n">RayBundle</span><span class="p">,</span>
<span class="n">ray_bundle_to_ray_points</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># obtain the utilized device</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="s1">'Please note that NeRF is a resource-demanding method.'</span>
<span class="o">+</span> <span class="s1">' Running this notebook on CPU will be extremely slow.'</span>
<span class="o">+</span> <span class="s1">' We recommend running the example on a GPU'</span>
<span class="o">+</span> <span class="s1">' with at least 10 GB of memory.'</span>
<span class="p">)</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
<span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="kn">import</span> <span class="n">image_grid</span>
<span class="kn">from</span> <span class="nn">generate_cow_renders</span> <span class="kn">import</span> <span class="n">generate_cow_renders</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Or, if running locally, uncomment and run the following cell instead:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># from utils.generate_cow_renders import generate_cow_renders</span>
<span class="c1"># from utils import image_grid</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="1.-Generate-images-of-the-scene-and-masks">1. Generate images of the scene and masks<a class="anchor-link" href="#1.-Generate-images-of-the-scene-and-masks"></a></h2><p>The following cell generates our training data.
It renders the cow mesh from the <code>fit_textured_mesh.ipynb</code> tutorial from several viewpoints and returns:</p>
<ol>
<li>A batch of image and silhouette tensors that are produced by the cow mesh renderer.</li>
<li>A set of cameras corresponding to each render.</li>
</ol>
<p>Note: Since this tutorial focuses on explaining the details of implicit rendering, we do not explain how the mesh rendering implemented in the <code>generate_cow_renders</code> function works. Please refer to <code>fit_textured_mesh.ipynb</code> for a detailed explanation of mesh rendering.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="n">target_cameras</span><span class="p">,</span> <span class="n">target_images</span><span class="p">,</span> <span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">generate_cow_renders</span><span class="p">(</span><span class="n">num_views</span><span class="o">=</span><span class="mi">40</span><span class="p">,</span> <span class="n">azimuth_range</span><span class="o">=</span><span class="mi">180</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="sa">f</span><span class="s1">'Generated </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">target_images</span><span class="p">)</span><span class="si">}</span><span class="s1"> images/silhouettes/cameras.'</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
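<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>To get a feel for the generated data, the renders can be displayed with the <code>image_grid</code> helper downloaded above. This is a minimal sketch, not part of the optimization pipeline; the argument names follow the tutorial's utility script:</p>
<pre><code># Show the 40 generated views as a 4 x 10 grid of RGB images.
image_grid(target_images.numpy(), rows=4, cols=10, rgb=True)
plt.show()
</code></pre>
</div>
</div>
</div>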
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Initialize-the-implicit-renderer">2. Initialize the implicit renderer<a class="anchor-link" href="#2.-Initialize-the-implicit-renderer"></a></h2><p>The following initializes an implicit renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the neural model of the scene (the model is described &amp; instantiated in a later cell).</p>
<p>The renderer is composed of a <em>raymarcher</em> and a <em>raysampler</em>.</p>
<ul>
<li>The <em>raysampler</em> is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:<ul>
<li><code>MonteCarloRaysampler</code> is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during <strong>training</strong> to decrease the memory consumption of the implicit model.</li>
<li><code>NDCGridRaysampler</code>, which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, <code>NDCGridRaysampler</code> consumes a large amount of memory and is therefore only used for visualizing the results of training at <strong>test</strong> time.</li>
</ul>
</li>
<li>The <em>raymarcher</em> takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the <code>EmissionAbsorptionRaymarcher</code>, which implements the standard emission-absorption raymarching algorithm; a minimal usage sketch follows this list.</li>
</ul>
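<p>To make the composition of raysampler and raymarcher concrete, here is a minimal sketch of how an <code>ImplicitRenderer</code> is called, assuming <code>renderer_grid</code> and <code>target_cameras</code> from the surrounding cells already exist. The <code>constant_fog</code> function is a hypothetical stand-in for the neural model defined later:</p>
<pre><code>import torch

# Hypothetical volumetric function: constant "fog" density, white color.
def constant_fog(ray_bundle, **kwargs):
    n_pts = ray_bundle.lengths.shape[-1]
    spatial = (*ray_bundle.origins.shape[:-1], n_pts)
    densities = 0.02 * torch.ones(*spatial, 1)  # opacity of each ray point
    colors = torch.ones(*spatial, 3)            # RGB color of each ray point
    return densities, colors

# The renderer emits rays, queries `constant_fog` at the sampled ray points,
# and composites them with the EmissionAbsorptionRaymarcher into an image of
# shape (batch, render_size, render_size, 4): RGB plus rendered opacity.
images, sampled_rays = renderer_grid(
    cameras=target_cameras[:1], volumetric_function=constant_fog
)
</code></pre>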
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render_size describes the size of both sides of the </span>
<span class="c1"># rendered images in pixels. Since an advantage of </span>
<span class="c1"># Neural Radiance Fields are high quality renders</span>
<span class="c1"># with a significant amount of details, we render</span>
<span class="c1"># the implicit function at double the size of </span>
<span class="c1"># target images.</span>
<span class="n">render_size</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="mi">2</span>
<span class="c1"># Our rendered scene is centered around (0,0,0) </span>
<span class="c1"># and is enclosed inside a bounding box</span>
<span class="c1"># whose side is roughly equal to 3.0 (world units).</span>
<span class="n">volume_extent_world</span> <span class="o">=</span> <span class="mf">3.0</span>
<span class="c1"># 1) Instantiate the raysamplers.</span>
<span class="c1"># Here, NDCGridRaysampler generates a rectangular image</span>
<span class="c1"># grid of rays whose coordinates follow the PyTorch3D</span>
<span class="c1"># coordinate conventions.</span>
<span class="n">raysampler_grid</span> <span class="o">=</span> <span class="n">NDCGridRaysampler</span><span class="p">(</span>
<span class="n">image_height</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">image_width</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># MonteCarloRaysampler generates a random subset </span>
<span class="c1"># of `n_rays_per_image` rays emitted from the image plane.</span>
<span class="n">raysampler_mc</span> <span class="o">=</span> <span class="n">MonteCarloRaysampler</span><span class="p">(</span>
<span class="n">min_x</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.0</span><span class="p">,</span>
<span class="n">max_x</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span>
<span class="n">min_y</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.0</span><span class="p">,</span>
<span class="n">max_y</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span>
<span class="n">n_rays_per_image</span><span class="o">=</span><span class="mi">750</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># 2) Instantiate the raymarcher.</span>
<span class="c1"># Here, we use the standard EmissionAbsorptionRaymarcher </span>
<span class="c1"># which marches along each ray in order to render</span>
<span class="c1"># the ray into a single 3D color vector </span>
<span class="c1"># and an opacity scalar.</span>
<span class="n">raymarcher</span> <span class="o">=</span> <span class="n">EmissionAbsorptionRaymarcher</span><span class="p">()</span>
<span class="c1"># Finally, instantiate the implicit renders</span>
<span class="c1"># for both raysamplers.</span>
<span class="n">renderer_grid</span> <span class="o">=</span> <span class="n">ImplicitRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler_grid</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">renderer_mc</span> <span class="o">=</span> <span class="n">ImplicitRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler_mc</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Define-the-neural-radiance-field-model">3. Define the neural radiance field model<a class="anchor-link" href="#3.-Define-the-neural-radiance-field-model"></a></h2><p>In this cell we define the <code>NeuralRadianceField</code> module, which specifies a continuous field of colors and opacities over the 3D domain of the scene.</p>
<p>The <code>forward</code> function of <code>NeuralRadianceField</code> (NeRF) receives as input a set of tensors that parametrize a bundle of rendering rays. The ray bundle is later converted to 3D ray points in the world coordinates of the scene. Each 3D point is then mapped to a harmonic representation using the <code>HarmonicEmbedding</code> layer (defined in the next cell). The harmonic embeddings then enter the <em>color</em> and <em>opacity</em> branches of the NeRF model in order to label each ray point with a 3D vector and a 1D scalar in the [0, 1] range, which define the point's RGB color and opacity respectively.</p>
<p>Since NeRF has a large memory footprint, we also implement the <code>NeuralRadianceField.batched_forward</code> method. The method splits the input rays into batches and executes the <code>forward</code> function for each batch separately in a for loop. This lets us render a large set of rays without running out of GPU memory. Typically, <code>batched_forward</code> would be used to render rays emitted from all pixels of an image in order to produce a full-sized render of a scene, as in the sketch below.</p>
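<p>For instance, a test-time full-image render would look roughly like the following sketch (assuming a trained <code>neural_radiance_field</code> and a test <code>camera</code>; the tutorial's actual evaluation code appears in a later cell):</p>
<pre><code>with torch.no_grad():
    # Render all image pixels in memory-friendly chunks.
    rendered_image_silhouette, _ = renderer_grid(
        cameras=camera,
        volumetric_function=neural_radiance_field.batched_forward,
    )
# Split the 4-channel render into RGB and silhouette.
rendered_image, rendered_silhouette = (
    rendered_image_silhouette[0].split([3, 1], dim=-1)
)
</code></pre>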
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">class</span> <span class="nc">HarmonicEmbedding</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_harmonic_functions</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span> <span class="n">omega0</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Given an input tensor `x` of shape [minibatch, ... , dim],</span>
<span class="sd"> the harmonic embedding layer converts each feature</span>
<span class="sd"> in `x` into a series of harmonic features `embedding`</span>
<span class="sd"> as follows:</span>
<span class="sd"> embedding[..., i*dim:(i+1)*dim] = [</span>
<span class="sd"> sin(x[..., i]),</span>
<span class="sd"> sin(2*x[..., i]),</span>
<span class="sd"> sin(4*x[..., i]),</span>
<span class="sd"> ...</span>
<span class="sd"> sin(2**(self.n_harmonic_functions-1) * x[..., i]),</span>
<span class="sd"> cos(x[..., i]),</span>
<span class="sd"> cos(2*x[..., i]),</span>
<span class="sd"> cos(4*x[..., i]),</span>
<span class="sd"> ...</span>
<span class="sd"> cos(2**(self.n_harmonic_functions-1) * x[..., i])</span>
<span class="sd"> ]</span>
<span class="sd"> </span>
<span class="sd"> Note that `x` is also premultiplied by `omega0` before</span>
<span class="sd"> evaluating the harmonic functions.</span>
<span class="sd"> """</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span>
<span class="s1">'frequencies'</span><span class="p">,</span>
<span class="n">omega0</span> <span class="o">*</span> <span class="p">(</span><span class="mf">2.0</span> <span class="o">**</span> <span class="n">torch</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">n_harmonic_functions</span><span class="p">)),</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Args:</span>
<span class="sd"> x: tensor of shape [..., dim]</span>
<span class="sd"> Returns:</span>
<span class="sd"> embedding: a harmonic embedding of `x`</span>
<span class="sd"> of shape [..., n_harmonic_functions * dim * 2]</span>
<span class="sd"> """</span>
<span class="n">embed</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">]</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">frequencies</span><span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">*</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">((</span><span class="n">embed</span><span class="o">.</span><span class="n">sin</span><span class="p">(),</span> <span class="n">embed</span><span class="o">.</span><span class="n">cos</span><span class="p">()),</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="k">class</span> <span class="nc">NeuralRadianceField</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_harmonic_functions</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="o">=</span><span class="mi">256</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="sd">"""</span>
<span class="sd"> Args:</span>
<span class="sd"> n_harmonic_functions: The number of harmonic functions</span>
<span class="sd"> used to form the harmonic embedding of each point.</span>
<span class="sd"> n_hidden_neurons: The number of hidden units in the</span>
<span class="sd"> fully connected layers of the MLPs of the model.</span>
<span class="sd"> """</span>
<span class="c1"># The harmonic embedding layer converts input 3D coordinates</span>
<span class="c1"># to a representation that is more suitable for</span>
<span class="c1"># processing with a deep neural network.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span> <span class="o">=</span> <span class="n">HarmonicEmbedding</span><span class="p">(</span><span class="n">n_harmonic_functions</span><span class="p">)</span>
<span class="c1"># The dimension of the harmonic embedding.</span>
<span class="n">embedding_dim</span> <span class="o">=</span> <span class="n">n_harmonic_functions</span> <span class="o">*</span> <span class="mi">2</span> <span class="o">*</span> <span class="mi">3</span>
<span class="c1"># self.mlp is a simple 2-layer multi-layer perceptron</span>
<span class="c1"># which converts the input per-point harmonic embeddings</span>
<span class="c1"># to a latent representation.</span>
<span class="c1"># Not that we use Softplus activations instead of ReLU.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">mlp</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">embedding_dim</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="p">)</span>
<span class="c1"># Given features predicted by self.mlp, self.color_layer</span>
<span class="c1"># is responsible for predicting a 3-D per-point vector</span>
<span class="c1"># that represents the RGB color of the point.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">color_layer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span> <span class="o">+</span> <span class="n">embedding_dim</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sigmoid</span><span class="p">(),</span>
<span class="c1"># To ensure that the colors correctly range between [0-1],</span>
<span class="c1"># the layer is terminated with a sigmoid layer.</span>
<span class="p">)</span>
<span class="c1"># The density layer converts the features of self.mlp</span>
<span class="c1"># to a 1D density value representing the raw opacity</span>
<span class="c1"># of each point.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="c1"># Sofplus activation ensures that the raw opacity</span>
<span class="c1"># is a non-negative number.</span>
<span class="p">)</span>
<span class="c1"># We set the bias of the density layer to -1.5</span>
<span class="c1"># in order to initialize the opacities of the</span>
<span class="c1"># ray points to values close to 0. </span>
<span class="c1"># This is a crucial detail for ensuring convergence</span>
<span class="c1"># of the model.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">data</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.5</span>
<span class="k">def</span> <span class="nf">_get_densities</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">features</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function takes `features` predicted by `self.mlp`</span>
<span class="sd"> and converts them to `raw_densities` with `self.density_layer`.</span>
<span class="sd"> `raw_densities` are later mapped to [0-1] range with</span>
<span class="sd"> 1 - inverse exponential of `raw_densities`.</span>
<span class="sd"> """</span>
<span class="n">raw_densities</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span><span class="p">(</span><span class="n">features</span><span class="p">)</span>
<span class="k">return</span> <span class="mi">1</span> <span class="o">-</span> <span class="p">(</span><span class="o">-</span><span class="n">raw_densities</span><span class="p">)</span><span class="o">.</span><span class="n">exp</span><span class="p">()</span>
<span class="k">def</span> <span class="nf">_get_colors</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">features</span><span class="p">,</span> <span class="n">rays_directions</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function takes per-point `features` predicted by `self.mlp`</span>
<span class="sd"> and evaluates the color model in order to attach to each</span>
<span class="sd"> point a 3D vector of its RGB color.</span>
<span class="sd"> </span>
<span class="sd"> In order to represent viewpoint dependent effects,</span>
<span class="sd"> before evaluating `self.color_layer`, `NeuralRadianceField`</span>
<span class="sd"> concatenates to the `features` a harmonic embedding</span>
<span class="sd"> of `ray_directions`, which are per-point directions </span>
<span class="sd"> of point rays expressed as 3D l2-normalized vectors</span>
<span class="sd"> in world coordinates.</span>
<span class="sd"> """</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="n">features</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># Normalize the ray_directions to unit l2 norm.</span>
<span class="n">rays_directions_normed</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">functional</span><span class="o">.</span><span class="n">normalize</span><span class="p">(</span>
<span class="n">rays_directions</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span>
<span class="p">)</span>
<span class="c1"># Obtain the harmonic embedding of the normalized ray directions.</span>
<span class="n">rays_embedding</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span><span class="p">(</span>
<span class="n">rays_directions_normed</span>
<span class="p">)</span>
<span class="c1"># Expand the ray directions tensor so that its spatial size</span>
<span class="c1"># is equal to the size of features.</span>
<span class="n">rays_embedding_expand</span> <span class="o">=</span> <span class="n">rays_embedding</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">,</span> <span class="p">:]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="n">rays_embedding</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="p">)</span>
<span class="c1"># Concatenate ray direction embeddings with </span>
<span class="c1"># features and evaluate the color model.</span>
<span class="n">color_layer_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span>
<span class="p">(</span><span class="n">features</span><span class="p">,</span> <span class="n">rays_embedding_expand</span><span class="p">),</span>
<span class="n">dim</span><span class="o">=-</span><span class="mi">1</span>
<span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">color_layer</span><span class="p">(</span><span class="n">color_layer_input</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">ray_bundle</span><span class="p">:</span> <span class="n">RayBundle</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> The forward function accepts the parametrizations of</span>
<span class="sd"> 3D points sampled along projection rays. The forward</span>
<span class="sd"> pass is responsible for attaching a 3D vector</span>
<span class="sd"> and a 1D scalar representing the point's </span>
<span class="sd"> RGB color and opacity respectively.</span>
<span class="sd"> </span>
<span class="sd"> Args:</span>
<span class="sd"> ray_bundle: A RayBundle object containing the following variables:</span>
<span class="sd"> origins: A tensor of shape `(minibatch, ..., 3)` denoting the</span>
<span class="sd"> origins of the sampling rays in world coords.</span>
<span class="sd"> directions: A tensor of shape `(minibatch, ..., 3)`</span>
<span class="sd"> containing the direction vectors of sampling rays in world coords.</span>
<span class="sd"> lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`</span>
<span class="sd"> containing the lengths at which the rays are sampled.</span>
<span class="sd"> Returns:</span>
<span class="sd"> rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`</span>
<span class="sd"> denoting the opacity of each ray point.</span>
<span class="sd"> rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`</span>
<span class="sd"> denoting the color of each ray point.</span>
<span class="sd"> """</span>
<span class="c1"># We first convert the ray parametrizations to world</span>
<span class="c1"># coordinates with `ray_bundle_to_ray_points`.</span>
<span class="n">rays_points_world</span> <span class="o">=</span> <span class="n">ray_bundle_to_ray_points</span><span class="p">(</span><span class="n">ray_bundle</span><span class="p">)</span>
<span class="c1"># rays_points_world.shape = [minibatch x ... x 3]</span>
<span class="c1"># For each 3D world coordinate, we obtain its harmonic embedding.</span>
<span class="n">embeds</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span><span class="p">(</span>
<span class="n">rays_points_world</span>
<span class="p">)</span>
<span class="c1"># embeds.shape = [minibatch x ... x self.n_harmonic_functions*6]</span>
<span class="c1"># self.mlp maps each harmonic embedding to a latent feature space.</span>
<span class="n">features</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mlp</span><span class="p">(</span><span class="n">embeds</span><span class="p">)</span>
<span class="c1"># features.shape = [minibatch x ... x n_hidden_neurons]</span>
<span class="c1"># Finally, given the per-point features, </span>
<span class="c1"># execute the density and color branches.</span>
<span class="n">rays_densities</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_densities</span><span class="p">(</span><span class="n">features</span><span class="p">)</span>
<span class="c1"># rays_densities.shape = [minibatch x ... x 1]</span>
<span class="n">rays_colors</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_colors</span><span class="p">(</span><span class="n">features</span><span class="p">,</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">directions</span><span class="p">)</span>
<span class="c1"># rays_colors.shape = [minibatch x ... x 3]</span>
<span class="k">return</span> <span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span>
<span class="k">def</span> <span class="nf">batched_forward</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">ray_bundle</span><span class="p">:</span> <span class="n">RayBundle</span><span class="p">,</span>
<span class="n">n_batches</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">16</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function is used to allow for memory efficient processing</span>
<span class="sd"> of input rays. The input rays are first split to `n_batches`</span>
<span class="sd"> chunks and passed through the `self.forward` function one at a time</span>
<span class="sd"> in a for loop. Combined with disabling PyTorch gradient caching</span>
<span class="sd"> (`torch.no_grad()`), this allows for rendering large batches</span>
<span class="sd"> of rays that do not all fit into GPU memory in a single forward pass.</span>
<span class="sd"> In our case, batched_forward is used to export a fully-sized render</span>
<span class="sd"> of the radiance field for visualization purposes.</span>
<span class="sd"> </span>
<span class="sd"> Args:</span>
<span class="sd"> ray_bundle: A RayBundle object containing the following variables:</span>
<span class="sd"> origins: A tensor of shape `(minibatch, ..., 3)` denoting the</span>
<span class="sd"> origins of the sampling rays in world coords.</span>
<span class="sd"> directions: A tensor of shape `(minibatch, ..., 3)`</span>
<span class="sd"> containing the direction vectors of sampling rays in world coords.</span>
<span class="sd"> lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`</span>
<span class="sd"> containing the lengths at which the rays are sampled.</span>
<span class="sd"> n_batches: Specifies the number of batches the input rays are split into.</span>
<span class="sd"> The larger the number of batches, the smaller the memory footprint</span>
<span class="sd"> and the lower the processing speed.</span>
<span class="sd"> Returns:</span>
<span class="sd"> rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`</span>
<span class="sd"> denoting the opacity of each ray point.</span>
<span class="sd"> rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`</span>
<span class="sd"> denoting the color of each ray point.</span>
<span class="sd"> """</span>
<span class="c1"># Parse out shapes needed for tensor reshaping in this function.</span>
<span class="n">n_pts_per_ray</span> <span class="o">=</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">lengths</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="p">[</span><span class="o">*</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="n">n_pts_per_ray</span><span class="p">]</span>
<span class="c1"># Split the rays to `n_batches` batches.</span>
<span class="n">tot_samples</span> <span class="o">=</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span>
<span class="n">batches</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">chunk</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">tot_samples</span><span class="p">),</span> <span class="n">n_batches</span><span class="p">)</span>
<span class="c1"># For each batch, execute the standard forward pass.</span>
<span class="n">batch_outputs</span> <span class="o">=</span> <span class="p">[</span>
<span class="bp">self</span><span class="o">.</span><span class="n">forward</span><span class="p">(</span>
<span class="n">RayBundle</span><span class="p">(</span>
<span class="n">origins</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">directions</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">directions</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">lengths</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">lengths</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">n_pts_per_ray</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">xys</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="p">)</span>
<span class="p">)</span> <span class="k">for</span> <span class="n">batch_idx</span> <span class="ow">in</span> <span class="n">batches</span>
<span class="p">]</span>
<span class="c1"># Concatenate the per-batch rays_densities and rays_colors</span>
<span class="c1"># and reshape according to the sizes of the inputs.</span>
<span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span>
<span class="p">[</span><span class="n">batch_output</span><span class="p">[</span><span class="n">output_i</span><span class="p">]</span> <span class="k">for</span> <span class="n">batch_output</span> <span class="ow">in</span> <span class="n">batch_outputs</span><span class="p">],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span>
<span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span> <span class="k">for</span> <span class="n">output_i</span> <span class="ow">in</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
<span class="p">]</span>
<span class="k">return</span> <span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span>
</pre></div>
</div>
</div>
</div>
</div>
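<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a quick sanity check of the embedding shapes, one can run the following sketch (hypothetical values, not part of the tutorial's pipeline):</p>
<pre><code>he = HarmonicEmbedding(n_harmonic_functions=60)
pts = torch.randn(4, 128, 3)  # e.g. 4 rays with 128 sampled points each
print(he(pts).shape)          # torch.Size([4, 128, 360]) == 60 * 2 * 3
</code></pre>
</div>
</div>
</div>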
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Helper-functions">4. Helper functions<a class="anchor-link" href="#4.-Helper-functions"></a></h2><p>In this function we define functions that help with the Neural Radiance Field optimization.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">huber</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">scaling</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> A helper function for evaluating the smooth L1 (huber) loss</span>
<span class="sd"> between the rendered silhouettes and colors.</span>
<span class="sd"> """</span>
<span class="n">diff_sq</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span> <span class="o">-</span> <span class="n">y</span><span class="p">)</span> <span class="o">**</span> <span class="mi">2</span>
<span class="n">loss</span> <span class="o">=</span> <span class="p">((</span><span class="mi">1</span> <span class="o">+</span> <span class="n">diff_sq</span> <span class="o">/</span> <span class="p">(</span><span class="n">scaling</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">)</span><span class="o">.</span><span class="n">sqrt</span><span class="p">()</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="nb">float</span><span class="p">(</span><span class="n">scaling</span><span class="p">)</span>
<span class="k">return</span> <span class="n">loss</span>
<span class="k">def</span> <span class="nf">sample_images_at_mc_locs</span><span class="p">(</span><span class="n">target_images</span><span class="p">,</span> <span class="n">sampled_rays_xy</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Given a set of Monte Carlo pixel locations `sampled_rays_xy`,</span>
<span class="sd"> this method samples the tensor `target_images` at the</span>
<span class="sd"> respective 2D locations.</span>
<span class="sd"> </span>
<span class="sd"> This function is used in order to extract the colors from</span>
<span class="sd"> ground truth images that correspond to the colors</span>
<span class="sd"> rendered using `MonteCarloRaysampler`.</span>
<span class="sd"> """</span>
<span class="n">ba</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="n">dim</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="n">sampled_rays_xy</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># In order to sample target_images, we utilize</span>
<span class="c1"># the grid_sample function which implements a</span>
<span class="c1"># bilinear image sampler.</span>
<span class="c1"># Note that we have to invert the sign of the </span>
<span class="c1"># sampled ray positions to convert the NDC xy locations</span>
<span class="c1"># of the MonteCarloRaysampler to the coordinate</span>
<span class="c1"># convention of grid_sample.</span>
<span class="n">images_sampled</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">functional</span><span class="o">.</span><span class="n">grid_sample</span><span class="p">(</span>
<span class="n">target_images</span><span class="o">.</span><span class="n">permute</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span>
<span class="o">-</span><span class="n">sampled_rays_xy</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="n">ba</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="c1"># note the sign inversion</span>
<span class="n">align_corners</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">images_sampled</span><span class="o">.</span><span class="n">permute</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span>
<span class="n">ba</span><span class="p">,</span> <span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="n">dim</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">show_full_render</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">camera</span><span class="p">,</span>
<span class="n">target_image</span><span class="p">,</span> <span class="n">target_silhouette</span><span class="p">,</span>
<span class="n">loss_history_color</span><span class="p">,</span> <span class="n">loss_history_sil</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This is a helper function for visualizing the</span>
<span class="sd"> intermediate results of the learning. </span>
<span class="sd"> </span>
<span class="sd"> Since the `NeuralRadianceField` suffers from</span>
<span class="sd"> a large memory footprint, which does not let us</span>
<span class="sd"> render the full image grid in a single forward pass,</span>
<span class="sd"> we utilize the `NeuralRadianceField.batched_forward`</span>
<span class="sd"> function in combination with disabling the gradient caching.</span>
<span class="sd"> This chunks the set of emitted rays to batches and </span>
<span class="sd"> evaluates the implicit function on one batch at a time</span>
<span class="sd"> to prevent GPU memory overflow.</span>
<span class="sd"> """</span>
<span class="c1"># Prevent gradient caching.</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="c1"># Render using the grid renderer and the</span>
<span class="c1"># batched_forward function of neural_radiance_field.</span>
<span class="n">rendered_image_silhouette</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">renderer_grid</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">camera</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">batched_forward</span>
<span class="p">)</span>
<span class="c1"># Split the rendering result to a silhouette render</span>
<span class="c1"># and the image render.</span>
<span class="n">rendered_image</span><span class="p">,</span> <span class="n">rendered_silhouette</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">rendered_image_silhouette</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="p">)</span>
<span class="c1"># Generate plots.</span>
<span class="n">fig</span><span class="p">,</span> <span class="n">ax</span> <span class="o">=</span> <span class="n">plt</span><span class="o">.</span><span class="n">subplots</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">15</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
<span class="n">ax</span> <span class="o">=</span> <span class="n">ax</span><span class="o">.</span><span class="n">ravel</span><span class="p">()</span>
<span class="n">clamp_and_detach</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">loss_history_color</span><span class="p">))),</span> <span class="n">loss_history_color</span><span class="p">,</span> <span class="n">linewidth</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_image</span><span class="p">))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">2</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_silhouette</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">loss_history_sil</span><span class="p">))),</span> <span class="n">loss_history_sil</span><span class="p">,</span> <span class="n">linewidth</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">4</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_image</span><span class="p">))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">5</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_silhouette</span><span class="p">))</span>
<span class="k">for</span> <span class="n">ax_</span><span class="p">,</span> <span class="n">title_</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span>
<span class="n">ax</span><span class="p">,</span>
<span class="p">(</span>
<span class="s2">"loss color"</span><span class="p">,</span> <span class="s2">"rendered image"</span><span class="p">,</span> <span class="s2">"rendered silhouette"</span><span class="p">,</span>
<span class="s2">"loss silhouette"</span><span class="p">,</span> <span class="s2">"target image"</span><span class="p">,</span> <span class="s2">"target silhouette"</span><span class="p">,</span>
<span class="p">)</span>
<span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">title_</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'loss'</span><span class="p">):</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">set_title</span><span class="p">(</span><span class="n">title_</span><span class="p">)</span>
<span class="n">fig</span><span class="o">.</span><span class="n">canvas</span><span class="o">.</span><span class="n">draw</span><span class="p">();</span> <span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="n">display</span><span class="o">.</span><span class="n">clear_output</span><span class="p">(</span><span class="n">wait</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">display</span><span class="o">.</span><span class="n">display</span><span class="p">(</span><span class="n">fig</span><span class="p">)</span>
<span class="k">return</span> <span class="n">fig</span>
</pre></div>
</div>
</div>
</div>
</div>
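<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a quick sanity check on the helpers above, the following sketch (illustrative only; the tensor sizes are made-up assumptions) shows the shapes that <code>sample_images_at_mc_locs</code> consumes and produces:</p>
<pre><code>import torch

images = torch.rand(2, 128, 128, 3)   # (batch, H, W, 3) ground-truth images
xys = torch.rand(2, 1024, 2) * 2 - 1  # (batch, n_rays, 2) NDC locations in [-1, 1]
sampled = sample_images_at_mc_locs(images, xys)
print(sampled.shape)                  # torch.Size([2, 1024, 3])
</code></pre>
</div>
</div>
</div>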
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Fit-the-radiance-field">5. Fit the radiance field<a class="anchor-link" href="#5.-Fit-the-radiance-field"></a></h2><p>Here we carry out the radiance field fitting with differentiable rendering.</p>
<p>In order to fit the radiance field, we render it from the viewpoints of the <code>target_cameras</code>
and compare the resulting renders with the observed <code>target_images</code> and <code>target_silhouettes</code>.</p>
<p>The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding
pairs of <code>target_images</code>/<code>rendered_images</code> and <code>target_silhouettes</code>/<code>rendered_silhouettes</code>.</p>
<p>Since we use the <code>MonteCarloRaysampler</code>, the outputs of the training renderer <code>renderer_mc</code>
are colors of pixels that are randomly sampled from the image plane, not a lattice of pixels forming
a valid image. Thus, in order to compare the rendered colors with the ground truth, we
utilize the random MonteCarlo pixel locations to sample the ground truth images/silhouettes
<code>target_images</code>/<code>target_silhouettes</code> at the xy locations corresponding to the render
locations. This is done with the helper function <code>sample_images_at_mc_locs</code>, which is
described in the previous cell.</p>
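<p>Concretely, the <code>huber</code> helper defined above evaluates a scaled pseudo-Huber error, <code>loss(x, y) = scaling * (sqrt(1 + (x - y)**2 / scaling**2) - 1)</code> (with a small clamp inside the square root for numerical stability), which behaves like a squared error for small residuals and like an absolute error for large ones.</p>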
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># First move all relevant variables to the correct device.</span>
<span class="n">renderer_grid</span> <span class="o">=</span> <span class="n">renderer_grid</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">renderer_mc</span> <span class="o">=</span> <span class="n">renderer_mc</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_cameras</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_images</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">target_silhouettes</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Set the seed for reproducibility</span>
<span class="n">torch</span><span class="o">.</span><span class="n">manual_seed</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="c1"># Instantiate the radiance field model.</span>
<span class="n">neural_radiance_field</span> <span class="o">=</span> <span class="n">NeuralRadianceField</span><span class="p">()</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the Adam optimizer. We set its master learning rate to 1e-3.</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">1e-3</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">)</span>
<span class="c1"># We sample 6 random cameras in a minibatch. Each camera</span>
<span class="c1"># emits raysampler_mc.n_pts_per_image rays.</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">6</span>
<span class="c1"># 3000 iterations take ~20 min on a Tesla M40 and lead to</span>
<span class="c1"># reasonably sharp results. However, for the best possible</span>
<span class="c1"># results, we recommend setting n_iter=20000.</span>
<span class="n">n_iter</span> <span class="o">=</span> <span class="mi">3000</span>
<span class="c1"># Init the loss history buffers.</span>
<span class="n">loss_history_color</span><span class="p">,</span> <span class="n">loss_history_sil</span> <span class="o">=</span> <span class="p">[],</span> <span class="p">[]</span>
<span class="c1"># The main optimization loop.</span>
<span class="k">for</span> <span class="n">iteration</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_iter</span><span class="p">):</span>
<span class="c1"># In case we reached the last 75% of iterations,</span>
<span class="c1"># decrease the learning rate of the optimizer 10-fold.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">==</span> <span class="nb">round</span><span class="p">(</span><span class="n">n_iter</span> <span class="o">*</span> <span class="mf">0.75</span><span class="p">):</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Decreasing LR 10-fold ...'</span><span class="p">)</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span> <span class="o">*</span> <span class="mf">0.1</span>
<span class="p">)</span>
<span class="c1"># Zero the optimizer gradient.</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="c1"># Sample random batch indices.</span>
<span class="n">batch_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="n">batch_size</span><span class="p">]</span>
<span class="c1"># Sample the minibatch of cameras.</span>
<span class="n">batch_cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Evaluate the nerf model.</span>
<span class="n">rendered_images_silhouettes</span><span class="p">,</span> <span class="n">sampled_rays</span> <span class="o">=</span> <span class="n">renderer_mc</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">batch_cameras</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span>
<span class="p">)</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">rendered_silhouettes</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">rendered_images_silhouettes</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="p">)</span>
<span class="c1"># Compute the silhouette error as the mean huber</span>
<span class="c1"># loss between the predicted masks and the</span>
<span class="c1"># sampled target silhouettes.</span>
<span class="n">silhouettes_at_rays</span> <span class="o">=</span> <span class="n">sample_images_at_mc_locs</span><span class="p">(</span>
<span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">],</span>
<span class="n">sampled_rays</span><span class="o">.</span><span class="n">xys</span>
<span class="p">)</span>
<span class="n">sil_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_silhouettes</span><span class="p">,</span>
<span class="n">silhouettes_at_rays</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># Compute the color error as the mean huber</span>
<span class="c1"># loss between the rendered colors and the</span>
<span class="c1"># sampled target images.</span>
<span class="n">colors_at_rays</span> <span class="o">=</span> <span class="n">sample_images_at_mc_locs</span><span class="p">(</span>
<span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">sampled_rays</span><span class="o">.</span><span class="n">xys</span>
<span class="p">)</span>
<span class="n">color_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_images</span><span class="p">,</span>
<span class="n">colors_at_rays</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># The optimization loss is a simple</span>
<span class="c1"># sum of the color and silhouette errors.</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">color_err</span> <span class="o">+</span> <span class="n">sil_err</span>
<span class="c1"># Log the loss history.</span>
<span class="n">loss_history_color</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">))</span>
<span class="n">loss_history_sil</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">))</span>
<span class="c1"># Every 10 iterations, print the current values of the losses.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">10</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="sa">f</span><span class="s1">'Iteration </span><span class="si">{</span><span class="n">iteration</span><span class="si">:</span><span class="s1">05d</span><span class="si">}</span><span class="s1">:'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' loss color = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' loss silhouette = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="p">)</span>
<span class="c1"># Take the optimization step.</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
<span class="c1"># Visualize the full renders every 100 iterations.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">100</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">show_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="mi">1</span><span class="p">]</span>
<span class="n">show_full_render</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="p">,</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">),</span>
<span class="n">target_images</span><span class="p">[</span><span class="n">show_idx</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
<span class="n">target_silhouettes</span><span class="p">[</span><span class="n">show_idx</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
<span class="n">loss_history_color</span><span class="p">,</span>
<span class="n">loss_history_sil</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Visualizing-the-optimized-neural-radiance-field">5. Visualizing the optimized neural radiance field<a class="anchor-link" href="#5.-Visualizing-the-optimized-neural-radiance-field"></a></h2><p>Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">generate_rotating_nerf</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">n_frames</span> <span class="o">=</span> <span class="mi">50</span><span class="p">):</span>
<span class="n">logRs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">logRs</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="o">-</span><span class="mf">3.14</span><span class="p">,</span> <span class="mf">3.14</span><span class="p">,</span> <span class="n">n_frames</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Rs</span> <span class="o">=</span> <span class="n">so3_exp_map</span><span class="p">(</span><span class="n">logRs</span><span class="p">)</span>
<span class="n">Ts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Ts</span><span class="p">[:,</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="mf">2.7</span>
<span class="n">frames</span> <span class="o">=</span> <span class="p">[]</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Rendering rotating NeRF ...'</span><span class="p">)</span>
<span class="k">for</span> <span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">tqdm</span><span class="p">(</span><span class="n">Rs</span><span class="p">),</span> <span class="n">Ts</span><span class="p">):</span>
<span class="n">camera</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">znear</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">zfar</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">aspect_ratio</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">fov</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Note that we again render with `NDCGridRaySampler`</span>
<span class="c1"># and the batched_forward function of neural_radiance_field.</span>
<span class="n">frames</span><span class="o">.</span><span class="n">append</span><span class="p">(</span>
<span class="n">renderer_grid</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">camera</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">batched_forward</span><span class="p">,</span>
<span class="p">)[</span><span class="mi">0</span><span class="p">][</span><span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span><span class="n">frames</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">rotating_nerf_frames</span> <span class="o">=</span> <span class="n">generate_rotating_nerf</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">n_frames</span><span class="o">=</span><span class="mi">3</span><span class="o">*</span><span class="mi">5</span><span class="p">)</span>
<span class="n">image_grid</span><span class="p">(</span><span class="n">rotating_nerf_frames</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">(),</span> <span class="n">rows</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">cols</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span> <span class="n">rgb</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">fill</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="6.-Conclusion">6. Conclusion<a class="anchor-link" href="#6.-Conclusion"></a></h2><p>In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a <code>MonteCarloRaysampler</code> or <code>NDCGridRaysampler</code>, and an <code>EmissionAbsorptionRaymarcher</code>.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>

View File

@ -0,0 +1,956 @@
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
var links = coll[i].nextElementSibling.getElementsByTagName('*');
if (checkActiveCategory){
for (var j = 0; j < links.length; j++) {
if (links[j].classList.contains('navListItemActive')){
coll[i].nextElementSibling.classList.toggle('hide');
coll[i].childNodes[1].classList.toggle('rotate');
checkActiveCategory = false;
break;
}
}
}
coll[i].addEventListener('click', function() {
var arrow = this.childNodes[1];
arrow.classList.toggle('rotate');
var content = this.nextElementSibling;
content.classList.toggle('hide');
});
}
document.addEventListener('DOMContentLoaded', function() {
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
createToggler('#tocToggler', 'body', 'tocActive');
var headings = document.querySelector('.toc-headings');
headings && headings.addEventListener('click', function(event) {
var el = event.target;
while(el !== headings){
if (el.tagName === 'A') {
document.body.classList.remove('tocActive');
break;
} else{
el = el.parentNode;
}
}
}, false);
function createToggler(togglerSelector, targetSelector, className) {
var toggler = document.querySelector(togglerSelector);
var target = document.querySelector(targetSelector);
if (!toggler) {
return;
}
toggler.onclick = function(event) {
event.preventDefault();
target.classList.toggle(className);
};
}
});
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/fit_simple_neural_radiance_field.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_simple_neural_radiance_field.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_simple_neural_radiance_field.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
<div class="notebook">
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Fit-a-simple-Neural-Radiance-Field-via-raymarching">Fit a simple Neural Radiance Field via raymarching<a class="anchor-link" href="#Fit-a-simple-Neural-Radiance-Field-via-raymarching"></a></h1><p>This tutorial shows how to fit Neural Radiance Field given a set of views of a scene using differentiable implicit function rendering.</p>
<p>More specifically, this tutorial will explain how to:</p>
<ol>
<li>Create a differentiable implicit function renderer with either image-grid or Monte Carlo ray sampling.</li>
<li>Create an implicit model of a scene.</li>
<li>Fit the implicit function (Neural Radiance Field) based on input images using the differentiable implicit renderer. </li>
<li>Visualize the learnt implicit function.</li>
</ol>
<p>Note that the presented implicit model is a simplified version of NeRF:<br/>
<em>Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, Ren Ng: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis, ECCV 2020.</em></p>
<p>The simplifications include:</p>
<ul>
<li><em>Ray sampling</em>: This notebook does not perform stratified ray sampling, but rather samples rays at equidistant depths (a short sketch contrasting the two schemes follows this list).</li>
<li><em>Rendering</em>: We do a single rendering pass, as opposed to the original implementation that does a coarse and fine rendering pass.</li>
<li><em>Architecture</em>: Our network is shallower, which allows for faster optimization, possibly at the cost of surface details.</li>
<li><em>Mask loss</em>: Since our observations include segmentation masks, we also optimize a silhouette loss that forces rays to either get fully absorbed inside the volume, or to completely pass through it.</li>
</ul>
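<p>To make the first simplification concrete, here is a minimal sketch (not part of the tutorial code; the depth range and sample count are illustrative assumptions) contrasting the equidistant depth sampling used in this notebook with the stratified sampling of the original NeRF:</p>
<pre><code>import torch

n_pts_per_ray, min_depth, max_depth = 8, 0.1, 3.0  # illustrative values
# Equidistant depths along each ray (this notebook's scheme):
depths = torch.linspace(min_depth, max_depth, n_pts_per_ray)
# Stratified sampling (original NeRF): jitter each sample within its depth bin.
bin_size = (max_depth - min_depth) / n_pts_per_ray
stratified_depths = depths + (torch.rand(n_pts_per_ray) - 0.5) * bin_size
</code></pre>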
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-modules">0. Install and Import modules<a class="anchor-link" href="#0.-Install-and-Import-modules"></a></h2><p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"1.9"</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">:</span><span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># %matplotlib inline</span>
<span class="c1"># %matplotlib notebook</span>
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">time</span>
<span class="kn">import</span> <span class="nn">json</span>
<span class="kn">import</span> <span class="nn">glob</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">math</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
<span class="kn">from</span> <span class="nn">IPython</span> <span class="kn">import</span> <span class="n">display</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="kn">import</span> <span class="n">tqdm</span>
<span class="c1"># Data structures and functions for rendering</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="kn">import</span> <span class="n">Volumes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.transforms</span> <span class="kn">import</span> <span class="n">so3_exp_map</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
<span class="n">NDCGridRaysampler</span><span class="p">,</span>
<span class="n">MonteCarloRaysampler</span><span class="p">,</span>
<span class="n">EmissionAbsorptionRaymarcher</span><span class="p">,</span>
<span class="n">ImplicitRenderer</span><span class="p">,</span>
<span class="n">RayBundle</span><span class="p">,</span>
<span class="n">ray_bundle_to_ray_points</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># obtain the utilized device</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="s1">'Please note that NeRF is a resource-demanding method.'</span>
<span class="o">+</span> <span class="s1">' Running this notebook on CPU will be extremely slow.'</span>
<span class="o">+</span> <span class="s1">' We recommend running the example on a GPU'</span>
<span class="o">+</span> <span class="s1">' with at least 10 GB of memory.'</span>
<span class="p">)</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
<span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="kn">import</span> <span class="n">image_grid</span>
<span class="kn">from</span> <span class="nn">generate_cow_renders</span> <span class="kn">import</span> <span class="n">generate_cow_renders</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>OR if running locally uncomment and run the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># from utils.generate_cow_renders import generate_cow_renders</span>
<span class="c1"># from utils import image_grid</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="1.-Generate-images-of-the-scene-and-masks">1. Generate images of the scene and masks<a class="anchor-link" href="#1.-Generate-images-of-the-scene-and-masks"></a></h2><p>The following cell generates our training data.
It renders the cow mesh from the <code>fit_textured_mesh.ipynb</code> tutorial from several viewpoints and returns:</p>
<ol>
<li>A batch of image and silhouette tensors that are produced by the cow mesh renderer.</li>
<li>A set of cameras corresponding to each render.</li>
</ol>
<p>Note: For the purpose of this tutorial, which aims at explaining the details of implicit rendering, we do not explain how the mesh rendering, implemented in the <code>generate_cow_renders</code> function, works. Please refer to <code>fit_textured_mesh.ipynb</code> for a detailed explanation of mesh rendering.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="n">target_cameras</span><span class="p">,</span> <span class="n">target_images</span><span class="p">,</span> <span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">generate_cow_renders</span><span class="p">(</span><span class="n">num_views</span><span class="o">=</span><span class="mi">40</span><span class="p">,</span> <span class="n">azimuth_range</span><span class="o">=</span><span class="mi">180</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="sa">f</span><span class="s1">'Generated </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">target_images</span><span class="p">)</span><span class="si">}</span><span class="s1"> images/silhouettes/cameras.'</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Initialize-the-implicit-renderer">2. Initialize the implicit renderer<a class="anchor-link" href="#2.-Initialize-the-implicit-renderer"></a></h2><p>The following initializes an implicit renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the neural model of the scene (the model is described &amp; instantiated in a later cell).</p>
<p>The renderer is composed of a <em>raymarcher</em> and a <em>raysampler</em>.</p>
<ul>
<li>The <em>raysampler</em> is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:<ul>
<li><code>MonteCarloRaysampler</code> is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during <strong>training</strong> to decrease the memory consumption of the implicit model.</li>
<li><code>NDCGridRaysampler</code> which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, <code>NDCGridRaysampler</code> consumes a large amount of memory and, hence, is only used for visualizing the results of the training at <strong>test</strong> time.</li>
</ul>
</li>
<li>The <em>raymarcher</em> takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the <code>EmissionAbsorptionRaymarcher</code> which implements the standard Emission-Absorption raymarching algorithm.</li>
</ul>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render_size describes the size of both sides of the </span>
<span class="c1"># rendered images in pixels. Since an advantage of </span>
<span class="c1"># Neural Radiance Fields are high quality renders</span>
<span class="c1"># with a significant amount of details, we render</span>
<span class="c1"># the implicit function at double the size of </span>
<span class="c1"># target images.</span>
<span class="n">render_size</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="mi">2</span>
<span class="c1"># Our rendered scene is centered around (0,0,0) </span>
<span class="c1"># and is enclosed inside a bounding box</span>
<span class="c1"># whose side is roughly equal to 3.0 (world units).</span>
<span class="n">volume_extent_world</span> <span class="o">=</span> <span class="mf">3.0</span>
<span class="c1"># 1) Instantiate the raysamplers.</span>
<span class="c1"># Here, NDCGridRaysampler generates a rectangular image</span>
<span class="c1"># grid of rays whose coordinates follow the PyTorch3D</span>
<span class="c1"># coordinate conventions.</span>
<span class="n">raysampler_grid</span> <span class="o">=</span> <span class="n">NDCGridRaysampler</span><span class="p">(</span>
<span class="n">image_height</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">image_width</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># MonteCarloRaysampler generates a random subset </span>
<span class="c1"># of `n_rays_per_image` rays emitted from the image plane.</span>
<span class="n">raysampler_mc</span> <span class="o">=</span> <span class="n">MonteCarloRaysampler</span><span class="p">(</span>
<span class="n">min_x</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.0</span><span class="p">,</span>
<span class="n">max_x</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span>
<span class="n">min_y</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.0</span><span class="p">,</span>
<span class="n">max_y</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span>
<span class="n">n_rays_per_image</span><span class="o">=</span><span class="mi">750</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># 2) Instantiate the raymarcher.</span>
<span class="c1"># Here, we use the standard EmissionAbsorptionRaymarcher </span>
<span class="c1"># which marches along each ray in order to render</span>
<span class="c1"># the ray into a single 3D color vector </span>
<span class="c1"># and an opacity scalar.</span>
<span class="n">raymarcher</span> <span class="o">=</span> <span class="n">EmissionAbsorptionRaymarcher</span><span class="p">()</span>
<span class="c1"># Finally, instantiate the implicit renders</span>
<span class="c1"># for both raysamplers.</span>
<span class="n">renderer_grid</span> <span class="o">=</span> <span class="n">ImplicitRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler_grid</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">renderer_mc</span> <span class="o">=</span> <span class="n">ImplicitRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler_mc</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Define-the-neural-radiance-field-model">3. Define the neural radiance field model<a class="anchor-link" href="#3.-Define-the-neural-radiance-field-model"></a></h2><p>In this cell we define the <code>NeuralRadianceField</code> module, which specifies a continuous field of colors and opacities over the 3D domain of the scene.</p>
<p>The <code>forward</code> function of <code>NeuralRadianceField</code> (NeRF) receives as input a set of tensors that parametrize a bundle of rendering rays. The ray bundle is later converted to 3D ray points in the world coordinates of the scene. Each 3D point is then mapped to a harmonic representation using the <code>HarmonicEmbedding</code> layer (defined in the next cell). The harmonic embeddings then enter the <em>color</em> and <em>opacity</em> branches of the NeRF model in order to label each ray point with a 3D vector and a 1D scalar ranging in [0-1] which define the point's RGB color and opacity respectively.</p>
<p>Since NeRF has a large memory footprint, we also implement the <code>NeuralRadianceField.forward_batched</code> method. The method splits the input rays into batches and executes the <code>forward</code> function for each batch separately in a for loop. This lets us render a large set of rays without running out of GPU memory. Standardly, <code>forward_batched</code> would be used to render rays emitted from all pixels of an image in order to produce a full-sized render of a scene.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">class</span> <span class="nc">HarmonicEmbedding</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_harmonic_functions</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span> <span class="n">omega0</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Given an input tensor `x` of shape [minibatch, ... , dim],</span>
<span class="sd"> the harmonic embedding layer converts each feature</span>
<span class="sd"> in `x` into a series of harmonic features `embedding`</span>
<span class="sd"> as follows:</span>
<span class="sd"> embedding[..., i*dim:(i+1)*dim] = [</span>
<span class="sd"> sin(x[..., i]),</span>
<span class="sd"> sin(2*x[..., i]),</span>
<span class="sd"> sin(4*x[..., i]),</span>
<span class="sd"> ...</span>
<span class="sd"> sin(2**(self.n_harmonic_functions-1) * x[..., i]),</span>
<span class="sd"> cos(x[..., i]),</span>
<span class="sd"> cos(2*x[..., i]),</span>
<span class="sd"> cos(4*x[..., i]),</span>
<span class="sd"> ...</span>
<span class="sd"> cos(2**(self.n_harmonic_functions-1) * x[..., i])</span>
<span class="sd"> ]</span>
<span class="sd"> </span>
<span class="sd"> Note that `x` is also premultiplied by `omega0` before</span>
<span class="sd"> evaluating the harmonic functions.</span>
<span class="sd"> """</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span>
<span class="s1">'frequencies'</span><span class="p">,</span>
<span class="n">omega0</span> <span class="o">*</span> <span class="p">(</span><span class="mf">2.0</span> <span class="o">**</span> <span class="n">torch</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">n_harmonic_functions</span><span class="p">)),</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Args:</span>
<span class="sd"> x: tensor of shape [..., dim]</span>
<span class="sd"> Returns:</span>
<span class="sd"> embedding: a harmonic embedding of `x`</span>
<span class="sd"> of shape [..., n_harmonic_functions * dim * 2]</span>
<span class="sd"> """</span>
<span class="n">embed</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">]</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">frequencies</span><span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">*</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">((</span><span class="n">embed</span><span class="o">.</span><span class="n">sin</span><span class="p">(),</span> <span class="n">embed</span><span class="o">.</span><span class="n">cos</span><span class="p">()),</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="k">class</span> <span class="nc">NeuralRadianceField</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_harmonic_functions</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="o">=</span><span class="mi">256</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="sd">"""</span>
<span class="sd"> Args:</span>
<span class="sd"> n_harmonic_functions: The number of harmonic functions</span>
<span class="sd"> used to form the harmonic embedding of each point.</span>
<span class="sd"> n_hidden_neurons: The number of hidden units in the</span>
<span class="sd"> fully connected layers of the MLPs of the model.</span>
<span class="sd"> """</span>
<span class="c1"># The harmonic embedding layer converts input 3D coordinates</span>
<span class="c1"># to a representation that is more suitable for</span>
<span class="c1"># processing with a deep neural network.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span> <span class="o">=</span> <span class="n">HarmonicEmbedding</span><span class="p">(</span><span class="n">n_harmonic_functions</span><span class="p">)</span>
<span class="c1"># The dimension of the harmonic embedding.</span>
<span class="n">embedding_dim</span> <span class="o">=</span> <span class="n">n_harmonic_functions</span> <span class="o">*</span> <span class="mi">2</span> <span class="o">*</span> <span class="mi">3</span>
<span class="c1"># self.mlp is a simple 2-layer multi-layer perceptron</span>
<span class="c1"># which converts the input per-point harmonic embeddings</span>
<span class="c1"># to a latent representation.</span>
<span class="c1"># Not that we use Softplus activations instead of ReLU.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">mlp</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">embedding_dim</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="p">)</span>
<span class="c1"># Given features predicted by self.mlp, self.color_layer</span>
<span class="c1"># is responsible for predicting a 3-D per-point vector</span>
<span class="c1"># that represents the RGB color of the point.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">color_layer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span> <span class="o">+</span> <span class="n">embedding_dim</span><span class="p">,</span> <span class="n">n_hidden_neurons</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sigmoid</span><span class="p">(),</span>
<span class="c1"># To ensure that the colors correctly range between [0-1],</span>
<span class="c1"># the layer is terminated with a sigmoid layer.</span>
<span class="p">)</span>
<span class="c1"># The density layer converts the features of self.mlp</span>
<span class="c1"># to a 1D density value representing the raw opacity</span>
<span class="c1"># of each point.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">n_hidden_neurons</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Softplus</span><span class="p">(</span><span class="n">beta</span><span class="o">=</span><span class="mf">10.0</span><span class="p">),</span>
<span class="c1"># Sofplus activation ensures that the raw opacity</span>
<span class="c1"># is a non-negative number.</span>
<span class="p">)</span>
<span class="c1"># We set the bias of the density layer to -1.5</span>
<span class="c1"># in order to initialize the opacities of the</span>
<span class="c1"># ray points to values close to 0. </span>
<span class="c1"># This is a crucial detail for ensuring convergence</span>
<span class="c1"># of the model.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">data</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="o">-</span><span class="mf">1.5</span>
<span class="k">def</span> <span class="nf">_get_densities</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">features</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function takes `features` predicted by `self.mlp`</span>
<span class="sd"> and converts them to `raw_densities` with `self.density_layer`.</span>
<span class="sd"> `raw_densities` are later mapped to [0-1] range with</span>
<span class="sd"> 1 - inverse exponential of `raw_densities`.</span>
<span class="sd"> """</span>
<span class="n">raw_densities</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">density_layer</span><span class="p">(</span><span class="n">features</span><span class="p">)</span>
<span class="k">return</span> <span class="mi">1</span> <span class="o">-</span> <span class="p">(</span><span class="o">-</span><span class="n">raw_densities</span><span class="p">)</span><span class="o">.</span><span class="n">exp</span><span class="p">()</span>
<span class="k">def</span> <span class="nf">_get_colors</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">features</span><span class="p">,</span> <span class="n">rays_directions</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function takes per-point `features` predicted by `self.mlp`</span>
<span class="sd"> and evaluates the color model in order to attach to each</span>
<span class="sd"> point a 3D vector of its RGB color.</span>
<span class="sd"> </span>
<span class="sd"> In order to represent viewpoint dependent effects,</span>
<span class="sd"> before evaluating `self.color_layer`, `NeuralRadianceField`</span>
<span class="sd"> concatenates to the `features` a harmonic embedding</span>
<span class="sd"> of `ray_directions`, which are per-point directions </span>
<span class="sd"> of point rays expressed as 3D l2-normalized vectors</span>
<span class="sd"> in world coordinates.</span>
<span class="sd"> """</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="n">features</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># Normalize the ray_directions to unit l2 norm.</span>
<span class="n">rays_directions_normed</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">functional</span><span class="o">.</span><span class="n">normalize</span><span class="p">(</span>
<span class="n">rays_directions</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span>
<span class="p">)</span>
<span class="c1"># Obtain the harmonic embedding of the normalized ray directions.</span>
<span class="n">rays_embedding</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span><span class="p">(</span>
<span class="n">rays_directions_normed</span>
<span class="p">)</span>
<span class="c1"># Expand the ray directions tensor so that its spatial size</span>
<span class="c1"># is equal to the size of features.</span>
<span class="n">rays_embedding_expand</span> <span class="o">=</span> <span class="n">rays_embedding</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">,</span> <span class="p">:]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="n">rays_embedding</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="p">)</span>
<span class="c1"># Concatenate ray direction embeddings with </span>
<span class="c1"># features and evaluate the color model.</span>
<span class="n">color_layer_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span>
<span class="p">(</span><span class="n">features</span><span class="p">,</span> <span class="n">rays_embedding_expand</span><span class="p">),</span>
<span class="n">dim</span><span class="o">=-</span><span class="mi">1</span>
<span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">color_layer</span><span class="p">(</span><span class="n">color_layer_input</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">ray_bundle</span><span class="p">:</span> <span class="n">RayBundle</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> The forward function accepts the parametrizations of</span>
<span class="sd"> 3D points sampled along projection rays. The forward</span>
<span class="sd"> pass is responsible for attaching a 3D vector</span>
<span class="sd"> and a 1D scalar representing the point's </span>
<span class="sd"> RGB color and opacity respectively.</span>
<span class="sd"> </span>
<span class="sd"> Args:</span>
<span class="sd"> ray_bundle: A RayBundle object containing the following variables:</span>
<span class="sd"> origins: A tensor of shape `(minibatch, ..., 3)` denoting the</span>
<span class="sd"> origins of the sampling rays in world coords.</span>
<span class="sd"> directions: A tensor of shape `(minibatch, ..., 3)`</span>
<span class="sd"> containing the direction vectors of sampling rays in world coords.</span>
<span class="sd"> lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`</span>
<span class="sd"> containing the lengths at which the rays are sampled.</span>
<span class="sd"> Returns:</span>
<span class="sd"> rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`</span>
<span class="sd"> denoting the opacity of each ray point.</span>
<span class="sd"> rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`</span>
<span class="sd"> denoting the color of each ray point.</span>
<span class="sd"> """</span>
<span class="c1"># We first convert the ray parametrizations to world</span>
<span class="c1"># coordinates with `ray_bundle_to_ray_points`.</span>
<span class="n">rays_points_world</span> <span class="o">=</span> <span class="n">ray_bundle_to_ray_points</span><span class="p">(</span><span class="n">ray_bundle</span><span class="p">)</span>
<span class="c1"># rays_points_world.shape = [minibatch x ... x 3]</span>
<span class="c1"># For each 3D world coordinate, we obtain its harmonic embedding.</span>
<span class="n">embeds</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">harmonic_embedding</span><span class="p">(</span>
<span class="n">rays_points_world</span>
<span class="p">)</span>
<span class="c1"># embeds.shape = [minibatch x ... x self.n_harmonic_functions*6]</span>
<span class="c1"># self.mlp maps each harmonic embedding to a latent feature space.</span>
<span class="n">features</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mlp</span><span class="p">(</span><span class="n">embeds</span><span class="p">)</span>
<span class="c1"># features.shape = [minibatch x ... x n_hidden_neurons]</span>
<span class="c1"># Finally, given the per-point features, </span>
<span class="c1"># execute the density and color branches.</span>
<span class="n">rays_densities</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_densities</span><span class="p">(</span><span class="n">features</span><span class="p">)</span>
<span class="c1"># rays_densities.shape = [minibatch x ... x 1]</span>
<span class="n">rays_colors</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_colors</span><span class="p">(</span><span class="n">features</span><span class="p">,</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">directions</span><span class="p">)</span>
<span class="c1"># rays_colors.shape = [minibatch x ... x 3]</span>
<span class="k">return</span> <span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span>
<span class="k">def</span> <span class="nf">batched_forward</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">ray_bundle</span><span class="p">:</span> <span class="n">RayBundle</span><span class="p">,</span>
<span class="n">n_batches</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">16</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function is used to allow for memory efficient processing</span>
<span class="sd"> of input rays. The input rays are first split to `n_batches`</span>
<span class="sd"> chunks and passed through the `self.forward` function one at a time</span>
<span class="sd"> in a for loop. Combined with disabling PyTorch gradient caching</span>
<span class="sd"> (`torch.no_grad()`), this allows for rendering large batches</span>
<span class="sd"> of rays that do not all fit into GPU memory in a single forward pass.</span>
<span class="sd"> In our case, batched_forward is used to export a fully-sized render</span>
<span class="sd"> of the radiance field for visualization purposes.</span>
<span class="sd"> </span>
<span class="sd"> Args:</span>
<span class="sd"> ray_bundle: A RayBundle object containing the following variables:</span>
<span class="sd"> origins: A tensor of shape `(minibatch, ..., 3)` denoting the</span>
<span class="sd"> origins of the sampling rays in world coords.</span>
<span class="sd"> directions: A tensor of shape `(minibatch, ..., 3)`</span>
<span class="sd"> containing the direction vectors of sampling rays in world coords.</span>
<span class="sd"> lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`</span>
<span class="sd"> containing the lengths at which the rays are sampled.</span>
<span class="sd"> n_batches: Specifies the number of batches the input rays are split into.</span>
<span class="sd"> The larger the number of batches, the smaller the memory footprint</span>
<span class="sd"> and the lower the processing speed.</span>
<span class="sd"> Returns:</span>
<span class="sd"> rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`</span>
<span class="sd"> denoting the opacity of each ray point.</span>
<span class="sd"> rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`</span>
<span class="sd"> denoting the color of each ray point.</span>
<span class="sd"> """</span>
<span class="c1"># Parse out shapes needed for tensor reshaping in this function.</span>
<span class="n">n_pts_per_ray</span> <span class="o">=</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">lengths</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="p">[</span><span class="o">*</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="n">n_pts_per_ray</span><span class="p">]</span>
<span class="c1"># Split the rays to `n_batches` batches.</span>
<span class="n">tot_samples</span> <span class="o">=</span> <span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span>
<span class="n">batches</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">chunk</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">tot_samples</span><span class="p">),</span> <span class="n">n_batches</span><span class="p">)</span>
<span class="c1"># For each batch, execute the standard forward pass.</span>
<span class="n">batch_outputs</span> <span class="o">=</span> <span class="p">[</span>
<span class="bp">self</span><span class="o">.</span><span class="n">forward</span><span class="p">(</span>
<span class="n">RayBundle</span><span class="p">(</span>
<span class="n">origins</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">origins</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">directions</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">directions</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">lengths</span><span class="o">=</span><span class="n">ray_bundle</span><span class="o">.</span><span class="n">lengths</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">n_pts_per_ray</span><span class="p">)[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">xys</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="p">)</span>
<span class="p">)</span> <span class="k">for</span> <span class="n">batch_idx</span> <span class="ow">in</span> <span class="n">batches</span>
<span class="p">]</span>
<span class="c1"># Concatenate the per-batch rays_densities and rays_colors</span>
<span class="c1"># and reshape according to the sizes of the inputs.</span>
<span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span>
<span class="p">[</span><span class="n">batch_output</span><span class="p">[</span><span class="n">output_i</span><span class="p">]</span> <span class="k">for</span> <span class="n">batch_output</span> <span class="ow">in</span> <span class="n">batch_outputs</span><span class="p">],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span>
<span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span> <span class="k">for</span> <span class="n">output_i</span> <span class="ow">in</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
<span class="p">]</span>
<span class="k">return</span> <span class="n">rays_densities</span><span class="p">,</span> <span class="n">rays_colors</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Helper-functions">4. Helper functions<a class="anchor-link" href="#4.-Helper-functions"></a></h2><p>In this function we define functions that help with the Neural Radiance Field optimization.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">huber</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">scaling</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> A helper function for evaluating the smooth L1 (huber) loss</span>
<span class="sd"> between the rendered silhouettes and colors.</span>
<span class="sd"> """</span>
<span class="n">diff_sq</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span> <span class="o">-</span> <span class="n">y</span><span class="p">)</span> <span class="o">**</span> <span class="mi">2</span>
<span class="n">loss</span> <span class="o">=</span> <span class="p">((</span><span class="mi">1</span> <span class="o">+</span> <span class="n">diff_sq</span> <span class="o">/</span> <span class="p">(</span><span class="n">scaling</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">)</span><span class="o">.</span><span class="n">sqrt</span><span class="p">()</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="nb">float</span><span class="p">(</span><span class="n">scaling</span><span class="p">)</span>
<span class="k">return</span> <span class="n">loss</span>
<span class="k">def</span> <span class="nf">sample_images_at_mc_locs</span><span class="p">(</span><span class="n">target_images</span><span class="p">,</span> <span class="n">sampled_rays_xy</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> Given a set of Monte Carlo pixel locations `sampled_rays_xy`,</span>
<span class="sd"> this method samples the tensor `target_images` at the</span>
<span class="sd"> respective 2D locations.</span>
<span class="sd"> </span>
<span class="sd"> This function is used in order to extract the colors from</span>
<span class="sd"> ground truth images that correspond to the colors</span>
<span class="sd"> rendered using `MonteCarloRaysampler`.</span>
<span class="sd"> """</span>
<span class="n">ba</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="n">dim</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="n">spatial_size</span> <span class="o">=</span> <span class="n">sampled_rays_xy</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># In order to sample target_images, we utilize</span>
<span class="c1"># the grid_sample function which implements a</span>
<span class="c1"># bilinear image sampler.</span>
<span class="c1"># Note that we have to invert the sign of the </span>
<span class="c1"># sampled ray positions to convert the NDC xy locations</span>
<span class="c1"># of the MonteCarloRaysampler to the coordinate</span>
<span class="c1"># convention of grid_sample.</span>
<span class="n">images_sampled</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">functional</span><span class="o">.</span><span class="n">grid_sample</span><span class="p">(</span>
<span class="n">target_images</span><span class="o">.</span><span class="n">permute</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span>
<span class="o">-</span><span class="n">sampled_rays_xy</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="n">ba</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="c1"># note the sign inversion</span>
<span class="n">align_corners</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">images_sampled</span><span class="o">.</span><span class="n">permute</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">view</span><span class="p">(</span>
<span class="n">ba</span><span class="p">,</span> <span class="o">*</span><span class="n">spatial_size</span><span class="p">,</span> <span class="n">dim</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">show_full_render</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">camera</span><span class="p">,</span>
<span class="n">target_image</span><span class="p">,</span> <span class="n">target_silhouette</span><span class="p">,</span>
<span class="n">loss_history_color</span><span class="p">,</span> <span class="n">loss_history_sil</span><span class="p">,</span>
<span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This is a helper function for visualizing the</span>
<span class="sd"> intermediate results of the learning. </span>
<span class="sd"> </span>
<span class="sd"> Since the `NeuralRadianceField` suffers from</span>
<span class="sd"> a large memory footprint, which does not let us</span>
<span class="sd"> render the full image grid in a single forward pass,</span>
<span class="sd"> we utilize the `NeuralRadianceField.batched_forward`</span>
<span class="sd"> function in combination with disabling the gradient caching.</span>
<span class="sd"> This chunks the set of emitted rays to batches and </span>
<span class="sd"> evaluates the implicit function on one batch at a time</span>
<span class="sd"> to prevent GPU memory overflow.</span>
<span class="sd"> """</span>
<span class="c1"># Prevent gradient caching.</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="c1"># Render using the grid renderer and the</span>
<span class="c1"># batched_forward function of neural_radiance_field.</span>
<span class="n">rendered_image_silhouette</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">renderer_grid</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">camera</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">batched_forward</span>
<span class="p">)</span>
<span class="c1"># Split the rendering result to a silhouette render</span>
<span class="c1"># and the image render.</span>
<span class="n">rendered_image</span><span class="p">,</span> <span class="n">rendered_silhouette</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">rendered_image_silhouette</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="p">)</span>
<span class="c1"># Generate plots.</span>
<span class="n">fig</span><span class="p">,</span> <span class="n">ax</span> <span class="o">=</span> <span class="n">plt</span><span class="o">.</span><span class="n">subplots</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">15</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
<span class="n">ax</span> <span class="o">=</span> <span class="n">ax</span><span class="o">.</span><span class="n">ravel</span><span class="p">()</span>
<span class="n">clamp_and_detach</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">loss_history_color</span><span class="p">))),</span> <span class="n">loss_history_color</span><span class="p">,</span> <span class="n">linewidth</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_image</span><span class="p">))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">2</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_silhouette</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">loss_history_sil</span><span class="p">))),</span> <span class="n">loss_history_sil</span><span class="p">,</span> <span class="n">linewidth</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">4</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_image</span><span class="p">))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">5</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_silhouette</span><span class="p">))</span>
<span class="k">for</span> <span class="n">ax_</span><span class="p">,</span> <span class="n">title_</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span>
<span class="n">ax</span><span class="p">,</span>
<span class="p">(</span>
<span class="s2">"loss color"</span><span class="p">,</span> <span class="s2">"rendered image"</span><span class="p">,</span> <span class="s2">"rendered silhouette"</span><span class="p">,</span>
<span class="s2">"loss silhouette"</span><span class="p">,</span> <span class="s2">"target image"</span><span class="p">,</span> <span class="s2">"target silhouette"</span><span class="p">,</span>
<span class="p">)</span>
<span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">title_</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'loss'</span><span class="p">):</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">set_title</span><span class="p">(</span><span class="n">title_</span><span class="p">)</span>
<span class="n">fig</span><span class="o">.</span><span class="n">canvas</span><span class="o">.</span><span class="n">draw</span><span class="p">();</span> <span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="n">display</span><span class="o">.</span><span class="n">clear_output</span><span class="p">(</span><span class="n">wait</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">display</span><span class="o">.</span><span class="n">display</span><span class="p">(</span><span class="n">fig</span><span class="p">)</span>
<span class="k">return</span> <span class="n">fig</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Fit-the-radiance-field">5. Fit the radiance field<a class="anchor-link" href="#5.-Fit-the-radiance-field"></a></h2><p>Here we carry out the radiance field fitting with differentiable rendering.</p>
<p>In order to fit the radiance field, we render it from the viewpoints of the <code>target_cameras</code>
and compare the resulting renders with the observed <code>target_images</code> and <code>target_silhouettes</code>.</p>
<p>The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding
pairs of <code>target_images</code>/<code>rendered_images</code> and <code>target_silhouettes</code>/<code>rendered_silhouettes</code>.</p>
<p>Since we use the <code>MonteCarloRaysampler</code>, the outputs of the training renderer <code>renderer_mc</code>
are colors of pixels that are randomly sampled from the image plane, not a lattice of pixels forming
a valid image. Thus, in order to compare the rendered colors with the ground truth, we
utilize the random Monte Carlo pixel locations to sample the ground truth images/silhouettes
<code>target_images</code>/<code>target_silhouettes</code> at the xy locations corresponding to the render
locations. This is done with the helper function <code>sample_images_at_mc_locs</code>, which is
described in the previous cell.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># First move all relevant variables to the correct device.</span>
<span class="n">renderer_grid</span> <span class="o">=</span> <span class="n">renderer_grid</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">renderer_mc</span> <span class="o">=</span> <span class="n">renderer_mc</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_cameras</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_images</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">target_silhouettes</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Set the seed for reproducibility</span>
<span class="n">torch</span><span class="o">.</span><span class="n">manual_seed</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="c1"># Instantiate the radiance field model.</span>
<span class="n">neural_radiance_field</span> <span class="o">=</span> <span class="n">NeuralRadianceField</span><span class="p">()</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the Adam optimizer. We set its master learning rate to 1e-3.</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">1e-3</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">)</span>
<span class="c1"># We sample 6 random cameras in a minibatch. Each camera</span>
<span class="c1"># emits raysampler_mc.n_pts_per_image rays.</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">6</span>
<span class="c1"># 3000 iterations take ~20 min on a Tesla M40 and lead to</span>
<span class="c1"># reasonably sharp results. However, for the best possible</span>
<span class="c1"># results, we recommend setting n_iter=20000.</span>
<span class="n">n_iter</span> <span class="o">=</span> <span class="mi">3000</span>
<span class="c1"># Init the loss history buffers.</span>
<span class="n">loss_history_color</span><span class="p">,</span> <span class="n">loss_history_sil</span> <span class="o">=</span> <span class="p">[],</span> <span class="p">[]</span>
<span class="c1"># The main optimization loop.</span>
<span class="k">for</span> <span class="n">iteration</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_iter</span><span class="p">):</span>
<span class="c1"># In case we reached the last 75% of iterations,</span>
<span class="c1"># decrease the learning rate of the optimizer 10-fold.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">==</span> <span class="nb">round</span><span class="p">(</span><span class="n">n_iter</span> <span class="o">*</span> <span class="mf">0.75</span><span class="p">):</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Decreasing LR 10-fold ...'</span><span class="p">)</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span> <span class="o">*</span> <span class="mf">0.1</span>
<span class="p">)</span>
<span class="c1"># Zero the optimizer gradient.</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="c1"># Sample random batch indices.</span>
<span class="n">batch_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="n">batch_size</span><span class="p">]</span>
<span class="c1"># Sample the minibatch of cameras.</span>
<span class="n">batch_cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Evaluate the nerf model.</span>
<span class="n">rendered_images_silhouettes</span><span class="p">,</span> <span class="n">sampled_rays</span> <span class="o">=</span> <span class="n">renderer_mc</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">batch_cameras</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span>
<span class="p">)</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">rendered_silhouettes</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">rendered_images_silhouettes</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="p">)</span>
<span class="c1"># Compute the silhouette error as the mean huber</span>
<span class="c1"># loss between the predicted masks and the</span>
<span class="c1"># sampled target silhouettes.</span>
<span class="n">silhouettes_at_rays</span> <span class="o">=</span> <span class="n">sample_images_at_mc_locs</span><span class="p">(</span>
<span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="kc">None</span><span class="p">],</span>
<span class="n">sampled_rays</span><span class="o">.</span><span class="n">xys</span>
<span class="p">)</span>
<span class="n">sil_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_silhouettes</span><span class="p">,</span>
<span class="n">silhouettes_at_rays</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># Compute the color error as the mean huber</span>
<span class="c1"># loss between the rendered colors and the</span>
<span class="c1"># sampled target images.</span>
<span class="n">colors_at_rays</span> <span class="o">=</span> <span class="n">sample_images_at_mc_locs</span><span class="p">(</span>
<span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">sampled_rays</span><span class="o">.</span><span class="n">xys</span>
<span class="p">)</span>
<span class="n">color_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_images</span><span class="p">,</span>
<span class="n">colors_at_rays</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># The optimization loss is a simple</span>
<span class="c1"># sum of the color and silhouette errors.</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">color_err</span> <span class="o">+</span> <span class="n">sil_err</span>
<span class="c1"># Log the loss history.</span>
<span class="n">loss_history_color</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">))</span>
<span class="n">loss_history_sil</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">))</span>
<span class="c1"># Every 10 iterations, print the current values of the losses.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">10</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="sa">f</span><span class="s1">'Iteration </span><span class="si">{</span><span class="n">iteration</span><span class="si">:</span><span class="s1">05d</span><span class="si">}</span><span class="s1">:'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' loss color = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' loss silhouette = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="p">)</span>
<span class="c1"># Take the optimization step.</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
<span class="c1"># Visualize the full renders every 100 iterations.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">100</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">show_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="mi">1</span><span class="p">]</span>
<span class="n">show_full_render</span><span class="p">(</span>
<span class="n">neural_radiance_field</span><span class="p">,</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">show_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">),</span>
<span class="n">target_images</span><span class="p">[</span><span class="n">show_idx</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
<span class="n">target_silhouettes</span><span class="p">[</span><span class="n">show_idx</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
<span class="n">loss_history_color</span><span class="p">,</span>
<span class="n">loss_history_sil</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Visualizing-the-optimized-neural-radiance-field">5. Visualizing the optimized neural radiance field<a class="anchor-link" href="#5.-Visualizing-the-optimized-neural-radiance-field"></a></h2><p>Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis.</p>
</div>
</div>
</div>
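<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>The rotating trajectory below is built with <code>so3_exp_map</code>, which converts axis-angle (log-rotation) vectors to rotation matrices. A quick sanity check of this behavior:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span>import math
import torch
from pytorch3d.transforms import so3_exp_map

# A log-rotation vector of magnitude pi/2 along the +Y axis ...
log_rot = torch.tensor([[0.0, math.pi / 2, 0.0]], device=device)
# ... maps to the 90-degree rotation matrix about the y-axis.
R = so3_exp_map(log_rot)  # shape (1, 3, 3)
print(R[0])
</pre></div>
</div>
</div>
</div>
</div>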
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">generate_rotating_nerf</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">n_frames</span> <span class="o">=</span> <span class="mi">50</span><span class="p">):</span>
<span class="n">logRs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">logRs</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="o">-</span><span class="mf">3.14</span><span class="p">,</span> <span class="mf">3.14</span><span class="p">,</span> <span class="n">n_frames</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Rs</span> <span class="o">=</span> <span class="n">so3_exp_map</span><span class="p">(</span><span class="n">logRs</span><span class="p">)</span>
<span class="n">Ts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Ts</span><span class="p">[:,</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="mf">2.7</span>
<span class="n">frames</span> <span class="o">=</span> <span class="p">[]</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Rendering rotating NeRF ...'</span><span class="p">)</span>
<span class="k">for</span> <span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">tqdm</span><span class="p">(</span><span class="n">Rs</span><span class="p">),</span> <span class="n">Ts</span><span class="p">):</span>
<span class="n">camera</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">znear</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">zfar</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">aspect_ratio</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">fov</span><span class="o">=</span><span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Note that we again render with `NDCGridRaySampler`</span>
<span class="c1"># and the batched_forward function of neural_radiance_field.</span>
<span class="n">frames</span><span class="o">.</span><span class="n">append</span><span class="p">(</span>
<span class="n">renderer_grid</span><span class="p">(</span>
<span class="n">cameras</span><span class="o">=</span><span class="n">camera</span><span class="p">,</span>
<span class="n">volumetric_function</span><span class="o">=</span><span class="n">neural_radiance_field</span><span class="o">.</span><span class="n">batched_forward</span><span class="p">,</span>
<span class="p">)[</span><span class="mi">0</span><span class="p">][</span><span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span><span class="n">frames</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">rotating_nerf_frames</span> <span class="o">=</span> <span class="n">generate_rotating_nerf</span><span class="p">(</span><span class="n">neural_radiance_field</span><span class="p">,</span> <span class="n">n_frames</span><span class="o">=</span><span class="mi">3</span><span class="o">*</span><span class="mi">5</span><span class="p">)</span>
<span class="n">image_grid</span><span class="p">(</span><span class="n">rotating_nerf_frames</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">(),</span> <span class="n">rows</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">cols</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span> <span class="n">rgb</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">fill</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="6.-Conclusion">6. Conclusion<a class="anchor-link" href="#6.-Conclusion"></a></h2><p>In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a <code>MonteCarloRaysampler</code> or <code>NDCGridRaysampler</code>, and an <code>EmissionAbsorptionRaymarcher</code>.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -0,0 +1,548 @@
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
var links = coll[i].nextElementSibling.getElementsByTagName('*');
if (checkActiveCategory){
for (var j = 0; j < links.length; j++) {
if (links[j].classList.contains('navListItemActive')){
coll[i].nextElementSibling.classList.toggle('hide');
coll[i].childNodes[1].classList.toggle('rotate');
checkActiveCategory = false;
break;
}
}
}
coll[i].addEventListener('click', function() {
var arrow = this.childNodes[1];
arrow.classList.toggle('rotate');
var content = this.nextElementSibling;
content.classList.toggle('hide');
});
}
document.addEventListener('DOMContentLoaded', function() {
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
createToggler('#tocToggler', 'body', 'tocActive');
var headings = document.querySelector('.toc-headings');
headings && headings.addEventListener('click', function(event) {
var el = event.target;
while(el !== headings){
if (el.tagName === 'A') {
document.body.classList.remove('tocActive');
break;
} else{
el = el.parentNode;
}
}
}, false);
function createToggler(togglerSelector, targetSelector, className) {
var toggler = document.querySelector(togglerSelector);
var target = document.querySelector(targetSelector);
if (!toggler) {
return;
}
toggler.onclick = function(event) {
event.preventDefault();
target.classList.toggle(className);
};
}
});
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/fit_textured_volume.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_textured_volume.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_textured_volume.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
<div class="notebook">
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Fit-a-volume-via-raymarching">Fit a volume via raymarching<a class="anchor-link" href="#Fit-a-volume-via-raymarching"></a></h1><p>This tutorial shows how to fit a volume given a set of views of a scene using differentiable volumetric rendering.</p>
<p>More specifically, this tutorial will explain how to:</p>
<ol>
<li>Create a differentiable volumetric renderer.</li>
<li>Create a Volumetric model (including how to use the <code>Volumes</code> class).</li>
<li>Fit the volume based on the images using the differentiable volumetric renderer. </li>
<li>Visualize the predicted volume.</li>
</ol>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-modules">0. Install and Import modules<a class="anchor-link" href="#0.-Install-and-Import-modules"></a></h2><p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"1.9"</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">:</span><span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">time</span>
<span class="kn">import</span> <span class="nn">json</span>
<span class="kn">import</span> <span class="nn">glob</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">math</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="kn">import</span> <span class="n">tqdm</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
<span class="kn">from</span> <span class="nn">IPython</span> <span class="kn">import</span> <span class="n">display</span>
<span class="c1"># Data structures and functions for rendering</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="kn">import</span> <span class="n">Volumes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
<span class="n">VolumeRenderer</span><span class="p">,</span>
<span class="n">NDCGridRaysampler</span><span class="p">,</span>
<span class="n">EmissionAbsorptionRaymarcher</span>
<span class="p">)</span>
<span class="kn">from</span> <span class="nn">pytorch3d.transforms</span> <span class="kn">import</span> <span class="n">so3_exp_map</span>
<span class="c1"># obtain the utilized device</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
<span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="kn">import</span> <span class="n">image_grid</span>
<span class="kn">from</span> <span class="nn">generate_cow_renders</span> <span class="kn">import</span> <span class="n">generate_cow_renders</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Or, if running locally, uncomment and run the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># from utils.generate_cow_renders import generate_cow_renders</span>
<span class="c1"># from utils import image_grid</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="1.-Generate-images-of-the-scene-and-masks">1. Generate images of the scene and masks<a class="anchor-link" href="#1.-Generate-images-of-the-scene-and-masks"></a></h2><p>The following cell generates our training data.
It renders the cow mesh from the <code>fit_textured_mesh.ipynb</code> tutorial from several viewpoints and returns:</p>
<ol>
<li>A batch of image and silhouette tensors that are produced by the cow mesh renderer.</li>
<li>A set of cameras corresponding to each render.</li>
</ol>
<p>Note: For the purpose of this tutorial, which aims at explaining the details of volumetric rendering, we do not explain how the mesh rendering, implemented in the <code>generate_cow_renders</code> function, works. Please refer to <code>fit_textured_mesh.ipynb</code> for a detailed explanation of mesh rendering.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="n">target_cameras</span><span class="p">,</span> <span class="n">target_images</span><span class="p">,</span> <span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">generate_cow_renders</span><span class="p">(</span><span class="n">num_views</span><span class="o">=</span><span class="mi">40</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="sa">f</span><span class="s1">'Generated </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">target_images</span><span class="p">)</span><span class="si">}</span><span class="s1"> images/silhouettes/cameras.'</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
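<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a quick sanity check, the shapes of the generated tensors can be inspected; the exact spatial sizes depend on the defaults of <code>generate_cow_renders</code>:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Images are (num_views, H, W, 3), silhouettes are (num_views, H, W),
# and target_cameras is a batched cameras object.
print(target_images.shape)       # e.g. torch.Size([40, 128, 128, 3])
print(target_silhouettes.shape)  # e.g. torch.Size([40, 128, 128])
print(len(target_cameras))       # 40
</pre></div>
</div>
</div>
</div>
</div>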
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Initialize-the-volumetric-renderer">2. Initialize the volumetric renderer<a class="anchor-link" href="#2.-Initialize-the-volumetric-renderer"></a></h2><p>The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described &amp; instantiated in a later cell).</p>
<p>The renderer is composed of a <em>raymarcher</em> and a <em>raysampler</em>.</p>
<ul>
<li>The <em>raysampler</em> is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the <code>NDCGridRaysampler</code> which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).</li>
<li>The <em>raymarcher</em> takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the <code>EmissionAbsorptionRaymarcher</code> which implements the standard Emission-Absorption raymarching algorithm.</li>
</ul>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render_size describes the size of both sides of the </span>
<span class="c1"># rendered images in pixels. We set this to the same size</span>
<span class="c1"># as the target images. I.e. we render at the same</span>
<span class="c1"># size as the ground truth images.</span>
<span class="n">render_size</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># Our rendered scene is centered around (0,0,0) </span>
<span class="c1"># and is enclosed inside a bounding box</span>
<span class="c1"># whose side is roughly equal to 3.0 (world units).</span>
<span class="n">volume_extent_world</span> <span class="o">=</span> <span class="mf">3.0</span>
<span class="c1"># 1) Instantiate the raysampler.</span>
<span class="c1"># Here, NDCGridRaysampler generates a rectangular image</span>
<span class="c1"># grid of rays whose coordinates follow the PyTorch3D</span>
<span class="c1"># coordinate conventions.</span>
<span class="c1"># Since we use a volume of size 128^3, we sample n_pts_per_ray=150,</span>
<span class="c1"># which roughly corresponds to a one ray-point per voxel.</span>
<span class="c1"># We further set the min_depth=0.1 since there is no surface within</span>
<span class="c1"># 0.1 units of any camera plane.</span>
<span class="n">raysampler</span> <span class="o">=</span> <span class="n">NDCGridRaysampler</span><span class="p">(</span>
<span class="n">image_width</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">image_height</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">150</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># 2) Instantiate the raymarcher.</span>
<span class="c1"># Here, we use the standard EmissionAbsorptionRaymarcher </span>
<span class="c1"># which marches along each ray in order to render</span>
<span class="c1"># each ray into a single 3D color vector </span>
<span class="c1"># and an opacity scalar.</span>
<span class="n">raymarcher</span> <span class="o">=</span> <span class="n">EmissionAbsorptionRaymarcher</span><span class="p">()</span>
<span class="c1"># Finally, instantiate the volumetric render</span>
<span class="c1"># with the raysampler and raymarcher objects.</span>
<span class="n">renderer</span> <span class="o">=</span> <span class="n">VolumeRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
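<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>To make the raymarcher's role concrete, the following cell sketches the emission-absorption compositing rule in isolation. This is a minimal re-implementation for illustration only, not PyTorch3D's actual <code>EmissionAbsorptionRaymarcher</code>: each sampled point contributes its color weighted by its density times the transmittance (the fraction of the ray that survived all points in front of it), and the summed weights give the pixel's opacity.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># A minimal emission-absorption compositor (illustration only; the real
# implementation is pytorch3d.renderer.EmissionAbsorptionRaymarcher).
import torch

def ea_composite(rays_densities, rays_colors, eps=1e-10):
    # rays_densities: (..., n_pts_per_ray, 1) per-point opacities in [0, 1]
    # rays_colors:    (..., n_pts_per_ray, 3) per-point RGB values
    # Transmittance: the fraction of the ray surviving up to each point.
    transmittance = torch.cumprod(1.0 - rays_densities + eps, dim=-2)
    # Shift by one so each point is attenuated only by the points before it.
    transmittance = torch.cat(
        [torch.ones_like(transmittance[..., :1, :]), transmittance[..., :-1, :]],
        dim=-2,
    )
    weights = rays_densities * transmittance
    color = (weights * rays_colors).sum(dim=-2)  # (..., 3) composited color
    opacity = weights.sum(dim=-2)                # (..., 1) composited opacity
    return color, opacity
</pre></div>
</div>
</div>
</div>
</div>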
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Initialize-the-volumetric-model">3. Initialize the volumetric model<a class="anchor-link" href="#3.-Initialize-the-volumetric-model"></a></h2><p>Next we instantiate a volumetric model of the scene. This quantizes the 3D space to cubical voxels, where each voxel is described with a 3D vector representing the voxel's RGB color and a density scalar which describes the opacity of the voxel (ranging between [0-1], the higher the more opaque).</p>
<p>In order to ensure the range of densities and colors is between [0-1], we represent both volume colors and densities in the logarithmic space. During the forward function of the model, the log-space values are passed through the sigmoid function to bring the log-space values to the correct range.</p>
<p>Additionally, <code>VolumeModel</code> contains the renderer object. This object stays unaltered throughout the optimization.</p>
<p>In this cell we also define the <code>huber</code> loss function which computes the discrepancy between the rendered colors and masks.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">class</span> <span class="nc">VolumeModel</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">renderer</span><span class="p">,</span> <span class="n">volume_size</span><span class="o">=</span><span class="p">[</span><span class="mi">64</span><span class="p">]</span> <span class="o">*</span> <span class="mi">3</span><span class="p">,</span> <span class="n">voxel_size</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="c1"># After evaluating torch.sigmoid(self.log_colors), we get </span>
<span class="c1"># densities close to zero.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="o">-</span><span class="mf">4.0</span> <span class="o">*</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">*</span><span class="n">volume_size</span><span class="p">))</span>
<span class="c1"># After evaluating torch.sigmoid(self.log_colors), we get </span>
<span class="c1"># a neutral gray color everywhere.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="o">*</span><span class="n">volume_size</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_voxel_size</span> <span class="o">=</span> <span class="n">voxel_size</span>
<span class="c1"># Store the renderer module as well.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_renderer</span> <span class="o">=</span> <span class="n">renderer</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">cameras</span><span class="p">):</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="n">cameras</span><span class="o">.</span><span class="n">R</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="c1"># Convert the log-space values to the densities/colors</span>
<span class="n">densities</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sigmoid</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span><span class="p">)</span>
<span class="n">colors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sigmoid</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span><span class="p">)</span>
<span class="c1"># Instantiate the Volumes object, making sure</span>
<span class="c1"># the densities and colors are correctly</span>
<span class="c1"># expanded batch_size-times.</span>
<span class="n">volumes</span> <span class="o">=</span> <span class="n">Volumes</span><span class="p">(</span>
<span class="n">densities</span> <span class="o">=</span> <span class="n">densities</span><span class="p">[</span><span class="kc">None</span><span class="p">]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="n">batch_size</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span>
<span class="n">features</span> <span class="o">=</span> <span class="n">colors</span><span class="p">[</span><span class="kc">None</span><span class="p">]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="n">batch_size</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span>
<span class="n">voxel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_voxel_size</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Given cameras and volumes, run the renderer</span>
<span class="c1"># and return only the first output value </span>
<span class="c1"># (the 2nd output is a representation of the sampled</span>
<span class="c1"># rays which can be omitted for our purpose).</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_renderer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">volumes</span><span class="o">=</span><span class="n">volumes</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
<span class="c1"># A helper function for evaluating the smooth L1 (huber) loss</span>
<span class="c1"># between the rendered silhouettes and colors.</span>
<span class="k">def</span> <span class="nf">huber</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">scaling</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="n">diff_sq</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span> <span class="o">-</span> <span class="n">y</span><span class="p">)</span> <span class="o">**</span> <span class="mi">2</span>
<span class="n">loss</span> <span class="o">=</span> <span class="p">((</span><span class="mi">1</span> <span class="o">+</span> <span class="n">diff_sq</span> <span class="o">/</span> <span class="p">(</span><span class="n">scaling</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">)</span><span class="o">.</span><span class="n">sqrt</span><span class="p">()</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="nb">float</span><span class="p">(</span><span class="n">scaling</span><span class="p">)</span>
<span class="k">return</span> <span class="n">loss</span>
</pre></div>
</div>
</div>
</div>
</div>
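<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>A quick numerical check of the two choices made above (illustrative only): the initial log-densities of -4.0 map to nearly transparent voxels and the initial log-colors of 0.0 to a neutral gray, while <code>huber</code> is roughly quadratic for small residuals and roughly linear for large ones, which limits the influence of outlier pixels.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Illustrative sanity checks for the cell above.
print(torch.sigmoid(torch.tensor([-4.0, 0.0])))  # ~[0.018, 0.500]
# Quadratic growth near zero, linear growth for large residuals:
print(huber(torch.tensor([0.01, 0.1, 1.0]), torch.tensor(0.0)))
</pre></div>
</div>
</div>
</div>
</div>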
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Fit-the-volume">4. Fit the volume<a class="anchor-link" href="#4.-Fit-the-volume"></a></h2><p>Here we carry out the volume fitting with differentiable rendering.</p>
<p>In order to fit the volume, we render it from the viewpoints of the <code>target_cameras</code>
and compare the resulting renders with the observed <code>target_images</code> and <code>target_silhouettes</code>.</p>
<p>The comparison is done by evaluating the mean Huber (smooth L1) error between corresponding
pairs of <code>target_images</code>/<code>rendered_images</code> and <code>target_silhouettes</code>/<code>rendered_silhouettes</code>.</p>
</div>
</div>
</div>
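<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>One detail worth noting before the loop: the model returns a 4-channel render per camera, with RGB in the first three channels and the raymarched opacity (used as the silhouette) in the last, and the loop separates them with <code>split</code>. The tensor below is a hypothetical stand-in used purely to illustrate the shapes.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Hypothetical stand-in for volume_model(batch_cameras): a (batch, H, W, 4)
# render whose last channel is the opacity used as the silhouette.
fake_render = torch.rand(10, 128, 128, 4)
rgb, silhouette = fake_render.split([3, 1], dim=-1)
print(rgb.shape, silhouette.shape)  # (10, 128, 128, 3) and (10, 128, 128, 1)
</pre></div>
</div>
</div>
</div>
</div>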
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># First move all relevant variables to the correct device.</span>
<span class="n">target_cameras</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_images</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">target_silhouettes</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the volumetric model.</span>
<span class="c1"># We use a cubical volume with the size of </span>
<span class="c1"># one side = 128. The size of each voxel of the volume </span>
<span class="c1"># is set to volume_extent_world / volume_size s.t. the</span>
<span class="c1"># volume represents the space enclosed in a 3D bounding box</span>
<span class="c1"># centered at (0, 0, 0) with the size of each side equal to 3.</span>
<span class="n">volume_size</span> <span class="o">=</span> <span class="mi">128</span>
<span class="n">volume_model</span> <span class="o">=</span> <span class="n">VolumeModel</span><span class="p">(</span>
<span class="n">renderer</span><span class="p">,</span>
<span class="n">volume_size</span><span class="o">=</span><span class="p">[</span><span class="n">volume_size</span><span class="p">]</span> <span class="o">*</span> <span class="mi">3</span><span class="p">,</span>
<span class="n">voxel_size</span> <span class="o">=</span> <span class="n">volume_extent_world</span> <span class="o">/</span> <span class="n">volume_size</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the Adam optimizer. We set its master learning rate to 0.1.</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span><span class="n">volume_model</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">)</span>
<span class="c1"># We do 300 Adam iterations and sample 10 random images in each minibatch.</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">10</span>
<span class="n">n_iter</span> <span class="o">=</span> <span class="mi">300</span>
<span class="k">for</span> <span class="n">iteration</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_iter</span><span class="p">):</span>
<span class="c1"># In case we reached the last 75% of iterations,</span>
<span class="c1"># decrease the learning rate of the optimizer 10-fold.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">==</span> <span class="nb">round</span><span class="p">(</span><span class="n">n_iter</span> <span class="o">*</span> <span class="mf">0.75</span><span class="p">):</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Decreasing LR 10-fold ...'</span><span class="p">)</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span>
<span class="n">volume_model</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span> <span class="o">*</span> <span class="mf">0.1</span>
<span class="p">)</span>
<span class="c1"># Zero the optimizer gradient.</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="c1"># Sample random batch indices.</span>
<span class="n">batch_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="n">batch_size</span><span class="p">]</span>
<span class="c1"># Sample the minibatch of cameras.</span>
<span class="n">batch_cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Evaluate the volumetric model.</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">rendered_silhouettes</span> <span class="o">=</span> <span class="n">volume_model</span><span class="p">(</span>
<span class="n">batch_cameras</span>
<span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="c1"># Compute the silhouette error as the mean huber</span>
<span class="c1"># loss between the predicted masks and the</span>
<span class="c1"># target silhouettes.</span>
<span class="n">sil_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_silhouettes</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># Compute the color error as the mean huber</span>
<span class="c1"># loss between the rendered colors and the</span>
<span class="c1"># target ground truth images.</span>
<span class="n">color_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># The optimization loss is a simple</span>
<span class="c1"># sum of the color and silhouette errors.</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">color_err</span> <span class="o">+</span> <span class="n">sil_err</span>
<span class="c1"># Print the current values of the losses.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">10</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="sa">f</span><span class="s1">'Iteration </span><span class="si">{</span><span class="n">iteration</span><span class="si">:</span><span class="s1">05d</span><span class="si">}</span><span class="s1">:'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' color_err = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' mask_err = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="p">)</span>
<span class="c1"># Take the optimization step.</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
<span class="c1"># Visualize the renders every 40 iterations.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">40</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="c1"># Visualize only a single randomly selected element of the batch.</span>
<span class="n">im_show_idx</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="n">low</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">high</span><span class="o">=</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,)))</span>
<span class="n">fig</span><span class="p">,</span> <span class="n">ax</span> <span class="o">=</span> <span class="n">plt</span><span class="o">.</span><span class="n">subplots</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
<span class="n">ax</span> <span class="o">=</span> <span class="n">ax</span><span class="o">.</span><span class="n">ravel</span><span class="p">()</span>
<span class="n">clamp_and_detach</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_images</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">],</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">2</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_silhouettes</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">]]))</span>
<span class="k">for</span> <span class="n">ax_</span><span class="p">,</span> <span class="n">title_</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span>
<span class="n">ax</span><span class="p">,</span>
<span class="p">(</span><span class="s2">"rendered image"</span><span class="p">,</span> <span class="s2">"target image"</span><span class="p">,</span> <span class="s2">"rendered silhouette"</span><span class="p">,</span> <span class="s2">"target silhouette"</span><span class="p">)</span>
<span class="p">):</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">set_title</span><span class="p">(</span><span class="n">title_</span><span class="p">)</span>
<span class="n">fig</span><span class="o">.</span><span class="n">canvas</span><span class="o">.</span><span class="n">draw</span><span class="p">();</span> <span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="n">display</span><span class="o">.</span><span class="n">clear_output</span><span class="p">(</span><span class="n">wait</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">display</span><span class="o">.</span><span class="n">display</span><span class="p">(</span><span class="n">fig</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Visualizing-the-optimized-volume">5. Visualizing the optimized volume<a class="anchor-link" href="#5.-Visualizing-the-optimized-volume"></a></h2><p>Finally, we visualize the optimized volume by rendering from multiple viewpoints that rotate around the volume's y-axis.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">generate_rotating_volume</span><span class="p">(</span><span class="n">volume_model</span><span class="p">,</span> <span class="n">n_frames</span> <span class="o">=</span> <span class="mi">50</span><span class="p">):</span>
<span class="n">logRs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">logRs</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">2.0</span> <span class="o">*</span> <span class="mf">3.14</span><span class="p">,</span> <span class="n">n_frames</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Rs</span> <span class="o">=</span> <span class="n">so3_exp_map</span><span class="p">(</span><span class="n">logRs</span><span class="p">)</span>
<span class="n">Ts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Ts</span><span class="p">[:,</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="mf">2.7</span>
<span class="n">frames</span> <span class="o">=</span> <span class="p">[]</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Generating rotating volume ...'</span><span class="p">)</span>
<span class="k">for</span> <span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">tqdm</span><span class="p">(</span><span class="n">Rs</span><span class="p">),</span> <span class="n">Ts</span><span class="p">):</span>
<span class="n">camera</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">frames</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">volume_model</span><span class="p">(</span><span class="n">camera</span><span class="p">)[</span><span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">))</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span><span class="n">frames</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">rotating_volume_frames</span> <span class="o">=</span> <span class="n">generate_rotating_volume</span><span class="p">(</span><span class="n">volume_model</span><span class="p">,</span> <span class="n">n_frames</span><span class="o">=</span><span class="mi">7</span><span class="o">*</span><span class="mi">4</span><span class="p">)</span>
<span class="n">image_grid</span><span class="p">(</span><span class="n">rotating_volume_frames</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">(),</span> <span class="n">rows</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">cols</span><span class="o">=</span><span class="mi">7</span><span class="p">,</span> <span class="n">rgb</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">fill</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</div>
</div>
</div>
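<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>The camera path above is built with <code>so3_exp_map</code>, which maps an axis-angle vector to a rotation matrix, so a log-rotation of (0, theta, 0) rotates the camera by theta around the y-axis. A minimal check of this behavior:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># so3_exp_map turns axis-angle vectors into rotation matrices;
# (0, pi/2, 0) is a ~90 degree rotation about the +Y axis.
log_R = torch.tensor([[0.0, math.pi / 2, 0.0]], device=device)
print(so3_exp_map(log_R))  # a (1, 3, 3) rotation matrix
</pre></div>
</div>
</div>
</div>
</div>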
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="6.-Conclusion">6. Conclusion<a class="anchor-link" href="#6.-Conclusion"></a></h2><p>In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an <code>NDCGridRaysampler</code> and an <code>EmissionAbsorptionRaymarcher</code>.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>

View File

@ -0,0 +1,548 @@
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
var links = coll[i].nextElementSibling.getElementsByTagName('*');
if (checkActiveCategory){
for (var j = 0; j < links.length; j++) {
if (links[j].classList.contains('navListItemActive')){
coll[i].nextElementSibling.classList.toggle('hide');
coll[i].childNodes[1].classList.toggle('rotate');
checkActiveCategory = false;
break;
}
}
}
coll[i].addEventListener('click', function() {
var arrow = this.childNodes[1];
arrow.classList.toggle('rotate');
var content = this.nextElementSibling;
content.classList.toggle('hide');
});
}
document.addEventListener('DOMContentLoaded', function() {
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
createToggler('#tocToggler', 'body', 'tocActive');
var headings = document.querySelector('.toc-headings');
headings && headings.addEventListener('click', function(event) {
var el = event.target;
while(el !== headings){
if (el.tagName === 'A') {
document.body.classList.remove('tocActive');
break;
} else{
el = el.parentNode;
}
}
}, false);
function createToggler(togglerSelector, targetSelector, className) {
var toggler = document.querySelector(togglerSelector);
var target = document.querySelector(targetSelector);
if (!toggler) {
return;
}
toggler.onclick = function(event) {
event.preventDefault();
target.classList.toggle(className);
};
}
});
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/fit_textured_volume.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_textured_volume.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/fit_textured_volume.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
<div class="notebook">
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Fit-a-volume-via-raymarching">Fit a volume via raymarching<a class="anchor-link" href="#Fit-a-volume-via-raymarching"></a></h1><p>This tutorial shows how to fit a volume given a set of views of a scene using differentiable volumetric rendering.</p>
<p>More specifically, this tutorial will explain how to:</p>
<ol>
<li>Create a differentiable volumetric renderer.</li>
<li>Create a Volumetric model (including how to use the <code>Volumes</code> class).</li>
<li>Fit the volume based on the images using the differentiable volumetric renderer. </li>
<li>Visualize the predicted volume.</li>
</ol>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-modules">0. Install and Import modules<a class="anchor-link" href="#0.-Install-and-Import-modules"></a></h2><p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"1.9"</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">:</span><span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">time</span>
<span class="kn">import</span> <span class="nn">json</span>
<span class="kn">import</span> <span class="nn">glob</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">math</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="kn">import</span> <span class="n">tqdm</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
<span class="kn">from</span> <span class="nn">IPython</span> <span class="kn">import</span> <span class="n">display</span>
<span class="c1"># Data structures and functions for rendering</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="kn">import</span> <span class="n">Volumes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
<span class="n">VolumeRenderer</span><span class="p">,</span>
<span class="n">NDCGridRaysampler</span><span class="p">,</span>
<span class="n">EmissionAbsorptionRaymarcher</span>
<span class="p">)</span>
<span class="kn">from</span> <span class="nn">pytorch3d.transforms</span> <span class="kn">import</span> <span class="n">so3_exp_map</span>
<span class="c1"># obtain the utilized device</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
<span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="kn">import</span> <span class="n">image_grid</span>
<span class="kn">from</span> <span class="nn">generate_cow_renders</span> <span class="kn">import</span> <span class="n">generate_cow_renders</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Or, if running locally, uncomment and run the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># from utils.generate_cow_renders import generate_cow_renders</span>
<span class="c1"># from utils import image_grid</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="1.-Generate-images-of-the-scene-and-masks">1. Generate images of the scene and masks<a class="anchor-link" href="#1.-Generate-images-of-the-scene-and-masks"></a></h2><p>The following cell generates our training data.
It renders the cow mesh from the <code>fit_textured_mesh.ipynb</code> tutorial from several viewpoints and returns:</p>
<ol>
<li>A batch of image and silhouette tensors that are produced by the cow mesh renderer.</li>
<li>A set of cameras corresponding to each render.</li>
</ol>
<p>Note: Since this tutorial aims at explaining the details of volumetric rendering, we do not explain how the mesh rendering implemented in the <code>generate_cow_renders</code> function works. Please refer to <code>fit_textured_mesh.ipynb</code> for a detailed explanation of mesh rendering.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="n">target_cameras</span><span class="p">,</span> <span class="n">target_images</span><span class="p">,</span> <span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">generate_cow_renders</span><span class="p">(</span><span class="n">num_views</span><span class="o">=</span><span class="mi">40</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="sa">f</span><span class="s1">'Generated </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">target_images</span><span class="p">)</span><span class="si">}</span><span class="s1"> images/silhouettes/cameras.'</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
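<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a quick sanity check (a minimal sketch; the exact spatial size depends on the <code>generate_cow_renders</code> defaults), the images form a batch of RGB renders, the silhouettes a batch of single-channel masks, and the camera batch holds one rotation per view:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Expected: (num_views, H, W, 3) images, (num_views, H, W) silhouettes,
# and one (3, 3) rotation per camera in the batch.
print(target_images.shape, target_silhouettes.shape, target_cameras.R.shape)
</pre></div>
</div>
</div>
</div>
</div>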
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Initialize-the-volumetric-renderer">2. Initialize the volumetric renderer<a class="anchor-link" href="#2.-Initialize-the-volumetric-renderer"></a></h2><p>The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described &amp; instantiated in a later cell).</p>
<p>The renderer is composed of a <em>raymarcher</em> and a <em>raysampler</em>.</p>
<ul>
<li>The <em>raysampler</em> is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the <code>NDCGridRaysampler</code> which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).</li>
<li>The <em>raymarcher</em> takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the <code>EmissionAbsorptionRaymarcher</code>, which implements the standard emission-absorption raymarching algorithm; a minimal sketch of its compositing rule follows this cell.</li>
</ul>
</div>
</div>
</div>
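<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>To make the raymarcher concrete before instantiating it, here is a minimal single-ray sketch of emission-absorption compositing: each ray-point contributes with a weight equal to its density times the transmittance of the points in front of it. (The actual <code>EmissionAbsorptionRaymarcher</code> additionally handles batching and numerical stabilization.)</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Minimal single-ray sketch of emission-absorption compositing.
import torch

def ea_composite(densities, colors):
    # densities: (n_pts,) opacities in [0, 1]; colors: (n_pts, 3).
    # Transmittance in front of point i: prod over j &lt; i of (1 - d_j).
    transmittance = torch.cumprod(
        torch.cat([torch.ones(1), 1.0 - densities[:-1]]), dim=0
    )
    weights = densities * transmittance           # w_i = d_i * T_i
    color = (weights[:, None] * colors).sum(dim=0)
    # Identity: sum_i w_i == 1 - prod_i (1 - d_i).
    opacity = 1.0 - torch.prod(1.0 - densities)
    return color, opacity

# A fully opaque point hides everything behind it:
d = torch.tensor([0.0, 1.0, 0.7])
c = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print(ea_composite(d, c))  # color ~ (0, 1, 0), opacity ~ 1
</pre></div>
</div>
</div>
</div>
</div>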
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render_size describes the size of both sides of the </span>
<span class="c1"># rendered images in pixels. We set this to the same size</span>
<span class="c1"># as the target images. I.e. we render at the same</span>
<span class="c1"># size as the ground truth images.</span>
<span class="n">render_size</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="c1"># Our rendered scene is centered around (0,0,0) </span>
<span class="c1"># and is enclosed inside a bounding box</span>
<span class="c1"># whose side is roughly equal to 3.0 (world units).</span>
<span class="n">volume_extent_world</span> <span class="o">=</span> <span class="mf">3.0</span>
<span class="c1"># 1) Instantiate the raysampler.</span>
<span class="c1"># Here, NDCGridRaysampler generates a rectangular image</span>
<span class="c1"># grid of rays whose coordinates follow the PyTorch3D</span>
<span class="c1"># coordinate conventions.</span>
<span class="c1"># Since we use a volume of size 128^3, we sample n_pts_per_ray=150,</span>
<span class="c1"># which roughly corresponds to a one ray-point per voxel.</span>
<span class="c1"># We further set the min_depth=0.1 since there is no surface within</span>
<span class="c1"># 0.1 units of any camera plane.</span>
<span class="n">raysampler</span> <span class="o">=</span> <span class="n">NDCGridRaysampler</span><span class="p">(</span>
<span class="n">image_width</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">image_height</span><span class="o">=</span><span class="n">render_size</span><span class="p">,</span>
<span class="n">n_pts_per_ray</span><span class="o">=</span><span class="mi">150</span><span class="p">,</span>
<span class="n">min_depth</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span>
<span class="n">max_depth</span><span class="o">=</span><span class="n">volume_extent_world</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># 2) Instantiate the raymarcher.</span>
<span class="c1"># Here, we use the standard EmissionAbsorptionRaymarcher </span>
<span class="c1"># which marches along each ray in order to render</span>
<span class="c1"># each ray into a single 3D color vector </span>
<span class="c1"># and an opacity scalar.</span>
<span class="n">raymarcher</span> <span class="o">=</span> <span class="n">EmissionAbsorptionRaymarcher</span><span class="p">()</span>
<span class="c1"># Finally, instantiate the volumetric render</span>
<span class="c1"># with the raysampler and raymarcher objects.</span>
<span class="n">renderer</span> <span class="o">=</span> <span class="n">VolumeRenderer</span><span class="p">(</span>
<span class="n">raysampler</span><span class="o">=</span><span class="n">raysampler</span><span class="p">,</span> <span class="n">raymarcher</span><span class="o">=</span><span class="n">raymarcher</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
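<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Calling <code>renderer(cameras=..., volumes=...)</code> returns a tuple whose first element is the render itself: the 3 color channels and the 1 opacity channel are stacked along the last dimension, which is why the fitting loop below splits the model output with <code>.split([3, 1], dim=-1)</code>.</p>
</div>
</div>
</div>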
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Initialize-the-volumetric-model">3. Initialize the volumetric model<a class="anchor-link" href="#3.-Initialize-the-volumetric-model"></a></h2><p>Next we instantiate a volumetric model of the scene. This quantizes the 3D space to cubical voxels, where each voxel is described with a 3D vector representing the voxel's RGB color and a density scalar which describes the opacity of the voxel (ranging between [0-1], the higher the more opaque).</p>
<p>In order to ensure the range of densities and colors is between [0-1], we represent both volume colors and densities in the logarithmic space. During the forward function of the model, the log-space values are passed through the sigmoid function to bring the log-space values to the correct range.</p>
<p>Additionally, <code>VolumeModel</code> contains the renderer object. This object stays unaltered throughout the optimization.</p>
<p>In this cell we also define the <code>huber</code> loss function which computes the discrepancy between the rendered colors and masks.</p>
</div>
</div>
</div>
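<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Before defining the model, a quick numeric check of the log-space parametrization used in the next cell: a density logit of <code>-4.0</code> maps to a nearly transparent voxel, while a zero color logit maps to neutral gray:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span>import torch
# sigmoid(-4.0) ~ 0.018: voxels start out almost fully transparent.
print(torch.sigmoid(torch.tensor(-4.0)))
# sigmoid(0.0) = 0.5: voxel colors start out as neutral gray.
print(torch.sigmoid(torch.tensor(0.0)))
</pre></div>
</div>
</div>
</div>
</div>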
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">class</span> <span class="nc">VolumeModel</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">renderer</span><span class="p">,</span> <span class="n">volume_size</span><span class="o">=</span><span class="p">[</span><span class="mi">64</span><span class="p">]</span> <span class="o">*</span> <span class="mi">3</span><span class="p">,</span> <span class="n">voxel_size</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="c1"># After evaluating torch.sigmoid(self.log_colors), we get </span>
<span class="c1"># densities close to zero.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="o">-</span><span class="mf">4.0</span> <span class="o">*</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">*</span><span class="n">volume_size</span><span class="p">))</span>
<span class="c1"># After evaluating torch.sigmoid(self.log_colors), we get </span>
<span class="c1"># a neutral gray color everywhere.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="o">*</span><span class="n">volume_size</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_voxel_size</span> <span class="o">=</span> <span class="n">voxel_size</span>
<span class="c1"># Store the renderer module as well.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_renderer</span> <span class="o">=</span> <span class="n">renderer</span>
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">cameras</span><span class="p">):</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="n">cameras</span><span class="o">.</span><span class="n">R</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="c1"># Convert the log-space values to the densities/colors</span>
<span class="n">densities</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sigmoid</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span><span class="p">)</span>
<span class="n">colors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sigmoid</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span><span class="p">)</span>
<span class="c1"># Instantiate the Volumes object, making sure</span>
<span class="c1"># the densities and colors are correctly</span>
<span class="c1"># expanded batch_size-times.</span>
<span class="n">volumes</span> <span class="o">=</span> <span class="n">Volumes</span><span class="p">(</span>
<span class="n">densities</span> <span class="o">=</span> <span class="n">densities</span><span class="p">[</span><span class="kc">None</span><span class="p">]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="n">batch_size</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">log_densities</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span>
<span class="n">features</span> <span class="o">=</span> <span class="n">colors</span><span class="p">[</span><span class="kc">None</span><span class="p">]</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span>
<span class="n">batch_size</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">log_colors</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span>
<span class="n">voxel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_voxel_size</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Given cameras and volumes, run the renderer</span>
<span class="c1"># and return only the first output value </span>
<span class="c1"># (the 2nd output is a representation of the sampled</span>
<span class="c1"># rays which can be omitted for our purpose).</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_renderer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">volumes</span><span class="o">=</span><span class="n">volumes</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
<span class="c1"># A helper function for evaluating the smooth L1 (huber) loss</span>
<span class="c1"># between the rendered silhouettes and colors.</span>
<span class="k">def</span> <span class="nf">huber</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">scaling</span><span class="o">=</span><span class="mf">0.1</span><span class="p">):</span>
<span class="n">diff_sq</span> <span class="o">=</span> <span class="p">(</span><span class="n">x</span> <span class="o">-</span> <span class="n">y</span><span class="p">)</span> <span class="o">**</span> <span class="mi">2</span>
<span class="n">loss</span> <span class="o">=</span> <span class="p">((</span><span class="mi">1</span> <span class="o">+</span> <span class="n">diff_sq</span> <span class="o">/</span> <span class="p">(</span><span class="n">scaling</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">)</span><span class="o">.</span><span class="n">sqrt</span><span class="p">()</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="nb">float</span><span class="p">(</span><span class="n">scaling</span><span class="p">)</span>
<span class="k">return</span> <span class="n">loss</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Fit-the-volume">4. Fit the volume<a class="anchor-link" href="#4.-Fit-the-volume"></a></h2><p>Here we carry out the volume fitting with differentiable rendering.</p>
<p>In order to fit the volume, we render it from the viewpoints of the <code>target_cameras</code>
and compare the resulting renders with the observed <code>target_images</code> and <code>target_silhouettes</code>.</p>
<p>The comparison is done by evaluating the mean huber (smooth-l1) error between corresponding
pairs of <code>target_images</code>/<code>rendered_images</code> and <code>target_silhouettes</code>/<code>rendered_silhouettes</code>.</p>
</div>
</div>
</div>
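<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a side note on the loss, the <code>huber</code> function defined above is the pseudo-Huber loss <code>scaling * (sqrt(1 + (x - y)**2 / scaling**2) - 1)</code>: approximately quadratic for residuals much smaller than <code>scaling</code> and approximately linear for much larger ones, which keeps the fit robust to outlier pixels. A minimal check (assuming the cell defining <code>huber</code> has been run):</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span>import torch
zero = torch.zeros(())
# Small residual: ~ r**2 / (2 * scaling) (quadratic regime).
print(huber(torch.tensor(0.01), zero), 0.01**2 / (2 * 0.1))
# Large residual: ~ abs(r) - scaling (linear regime).
print(huber(torch.tensor(1.0), zero), 1.0 - 0.1)
</pre></div>
</div>
</div>
</div>
</div>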
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># First move all relevant variables to the correct device.</span>
<span class="n">target_cameras</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_images</span> <span class="o">=</span> <span class="n">target_images</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">target_silhouettes</span> <span class="o">=</span> <span class="n">target_silhouettes</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the volumetric model.</span>
<span class="c1"># We use a cubical volume with the size of </span>
<span class="c1"># one side = 128. The size of each voxel of the volume </span>
<span class="c1"># is set to volume_extent_world / volume_size s.t. the</span>
<span class="c1"># volume represents the space enclosed in a 3D bounding box</span>
<span class="c1"># centered at (0, 0, 0) with the size of each side equal to 3.</span>
<span class="n">volume_size</span> <span class="o">=</span> <span class="mi">128</span>
<span class="n">volume_model</span> <span class="o">=</span> <span class="n">VolumeModel</span><span class="p">(</span>
<span class="n">renderer</span><span class="p">,</span>
<span class="n">volume_size</span><span class="o">=</span><span class="p">[</span><span class="n">volume_size</span><span class="p">]</span> <span class="o">*</span> <span class="mi">3</span><span class="p">,</span>
<span class="n">voxel_size</span> <span class="o">=</span> <span class="n">volume_extent_world</span> <span class="o">/</span> <span class="n">volume_size</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># Instantiate the Adam optimizer. We set its master learning rate to 0.1.</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span><span class="n">volume_model</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">)</span>
<span class="c1"># We do 300 Adam iterations and sample 10 random images in each minibatch.</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">10</span>
<span class="n">n_iter</span> <span class="o">=</span> <span class="mi">300</span>
<span class="k">for</span> <span class="n">iteration</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_iter</span><span class="p">):</span>
<span class="c1"># In case we reached the last 75% of iterations,</span>
<span class="c1"># decrease the learning rate of the optimizer 10-fold.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">==</span> <span class="nb">round</span><span class="p">(</span><span class="n">n_iter</span> <span class="o">*</span> <span class="mf">0.75</span><span class="p">):</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Decreasing LR 10-fold ...'</span><span class="p">)</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">(</span>
<span class="n">volume_model</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span> <span class="o">*</span> <span class="mf">0.1</span>
<span class="p">)</span>
<span class="c1"># Zero the optimizer gradient.</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="c1"># Sample random batch indices.</span>
<span class="n">batch_idx</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randperm</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">target_cameras</span><span class="p">))[:</span><span class="n">batch_size</span><span class="p">]</span>
<span class="c1"># Sample the minibatch of cameras.</span>
<span class="n">batch_cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Evaluate the volumetric model.</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">rendered_silhouettes</span> <span class="o">=</span> <span class="n">volume_model</span><span class="p">(</span>
<span class="n">batch_cameras</span>
<span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">([</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
<span class="c1"># Compute the silhouette error as the mean huber</span>
<span class="c1"># loss between the predicted masks and the</span>
<span class="c1"># target silhouettes.</span>
<span class="n">sil_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_silhouettes</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># Compute the color error as the mean huber</span>
<span class="c1"># loss between the rendered colors and the</span>
<span class="c1"># target ground truth images.</span>
<span class="n">color_err</span> <span class="o">=</span> <span class="n">huber</span><span class="p">(</span>
<span class="n">rendered_images</span><span class="p">,</span> <span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">],</span>
<span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># The optimization loss is a simple</span>
<span class="c1"># sum of the color and silhouette errors.</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">color_err</span> <span class="o">+</span> <span class="n">sil_err</span>
<span class="c1"># Print the current values of the losses.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">10</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span>
<span class="sa">f</span><span class="s1">'Iteration </span><span class="si">{</span><span class="n">iteration</span><span class="si">:</span><span class="s1">05d</span><span class="si">}</span><span class="s1">:'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' color_err = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">color_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="o">+</span> <span class="sa">f</span><span class="s1">' mask_err = </span><span class="si">{</span><span class="nb">float</span><span class="p">(</span><span class="n">sil_err</span><span class="p">)</span><span class="si">:</span><span class="s1">1.2e</span><span class="si">}</span><span class="s1">'</span>
<span class="p">)</span>
<span class="c1"># Take the optimization step.</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
<span class="c1"># Visualize the renders every 40 iterations.</span>
<span class="k">if</span> <span class="n">iteration</span> <span class="o">%</span> <span class="mi">40</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="c1"># Visualize only a single randomly selected element of the batch.</span>
<span class="n">im_show_idx</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="n">low</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">high</span><span class="o">=</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,)))</span>
<span class="n">fig</span><span class="p">,</span> <span class="n">ax</span> <span class="o">=</span> <span class="n">plt</span><span class="o">.</span><span class="n">subplots</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
<span class="n">ax</span> <span class="o">=</span> <span class="n">ax</span><span class="o">.</span><span class="n">ravel</span><span class="p">()</span>
<span class="n">clamp_and_detach</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_images</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_images</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">],</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">2</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">rendered_silhouettes</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="mi">0</span><span class="p">]))</span>
<span class="n">ax</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">clamp_and_detach</span><span class="p">(</span><span class="n">target_silhouettes</span><span class="p">[</span><span class="n">batch_idx</span><span class="p">[</span><span class="n">im_show_idx</span><span class="p">]]))</span>
<span class="k">for</span> <span class="n">ax_</span><span class="p">,</span> <span class="n">title_</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span>
<span class="n">ax</span><span class="p">,</span>
<span class="p">(</span><span class="s2">"rendered image"</span><span class="p">,</span> <span class="s2">"target image"</span><span class="p">,</span> <span class="s2">"rendered silhouette"</span><span class="p">,</span> <span class="s2">"target silhouette"</span><span class="p">)</span>
<span class="p">):</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">ax_</span><span class="o">.</span><span class="n">set_title</span><span class="p">(</span><span class="n">title_</span><span class="p">)</span>
<span class="n">fig</span><span class="o">.</span><span class="n">canvas</span><span class="o">.</span><span class="n">draw</span><span class="p">();</span> <span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="n">display</span><span class="o">.</span><span class="n">clear_output</span><span class="p">(</span><span class="n">wait</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">display</span><span class="o">.</span><span class="n">display</span><span class="p">(</span><span class="n">fig</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Visualizing-the-optimized-volume">5. Visualizing the optimized volume<a class="anchor-link" href="#5.-Visualizing-the-optimized-volume"></a></h2><p>Finally, we visualize the optimized volume by rendering from multiple viewpoints that rotate around the volume's y-axis.</p>
</div>
</div>
</div>
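<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>A minimal sketch of the <code>so3_exp_map</code> call used below: it maps axis-angle vectors (log-rotations) to 3x3 rotation matrices, so filling the second coordinate with angles that sweep a full circle yields a sequence of rotations about the y-axis:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span>import math
import torch
from pytorch3d.transforms import so3_exp_map
# A log-rotation of pi/2 radians about the +Y axis.
log_R = torch.tensor([[0.0, math.pi / 2, 0.0]])
print(so3_exp_map(log_R))  # a ~90 degree rotation matrix about +Y
</pre></div>
</div>
</div>
</div>
</div>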
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">generate_rotating_volume</span><span class="p">(</span><span class="n">volume_model</span><span class="p">,</span> <span class="n">n_frames</span> <span class="o">=</span> <span class="mi">50</span><span class="p">):</span>
<span class="n">logRs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">logRs</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">2.0</span> <span class="o">*</span> <span class="mf">3.14</span><span class="p">,</span> <span class="n">n_frames</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Rs</span> <span class="o">=</span> <span class="n">so3_exp_map</span><span class="p">(</span><span class="n">logRs</span><span class="p">)</span>
<span class="n">Ts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">n_frames</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">Ts</span><span class="p">[:,</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="mf">2.7</span>
<span class="n">frames</span> <span class="o">=</span> <span class="p">[]</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Generating rotating volume ...'</span><span class="p">)</span>
<span class="k">for</span> <span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">tqdm</span><span class="p">(</span><span class="n">Rs</span><span class="p">),</span> <span class="n">Ts</span><span class="p">):</span>
<span class="n">camera</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span>
<span class="n">znear</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">znear</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">zfar</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">zfar</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">aspect_ratio</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">aspect_ratio</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">fov</span> <span class="o">=</span> <span class="n">target_cameras</span><span class="o">.</span><span class="n">fov</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">frames</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">volume_model</span><span class="p">(</span><span class="n">camera</span><span class="p">)[</span><span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">))</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">(</span><span class="n">frames</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">rotating_volume_frames</span> <span class="o">=</span> <span class="n">generate_rotating_volume</span><span class="p">(</span><span class="n">volume_model</span><span class="p">,</span> <span class="n">n_frames</span><span class="o">=</span><span class="mi">7</span><span class="o">*</span><span class="mi">4</span><span class="p">)</span>
<span class="n">image_grid</span><span class="p">(</span><span class="n">rotating_volume_frames</span><span class="o">.</span><span class="n">clamp</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">(),</span> <span class="n">rows</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">cols</span><span class="o">=</span><span class="mi">7</span><span class="p">,</span> <span class="n">rgb</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">fill</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="6.-Conclusion">6. Conclusion<a class="anchor-link" href="#6.-Conclusion"></a></h2><p>In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an <code>NDCGridRaysampler</code> and an <code>EmissionAbsorptionRaymarcher</code>.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {

View File

@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
[minified HTML collapsed — render_densepose tutorial page, old vs. new sidebar markup: same two links added to the "Rendering" group.]
[identical old/new sidebar diff repeated for a second generated copy of the render_densepose page.]
[minified HTML collapsed — render_textured_meshes tutorial page, old vs. new sidebar markup: same two links added to the "Rendering" group.]
[identical old/new sidebar diff repeated for a second generated copy of the render_textured_meshes page.]