Jeremy Francis Reizenstein aa880b723b v0.7.5
2023-10-31 13:08:44 -07:00

424 lines
41 KiB
HTML
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
// Standard Google Analytics (analytics.js) bootstrap snippet, minified.
// It installs a global `ga` command queue (i[r]) that buffers calls made
// before the library loads, records the load timestamp (i[r].l), then
// injects an async <script> tag for analytics.js ahead of the first
// existing script element on the page.
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
// Register the tracking property and record the initial pageview.
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i></i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul 
class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_volume">Fit a volume via raymarching</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_simple_neural_radiance_field">Fit a simplified NeRF via raymarching</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Implicitron</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/implicitron_volumes">Training a custom volumes function with implicitron</a></li><li class="navListItem"><a class="navItem" href="/tutorials/implicitron_config_system">Implicitron config system deep dive</a></li></ul></div></div></section></div><script>
// Sidebar-navigation behavior: expand the category containing the active
// item on load, let category headers collapse/expand on click, and wire up
// the mobile nav/TOC togglers once the DOM is ready.
var collapsibles = document.getElementsByClassName('collapsible');
var searchingForActive = true;
for (var idx = 0; idx < collapsibles.length; idx++) {
  var header = collapsibles[idx];
  // On load, un-hide only the first category that contains the active item.
  if (searchingForActive) {
    var descendants = header.nextElementSibling.getElementsByTagName('*');
    for (var d = 0; d < descendants.length; d++) {
      if (descendants[d].classList.contains('navListItemActive')) {
        header.nextElementSibling.classList.toggle('hide');
        header.childNodes[1].classList.toggle('rotate');
        searchingForActive = false;
        break;
      }
    }
  }
  // Clicking a category header toggles its panel and rotates its arrow.
  header.addEventListener('click', function() {
    this.childNodes[1].classList.toggle('rotate');
    this.nextElementSibling.classList.toggle('hide');
  });
}
document.addEventListener('DOMContentLoaded', function() {
  // Bind a click on togglerSelector to toggle className on targetSelector.
  // No-op when the toggler element is absent from this page.
  function createToggler(togglerSelector, targetSelector, className) {
    var toggler = document.querySelector(togglerSelector);
    if (!toggler) {
      return;
    }
    var target = document.querySelector(targetSelector);
    toggler.onclick = function(event) {
      event.preventDefault();
      target.classList.toggle(className);
    };
  }
  createToggler('#navToggler', '#docsNav', 'docsSliderActive');
  createToggler('#tocToggler', 'body', 'tocActive');
  var headings = document.querySelector('.toc-headings');
  // Close the mobile TOC when a link inside the headings list is clicked;
  // walk up from the click target until a link (or the container) is found.
  headings && headings.addEventListener('click', function(event) {
    for (var el = event.target; el !== headings; el = el.parentNode) {
      if (el.tagName === 'A') {
        document.body.classList.remove('tocActive');
        break;
      }
    }
  }, false);
});
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/bundle_adjustment.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/bundle_adjustment.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/bundle_adjustment.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 
7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
<div class="notebook">
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Absolute-camera-orientation-given-set-of-relative-camera-pairs">Absolute camera orientation given set of relative camera pairs<a class="anchor-link" href="#Absolute-camera-orientation-given-set-of-relative-camera-pairs"></a></h1><p>This tutorial showcases the <code>cameras</code>, <code>transforms</code> and <code>so3</code> API.</p>
<p>The problem we deal with is defined as follows:</p>
<p>Given an optical system of $N$ cameras with extrinsics $\{g_1, ..., g_N | g_i \in SE(3)\}$, and a set of relative camera positions $\{g_{ij} | g_{ij}\in SE(3)\}$ that map between coordinate frames of randomly selected pairs of cameras $(i, j)$, we search for the absolute extrinsic parameters $\{g_1, ..., g_N\}$ that are consistent with the relative camera motions.</p>
<p>More formally:
$$
g_1, ..., g_N =
{\arg \min}_{g_1, ..., g_N} \sum_{g_{ij}} d(g_{ij}, g_i^{-1} g_j),
$$
where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras $g_i$ and $g_j$.</p>
<p>Visually, the problem can be described as follows. The picture below depicts the situation at the beginning of our optimization. The ground truth cameras are plotted in purple while the randomly initialized estimated cameras are plotted in orange:
<img alt="Initialization" src="https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/data/bundle_adjustment_initialization.png?raw=1"/></p>
<p>Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows:
<img alt="Solution" src="https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/data/bundle_adjustment_final.png?raw=1"/></p>
<p>In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the <code>SfMPerspectiveCameras</code> class initialized with the corresponding rotation and translation matrices <code>R_absolute</code> and <code>T_absolute</code> that define the extrinsic parameters $g = (R, T); R \in SO(3); T \in \mathbb{R}^3$. In order to ensure that <code>R_absolute</code> is a valid rotation matrix, we represent it using an exponential map (implemented with <code>so3_exp_map</code>) of the axis-angle representation of the rotation <code>log_R_absolute</code>.</p>
<p>Note that the solution to this problem can only be recovered up to an unknown global rigid transformation $g_{glob} \in SE(3)$. Thus, for simplicity, we assume knowledge of the absolute extrinsics of the first camera $g_0$. We set $g_0$ as a trivial camera $g_0 = (I, \vec{0})$.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-Modules">0. Install and Import Modules<a class="anchor-link" href="#0.-Install-and-Import-Modules"></a></h2>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"2.1."</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">pyt_version_str</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">"+"</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span> <span class="s2">""</span><span class="p">)</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">pyt_version_str</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip<span class="w"> </span>install<span class="w"> </span>fvcore<span class="w"> </span>iopath
<span class="o">!</span>pip<span class="w"> </span>install<span class="w"> </span>--no-index<span class="w"> </span>--no-cache-dir<span class="w"> </span>pytorch3d<span class="w"> </span>-f<span class="w"> </span>https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>pip<span class="w"> </span>install<span class="w"> </span><span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># imports</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">from</span> <span class="nn">pytorch3d.transforms.so3</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">so3_exp_map</span><span class="p">,</span>
<span class="n">so3_relative_angle</span><span class="p">,</span>
<span class="p">)</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer.cameras</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">SfMPerspectiveCameras</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># add path for demo utils</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">os</span>
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="s1">''</span><span class="p">))</span>
<span class="c1"># set for reproducibility</span>
<span class="n">torch</span><span class="o">.</span><span class="n">manual_seed</span><span class="p">(</span><span class="mi">42</span><span class="p">)</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">"WARNING: CPU only, this will be slow!"</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>If using <strong>Google Colab</strong>, fetch the utils file for plotting the camera scene, and the ground truth camera positions:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget<span class="w"> </span>https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/camera_visualization.py
<span class="kn">from</span> <span class="nn">camera_visualization</span> <span class="kn">import</span> <span class="n">plot_camera_scene</span>
<span class="o">!</span>mkdir<span class="w"> </span>data
<span class="o">!</span>wget<span class="w"> </span>-P<span class="w"> </span>data<span class="w"> </span>https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/data/camera_graph.pth
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>OR if running <strong>locally</strong> uncomment and run the following cell:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># from utils import plot_camera_scene</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="1.-Set-up-Cameras-and-load-ground-truth-positions">1. Set up Cameras and load ground truth positions<a class="anchor-link" href="#1.-Set-up-Cameras-and-load-ground-truth-positions"></a></h2>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># load the SE3 graph of relative/absolute camera positions</span>
<span class="n">camera_graph_file</span> <span class="o">=</span> <span class="s1">'./data/camera_graph.pth'</span>
<span class="p">(</span><span class="n">R_absolute_gt</span><span class="p">,</span> <span class="n">T_absolute_gt</span><span class="p">),</span> \
<span class="p">(</span><span class="n">R_relative</span><span class="p">,</span> <span class="n">T_relative</span><span class="p">),</span> \
<span class="n">relative_edges</span> <span class="o">=</span> \
<span class="n">torch</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">camera_graph_file</span><span class="p">)</span>
<span class="c1"># create the relative cameras</span>
<span class="n">cameras_relative</span> <span class="o">=</span> <span class="n">SfMPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">R_relative</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">T_relative</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># create the absolute ground truth cameras</span>
<span class="n">cameras_absolute_gt</span> <span class="o">=</span> <span class="n">SfMPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">R_absolute_gt</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">T_absolute_gt</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># the number of absolute camera positions</span>
<span class="n">N</span> <span class="o">=</span> <span class="n">R_absolute_gt</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Define-optimization-functions">2. Define optimization functions<a class="anchor-link" href="#2.-Define-optimization-functions"></a></h2><h3 id="Relative-cameras-and-camera-distance">Relative cameras and camera distance<a class="anchor-link" href="#Relative-cameras-and-camera-distance"></a></h3><p>We now define two functions crucial for the optimization.</p>
<p><strong><code>calc_camera_distance</code></strong> compares a pair of cameras. This function is important as it defines the loss that we are minimizing. The method utilizes the <code>so3_relative_angle</code> function from the SO3 API.</p>
<p><strong><code>get_relative_camera</code></strong> computes the parameters of a relative camera that maps between a pair of absolute cameras. Here we utilize the <code>compose</code> and <code>inverse</code> class methods from the PyTorch3D Transforms API.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="k">def</span> <span class="nf">calc_camera_distance</span><span class="p">(</span><span class="n">cam_1</span><span class="p">,</span> <span class="n">cam_2</span><span class="p">):</span>
<span class="w"> </span><span class="sd">"""</span>
<span class="sd"> Calculates the divergence of a batch of pairs of cameras cam_1, cam_2.</span>
<span class="sd"> The distance is composed of the cosine of the relative angle between </span>
<span class="sd"> the rotation components of the camera extrinsics and the l2 distance</span>
<span class="sd"> between the translation vectors.</span>
<span class="sd"> """</span>
<span class="c1"># rotation distance</span>
<span class="n">R_distance</span> <span class="o">=</span> <span class="p">(</span><span class="mf">1.</span><span class="o">-</span><span class="n">so3_relative_angle</span><span class="p">(</span><span class="n">cam_1</span><span class="o">.</span><span class="n">R</span><span class="p">,</span> <span class="n">cam_2</span><span class="o">.</span><span class="n">R</span><span class="p">,</span> <span class="n">cos_angle</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># translation distance</span>
<span class="n">T_distance</span> <span class="o">=</span> <span class="p">((</span><span class="n">cam_1</span><span class="o">.</span><span class="n">T</span> <span class="o">-</span> <span class="n">cam_2</span><span class="o">.</span><span class="n">T</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="c1"># the final distance is the sum</span>
<span class="k">return</span> <span class="n">R_distance</span> <span class="o">+</span> <span class="n">T_distance</span>
<span class="k">def</span> <span class="nf">get_relative_camera</span><span class="p">(</span><span class="n">cams</span><span class="p">,</span> <span class="n">edges</span><span class="p">):</span>
<span class="w"> </span><span class="sd">"""</span>
<span class="sd"> For each pair of indices (i,j) in "edges" generate a camera</span>
<span class="sd"> that maps from the coordinates of the camera cams[i] to </span>
<span class="sd"> the coordinates of the camera cams[j]</span>
<span class="sd"> """</span>
<span class="c1"># first generate the world-to-view Transform3d objects of each </span>
<span class="c1"># camera pair (i, j) according to the edges argument</span>
<span class="n">trans_i</span><span class="p">,</span> <span class="n">trans_j</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">SfMPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">cams</span><span class="o">.</span><span class="n">R</span><span class="p">[</span><span class="n">edges</span><span class="p">[:,</span> <span class="n">i</span><span class="p">]],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">cams</span><span class="o">.</span><span class="n">T</span><span class="p">[</span><span class="n">edges</span><span class="p">[:,</span> <span class="n">i</span><span class="p">]],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span><span class="o">.</span><span class="n">get_world_to_view_transform</span><span class="p">()</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
<span class="p">]</span>
<span class="c1"># compose the relative transformation as g_i^{-1} g_j</span>
<span class="n">trans_rel</span> <span class="o">=</span> <span class="n">trans_i</span><span class="o">.</span><span class="n">inverse</span><span class="p">()</span><span class="o">.</span><span class="n">compose</span><span class="p">(</span><span class="n">trans_j</span><span class="p">)</span>
<span class="c1"># generate a camera from the relative transform</span>
<span class="n">matrix_rel</span> <span class="o">=</span> <span class="n">trans_rel</span><span class="o">.</span><span class="n">get_matrix</span><span class="p">()</span>
<span class="n">cams_relative</span> <span class="o">=</span> <span class="n">SfMPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">matrix_rel</span><span class="p">[:,</span> <span class="p">:</span><span class="mi">3</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">],</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">matrix_rel</span><span class="p">[:,</span> <span class="mi">3</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">],</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">cams_relative</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Optimization">3. Optimization<a class="anchor-link" href="#3.-Optimization"></a></h2><p>Finally, we start the optimization of the absolute cameras.</p>
<p>We use SGD with momentum and optimize over <code>log_R_absolute</code> and <code>T_absolute</code>.</p>
<p>As mentioned earlier, <code>log_R_absolute</code> is the axis angle representation of the rotation part of our absolute cameras. We can obtain the 3x3 rotation matrix <code>R_absolute</code> that corresponds to <code>log_R_absolute</code> with:</p>
<p><code>R_absolute = so3_exp_map(log_R_absolute)</code></p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># initialize the absolute log-rotations/translations with random entries</span>
<span class="n">log_R_absolute_init</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="n">N</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">T_absolute_init</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="n">N</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="c1"># furthermore, we know that the first camera is a trivial one </span>
<span class="c1"># (see the description above)</span>
<span class="n">log_R_absolute_init</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="p">:]</span> <span class="o">=</span> <span class="mf">0.</span>
<span class="n">T_absolute_init</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="p">:]</span> <span class="o">=</span> <span class="mf">0.</span>
<span class="c1"># instantiate a copy of the initialization of log_R / T</span>
<span class="n">log_R_absolute</span> <span class="o">=</span> <span class="n">log_R_absolute_init</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span>
<span class="n">log_R_absolute</span><span class="o">.</span><span class="n">requires_grad</span> <span class="o">=</span> <span class="kc">True</span>
<span class="n">T_absolute</span> <span class="o">=</span> <span class="n">T_absolute_init</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span>
<span class="n">T_absolute</span><span class="o">.</span><span class="n">requires_grad</span> <span class="o">=</span> <span class="kc">True</span>
<span class="c1"># the mask that specifies which cameras are going to be optimized</span>
<span class="c1"># (since we know the first camera is already correct, </span>
<span class="c1"># we only optimize over the 2nd through the last cameras)</span>
<span class="n">camera_mask</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="n">N</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">camera_mask</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="mf">0.</span>
<span class="c1"># init the optimizer</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">SGD</span><span class="p">([</span><span class="n">log_R_absolute</span><span class="p">,</span> <span class="n">T_absolute</span><span class="p">],</span> <span class="n">lr</span><span class="o">=</span><span class="mf">.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="c1"># run the optimization</span>
<span class="n">n_iter</span> <span class="o">=</span> <span class="mi">2000</span> <span class="c1"># fix the number of iterations</span>
<span class="k">for</span> <span class="n">it</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_iter</span><span class="p">):</span>
<span class="c1"># re-init the optimizer gradients</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="c1"># compute the absolute camera rotations as </span>
<span class="c1"># an exponential map of the logarithms (=axis-angles)</span>
<span class="c1"># of the absolute rotations</span>
<span class="n">R_absolute</span> <span class="o">=</span> <span class="n">so3_exp_map</span><span class="p">(</span><span class="n">log_R_absolute</span> <span class="o">*</span> <span class="n">camera_mask</span><span class="p">)</span>
<span class="c1"># get the current absolute cameras</span>
<span class="n">cameras_absolute</span> <span class="o">=</span> <span class="n">SfMPerspectiveCameras</span><span class="p">(</span>
<span class="n">R</span> <span class="o">=</span> <span class="n">R_absolute</span><span class="p">,</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">T_absolute</span> <span class="o">*</span> <span class="n">camera_mask</span><span class="p">,</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">device</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># compute the relative cameras as a composition of the absolute cameras</span>
<span class="n">cameras_relative_composed</span> <span class="o">=</span> \
<span class="n">get_relative_camera</span><span class="p">(</span><span class="n">cameras_absolute</span><span class="p">,</span> <span class="n">relative_edges</span><span class="p">)</span>
<span class="c1"># compare the composed cameras with the ground truth relative cameras</span>
<span class="c1"># camera_distance corresponds to $d$ from the description</span>
<span class="n">camera_distance</span> <span class="o">=</span> \
<span class="n">calc_camera_distance</span><span class="p">(</span><span class="n">cameras_relative_composed</span><span class="p">,</span> <span class="n">cameras_relative</span><span class="p">)</span>
<span class="c1"># our loss function is the camera_distance</span>
<span class="n">camera_distance</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="c1"># apply the gradients</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
<span class="c1"># plot and print status message</span>
<span class="k">if</span> <span class="n">it</span> <span class="o">%</span> <span class="mi">200</span><span class="o">==</span><span class="mi">0</span> <span class="ow">or</span> <span class="n">it</span><span class="o">==</span><span class="n">n_iter</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span>
<span class="n">status</span> <span class="o">=</span> <span class="s1">'iteration=</span><span class="si">%3d</span><span class="s1">; camera_distance=</span><span class="si">%1.3e</span><span class="s1">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">it</span><span class="p">,</span> <span class="n">camera_distance</span><span class="p">)</span>
<span class="n">plot_camera_scene</span><span class="p">(</span><span class="n">cameras_absolute</span><span class="p">,</span> <span class="n">cameras_absolute_gt</span><span class="p">,</span> <span class="n">status</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">'Optimization finished.'</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Conclusion">4. Conclusion<a class="anchor-link" href="#4.-Conclusion"></a></h2><p>In this tutorial we learnt how to initialize a batch of SfM Cameras, set up loss functions for bundle adjustment, and run an optimization loop.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2023 Meta Platforms, Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>