diff --git a/css/main.css b/css/main.css
index e91e7f45..a0384b60 100644
--- a/css/main.css
+++ b/css/main.css
@@ -1 +1 @@
-a,abbr,acronym,address,applet,article,aside,audio,b,big,blockquote,body,canvas,caption,center,cite,code,dd,del,details,dfn,div,dl,dt,em,embed,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,html,i,iframe,img,ins,kbd,label,legend,li,mark,menu,nav,object,ol,output,p,pre,q,ruby,s,samp,section,small,span,strike,strong,sub,summary,sup,table,tbody,td,tfoot,th,thead,time,tr,tt,u,ul,var,video{border:0;font:inherit;font-size:100%;margin:0;padding:0;vertical-align:baseline}body{color:#24292e;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol;font-size:16px;line-height:1.5;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%;word-wrap:break-word}*{box-sizing:border-box}b,strong{font-weight:600}em,i{font-style:italic}[type=checkbox]{box-sizing:border-box;padding:0}a,a:hover{color:#812ce5;text-decoration:none}a:active,a:hover{outline-width:0}a:not([href]){color:inherit;text-decoration:none}p{margin-bottom:1em;margin-top:0}h1,h2,h3,h4,h5,h6{color:inherit;font-weight:600;line-height:1.25;margin-bottom:16px;margin-top:1.5em}h1{font-size:32px}h2{font-size:24px}h3{font-size:20px}h4{font-size:16px}h5{font-size:14px}h6{font-size:13.6px}ol,ul{margin-bottom:1em;margin-top:0;padding-left:2em}ol ol,ul ol{list-style-type:lower-roman}ol ol,ol ul,ul ol,ul ul{margin-bottom:0;margin-top:0}ol ol ol,ol ul ol,ul ol ol,ul ul ol{list-style-type:lower-alpha}li{word-wrap:break-all}li>p{margin-top:1em}li+li{margin-top:.25em}img{border-style:none;box-sizing:content-box;max-width:100%}img[align=right]{padding-left:1.25em}img[align=left]{padding-right:1.25em}table{border-collapse:collapse;border-spacing:0;display:block;margin-bottom:16px;margin-top:0;overflow:auto;width:100%}table tr{background-color:transparent;border-top:1px solid #dfe2e5}table tr:nth-child(2n){background-color:#f6f8fa}table td,table th{border:1px solid #dfe2e5;padding:6px 13px}table th{background-color:inherit;font-weight:600}table td,table th{color:inherit}blockquote{color:#6a737d;font-size:16px;margin:0 0 16px;padding:0 1em}blockquote>:first-child{margin-top:0}blockquote>:last-child{margin-bottom:0}code{background-color:rgba(27,31,35,.05);border-radius:3px;color:inherit;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:85%;margin:0;padding:3.2px 6.4px}pre{margin-bottom:16px}pre code{background-color:transparent;border:0;display:inline;font-size:85%;line-height:inherit;margin:0;max-width:auto;overflow:visible;padding:0;white-space:pre;word-break:normal;word-wrap:normal}kbd{background-color:#fafbfc;border:1px solid #d1d5da;border-bottom-color:#c6cbd1;border-radius:3px;box-shadow:inset 0 -1px 0 #c6cbd1;color:#444d56;display:inline-block;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:68.75%;line-height:10px;padding:3px 5px;vertical-align:middle}hr{border:1px solid #dfe2e5;box-sizing:content-box;margin:1.5em 0;overflow:hidden;padding:0}hr:after,hr:before{content:"";display:table}hr:after{clear:both}body{background-color:#fff;min-height:100vh;text-rendering:optimizeLegibility}@media only screen and (min-width:736px){body{display:flex;flex-direction:column}}article:after,article:before{content:"";display:table}article:after{clear:both}article>:first-child{margin-top:0}article>:last-child{margin-bottom:0}article iframe,article p
img{display:block;margin-left:auto;margin-right:auto;max-width:100%}.anchor{display:block;position:relative;top:-80px}.hash-link{line-height:1;margin-left:-20px;opacity:0;padding-right:4px;transition:opacity .3s}.hash-link:hover{opacity:1!important;transition:none}.hash-link .hash-link-icon{vertical-align:middle}.button{border:1px solid #812ce5;border-radius:3px;color:#812ce5;display:inline-block;font-size:14px;font-weight:400;line-height:1.2em;padding:10px;text-decoration:none!important;text-transform:uppercase;transition:background .3s,color .3s}.button:hover{background:#812ce5;color:#fff}h1:hover .hash-link,h2:hover .hash-link,h3:hover .hash-link,h4:hover .hash-link{opacity:.5;transition:none}blockquote{background-color:rgba(255,229,100,.3);border-left:8px solid #ffe564;padding:15px 30px 15px 15px}.wrapper{margin:0 auto;max-width:1100px;padding:0 20px}.wrapper blockquote>p:first-child{padding-top:0}.center{display:block}.center,.homeContainer{text-align:center}.homeContainer .homeWrapper{padding:2em 10px}.homeContainer .homeWrapper .wrapper{margin:0 auto;max-width:900px;padding:0 20px}.homeContainer .homeWrapper .projectLogo img{height:100px;margin-bottom:0}.homeContainer .homeWrapper #project_title{font-size:300%;letter-spacing:-.08em;line-height:1em;margin-bottom:80px}.homeContainer .homeWrapper #project_tagline{font-size:200%;letter-spacing:-.04em;line-height:1em}.projectLogo{display:none;pointer-events:none}.projectLogo img{height:100px;margin-bottom:0}.projectIntro{margin:40px 0}.projectTitle{color:#812ce5;font-size:250%;line-height:1em}.projectTitle>small{display:block;font-weight:400;font-size:50%;line-height:1em;margin:.7em 0 1.3em}@media only screen and (min-width:480px){.projectTitle{font-size:300%;margin:.3em 0}.projectLogo img{height:200px;margin-bottom:10px}.homeContainer .homeWrapper{padding-left:10px;padding-right:10px}}@media only screen and (min-width:736px){.homeContainer .homeWrapper{position:relative}.homeContainer .homeWrapper #inner{max-width:600px;padding-right:40px}}@media only screen and (min-width:1200px){.homeContainer .homeWrapper #inner{max-width:750px}.homeContainer .homeWrapper .projectLogo{align-items:center;bottom:0;display:flex;justify-content:flex-end;left:0;padding:2em 100px 4em;position:absolute;right:0;top:0}.homeContainer .homeWrapper .projectLogo img{height:100%;max-height:250px}}@media only screen and (min-width:1500px){.homeContainer .homeWrapper #inner{max-width:1100px;padding-bottom:40px;padding-top:40px}.wrapper{max-width:1400px}}.mainContainer{flex:1 1 0%;max-width:100%;padding:40px 0}.mainContainer .wrapper{text-align:left}.mainContainer .wrapper .allShareBlock{padding:10px 0}.mainContainer .wrapper .allShareBlock .pluginBlock{margin:12px 0;padding:0}.mainContainer .wrapper .post{position:relative}.mainContainer .wrapper .post.basicPost{margin-top:30px}.mainContainer .wrapper .post .postHeader{margin-bottom:16px}.mainContainer .wrapper .post .postHeaderTitle{margin-top:0;padding:0}.docsContainer .wrapper .post .postHeader:before,.docsContainer .wrapper .post .postHeaderTitle:before{content:"";display:block;height:90px;margin-top:-90px;visibility:hidden;pointer-events:none}.mainContainer .wrapper .post .postSocialPlugins{padding-top:1em}.mainContainer .wrapper .post .docPagination{background:#812ce5;bottom:0;left:0;position:absolute;right:0}.mainContainer .wrapper .post .docPagination .pager{display:inline-block;width:50%}.mainContainer .wrapper .post .docPagination .pagingNext{float:right;text-align:right}.mainContainer .wrapper .post 
.docPagination a{border:none;color:#fff;display:block;padding:4px 12px}.mainContainer .wrapper .post .docPagination a:hover{background-color:#f9f9f9;color:#393939}.mainContainer .wrapper .post .docPagination a .pagerLabel{display:inline}.mainContainer .wrapper .post .docPagination a .pagerTitle{display:none}@media only screen and (min-width:480px){.mainContainer .wrapper .post .docPagination a .pagerLabel{display:none}.mainContainer .wrapper .post .docPagination a .pagerTitle{display:inline}}@media only screen and (min-width:1024px){.mainContainer .wrapper .post{display:block}.mainContainer .wrapper .posts .post{width:100%}}@media only screen and (max-width:1023px){.docsContainer .wrapper .post .postHeader:before,.docsContainer .wrapper .post .postHeaderTitle:before{content:"";display:block;height:200px;margin-top:-200px;visibility:hidden;pointer-events:none}}.fixedHeaderContainer{background:#812ce5;color:#fff;min-height:50px;padding:8px 0;position:fixed;width:100%;z-index:9999;transform:translateZ(0)}@media only screen and (min-width:1024px){.fixedHeaderContainer{flex-shrink:0}}.fixedHeaderContainer a{align-items:center;border:0;color:#fff;display:flex;flex-flow:row nowrap;height:34px;z-index:10000}.fixedHeaderContainer header{display:flex;flex-flow:row nowrap;position:relative;text-align:left}.fixedHeaderContainer header img{height:100%;margin-right:10px}.fixedHeaderContainer header .headerTitle{font-size:1.25em;margin:0}.fixedHeaderContainer header .headerTitleWithLogo{font-size:1.25em;line-height:18px;margin:0;position:relative;z-index:9999}.fixedHeaderContainer header h3{color:#fff;font-size:16px;margin:0 0 0 10px;text-decoration:underline}@media (max-width:480px){.headerTitle{font-size:17px}.headerTitleWithLogo{display:none!important}}.promoSection{display:flex;flex-flow:column wrap;font-size:125%;line-height:1.6em;position:relative;z-index:99}.promoSection .promoRow{padding:10px 0}.promoSection .promoRow .pluginWrapper{display:block}.promoSection .promoRow .pluginWrapper.ghStarWrapper,.promoSection .promoRow .pluginWrapper.ghWatchWrapper{height:28px}.promoSection .promoRow .pluginRowBlock{display:flex;flex-wrap:wrap;justify-content:center;margin:0 -2px}.promoSection .promoRow .pluginRowBlock .pluginWrapper{padding:0 2px}.promoSection .promoRow .pluginRowBlock iframe{margin-left:2px;margin-top:5px}input[type=search]{-moz-appearance:none;-webkit-appearance:none}.navSearchWrapper{align-items:center;align-self:center;display:flex;justify-content:center;padding-left:10px;position:absolute;right:10px;top:10px}.navSearchWrapper:before{border:3px solid #e5e5e5;border-radius:50%;content:" ";display:block;height:6px;left:15px;position:absolute;top:50%;transform:translateY(-58%);width:6px;z-index:1}.navSearchWrapper:after{background:#e5e5e5;content:" ";height:7px;left:24px;position:absolute;top:55%;transform:rotate(-45deg);width:3px;z-index:1}.navSearchWrapper .aa-dropdown-menu{background:#f9f9f9;border:3px solid rgba(57,57,57,.25);color:#393939;font-size:14px;left:auto!important;line-height:1.2em;right:0!important}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--category-header{background:#812ce5;color:#fff;font-size:14px;font-weight:400}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--highlight{background-color:#812ce5;color:#fff}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column .algolia-docsearch-suggestion--highlight,.navSearchWrapper .aa-dropdown-menu 
.algolia-docsearch-suggestion--title .algolia-docsearch-suggestion--highlight{color:#812ce5}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column,.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion__secondary{border-color:rgba(57,57,57,.3)}input#search_input_react{background-color:rgba(0,0,0,.2);border:none;border-radius:20px;color:#fff;font-size:14px;font-weight:300;line-height:20px;outline:none;padding-left:25px;position:relative;transition:width .5s ease;width:170px}.navSearchWrapper:before{left:24px}.navSearchWrapper:after{left:35px}input#search_input_react:active,input#search_input_react:focus{color:#fff;width:220px}.navigationSlider .slidingNav .navSearchWrapper .algolia-docsearch-footer a{height:auto}@media only screen and (max-width:735px){.navSearchWrapper{width:40%}}input::-moz-placeholder{color:#e5e5e5}input:-ms-input-placeholder{color:#e5e5e5}input::placeholder{color:#e5e5e5}.hljs{padding:1.25rem 1.5rem}.gridBlock{padding:0}.gridBlock>*{box-sizing:border-box}.gridBlock .fourByGridBlock img,.gridBlock .threeByGridBlock img,.gridBlock .twoByGridBlock img{max-width:100%}.gridBlock .gridClear{clear:both}@media only screen and (max-width:735px){.gridBlock .fourByGridBlock{flex:1 0 26%}}@media only screen and (min-width:736px){.gridBlock{display:flex;flex-direction:row;flex-wrap:wrap}.gridBlock>*{margin:0 12px}.gridBlock>:first-child{margin-left:0}.gridBlock>:last-child{margin-right:0}.gridBlock .twoByGridBlock{flex:1 0 40%}.gridBlock .threeByGridBlock{flex:1 0 26%}.gridBlock .fourByGridBlock{flex:1 0 20%}h2+.gridBlock{padding-top:20px}}@media only screen and (min-width:1400px){.gridBlock{display:flex;flex-direction:row;flex-wrap:wrap}}.alignCenter{text-align:center}.alignRight{text-align:right}.imageAlignSide{display:flex;flex-flow:row wrap}.blockImage{max-width:730px}.imageAlignSide .blockImage{flex:0 1 500px;max-width:500px}@media only screen and (max-width:735px){.imageAlignSide .blockImage{display:none}}.imageAlignSide .blockContent{flex:1 1}.imageAlignBottom .blockImage{margin:0 auto 20px;max-width:730px}.imageAlignBottom.alignCenter .blockImage{margin-left:auto;margin-right:auto}.imageAlignTop .blockImage{max-width:80px}.imageAlignTop.alignCenter .blockImage{margin-left:auto;margin-right:auto}.imageAlignRight .blockImage{margin-left:40px}.imageAlignLeft .blockImage{margin-right:40px}.container .gridBlock .blockContent p{padding:0}.container .wrapper .alignCenter h2{text-align:center}.container .wrapper .imageAlignSide h2{text-align:left}.container .wrapper .imageAlignSide p{margin:0 0 40px;max-width:560px}.highlightBackground{background:rgba(153,66,79,.7);color:#fff}.highlightBackground a{font-weight:800}.container.highlightBackground .wrapper h1,.container.highlightBackground .wrapper h2,.container.highlightBackground .wrapper h3,.container.highlightBackground .wrapper h4,.container.highlightBackground .wrapper h5,.highlightBackground a{border-color:#fff;color:#fff}.lightBackground{background:#f7f7f7}.darkBackground{background:grey;color:#fff}.darkBackground a,.darkBackground code{color:#d6b3b8}.container.darkBackground .wrapper h1,.container.darkBackground .wrapper h2,.container.darkBackground .wrapper h3,.container.darkBackground .wrapper h4,.container.darkBackground .wrapper h5{border-color:#fff;color:#fff}.container.paddingAll{padding:40px}.container.paddingBottom{padding-bottom:80px}.container.paddingLeft{padding-left:40px}.container.paddingRight{padding-right:40px}.container.paddingTop{padding-top:80px}@media only screen and 
(max-width:735px){.container.paddingBottom{padding-bottom:40px}.container.paddingTop{padding-top:20px}}@media only screen and (max-width:1023px){.responsiveList .blockContent{position:relative}.responsiveList .blockContent>div{padding-left:20px}.responsiveList .blockContent:before{content:"\2022";position:absolute}}.navigationSlider .navSlideout{cursor:pointer;padding-top:4px;position:absolute;right:10px;top:0;transition:top .3s;z-index:101}.navigationSlider .slidingNav{bottom:auto;box-sizing:border-box;left:0;position:fixed;right:0;top:0}.navigationSlider .slidingNav.slidingNavActive{height:auto;padding-top:42px;width:300px}.navigationSlider .slidingNav ul{background:#ffaf00;box-sizing:border-box;color:#fff;display:flex;flex-wrap:nowrap;list-style:none;margin-top:50px;padding:0;width:100%}.navigationSlider .slidingNav.slidingNavActive ul{display:block}.navigationSlider .slidingNav ul li{flex:1 1 auto;margin:0;text-align:center;white-space:nowrap}.navigationSlider .slidingNav ul li a{align-items:center;box-sizing:border-box;color:#812ce5;color:inherit;display:flex;font-size:.9em;height:auto;height:50px;justify-content:center;margin:0;padding:10px;transition:background-color .3s}.navigationSlider .slidingNav ul li.siteNavGroupActive>a,.navigationSlider .slidingNav ul li.siteNavItemActive>a,.navigationSlider .slidingNav ul li>a:focus,.navigationSlider .slidingNav ul li>a:hover{background-color:#812ce5}.languages-icon{width:20px}#languages-dropdown{pointer-events:none;position:absolute;width:100%}#languages-dropdown.visible{display:flex}#languages-dropdown.hide{display:none}#languages-dropdown-items{background-color:#812ce5;display:flex;flex-direction:column;min-width:120px;pointer-events:all}#languages li{display:block}.navPusher{left:0;min-height:100%;padding-top:100px;position:relative;z-index:99}.singleRowMobileNav.navPusher{padding-top:50px}.navPusher:after{background:rgba(0,0,0,.4);content:"";height:0;opacity:0;position:absolute;right:0;top:0;transition:opacity .5s,width .1s .5s,height .1s .5s;width:0}@media screen and (min-width:1024px){.navPusher{display:flex;flex-direction:column;min-height:calc(100vh - 50px);padding-top:50px}.navPusher,.navPusher>:first-child{flex-grow:1}}.sliderActive .navPusher:after{height:100%;opacity:1;transition:opacity .5s;width:100%;z-index:100}@media only screen and (max-width:1024px){.reactNavSearchWrapper input#search_input_react{background-color:rgba(242,196,178,.25);border:none;border-radius:20px;box-sizing:border-box;color:#393939;font-size:14px;line-height:20px;outline:none;padding-left:38px;position:relative;transition:background-color .2s cubic-bezier(.68,-.55,.265,1.55),width .2s cubic-bezier(.68,-.55,.265,1.55),color .2s ease;width:100%;height:30px}.reactNavSearchWrapper input#search_input_react:active,.reactNavSearchWrapper input#search_input_react:focus{background-color:#812ce5;color:#fff}.reactNavSearchWrapper .algolia-docsearch-suggestion--subcategory-inline{display:none}.reactNavSearchWrapper>span{width:100%}.reactNavSearchWrapper .aa-dropdown-menu{font-size:12px;line-height:2em;padding:0;border-width:1px;min-width:500px}.reactNavSearchWrapper .algolia-docsearch-suggestion__secondary{border-top:none}.aa-suggestions{min-height:140px;max-height:60vh;-webkit-overflow-scrolling:touch;overflow-y:scroll}#languages-dropdown{left:0;top:50px}#languages-dropdown-items{background-color:#812ce5;display:flex;flex-direction:row}}@media only screen and 
(min-width:1024px){.navSearchWrapper{padding-left:10px;position:relative;right:auto;top:auto}.reactNavSearchWrapper input#search_input_react{height:100%;padding-top:8px;padding-bottom:8px;padding-left:38px}.navSearchWrapper .algolia-autocomplete{display:block}.navigationSlider{height:34px;margin-left:auto;position:relative}.navigationSlider .navSlideout{display:none}.navigationSlider nav.slidingNav{background:none;height:auto;position:relative;right:auto;top:auto;width:auto}.navigationSlider .slidingNav ul{background:none;display:flex;flex-flow:row nowrap;margin:0;padding:0;width:auto}.navigationSlider .slidingNav ul li a{border:0;color:hsla(0,0%,100%,.8);display:flex;font-size:16px;font-size:1em;font-weight:300;height:32px;line-height:1.2em;margin:0;padding:6px 10px}.navigationSlider .slidingNav ul li.siteNavGroupActive a,.navigationSlider .slidingNav ul li.siteNavItemActive a,.navigationSlider .slidingNav ul li a:hover{color:#fff}}@media only screen and (max-width:735px){.navigationSlider .slidingNav ul{overflow-x:auto}.navigationSlider .slidingNav ul::-webkit-scrollbar{display:none}.reactNavSearchWrapper .aa-dropdown-menu{min-width:400px}}@media only screen and (max-width:475px){.reactNavSearchWrapper .aa-dropdown-menu{min-width:300px}}.docMainWrapper .wrapper{padding-left:0;padding-right:0;padding-top:10px}@media only screen and (min-width:1024px){.docMainWrapper{width:100%}.docMainWrapper>*{margin:0 24px}.docMainWrapper>:first-child{margin-left:0}.docMainWrapper>:last-child{margin-right:0}.docMainWrapper .mainContainer{min-width:0}}.edit-page-link{float:right;font-size:10px;font-weight:400;margin-top:3px;text-decoration:none}@media only screen and (max-width:1023px){.edit-page-link{display:none}}.docLastUpdate{font-size:13px;font-style:italic;margin:20px 0;text-align:right}.docs-prevnext{margin:20px 0}.docs-prevnext:after{clear:both;content:" ";display:table}.docs-next{float:right}.docs-prev{float:left}@media only screen and (max-width:735px){.docs-next{clear:both;float:left}.docs-next,.docs-prev{margin:10px 0}.arrow-next{float:right;margin-left:10px}.arrow-prev{float:left;margin-right:10px}.function-name-prevnext{width:200px;display:inline-block;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}}.hide{display:none}.collapsible{cursor:pointer}.collapsible .arrow{float:right;margin-right:8px;margin-top:-4px;transform:rotate(90deg);transition:transform .2s linear}.collapsible .arrow.rotate{transform:rotate(180deg)}@media only screen and (max-width:1023px){.docsNavContainer{background:#fff;left:0;position:fixed;width:100%;z-index:100}}@media only screen and (min-width:1024px){.docsNavContainer{flex:0 0 240px;height:calc(100vh - 50px);position:-webkit-sticky;position:sticky;overflow-y:auto;top:50px}}.docsSliderActive.docsNavContainer{box-sizing:border-box;height:100%;-webkit-overflow-scrolling:touch;overflow-y:auto;-ms-scroll-chaining:none;overscroll-behavior:contain;padding-bottom:50px}.docsNavContainer .toc .navBreadcrumb{background-color:#f1f1f1;box-sizing:border-box;display:flex;flex-flow:row nowrap;font-size:12px;height:48px;overflow:hidden;padding:8px 20px}.docsNavContainer .toc .navWrapper{padding:0}@media only screen and (min-width:1024px){.docsNavContainer .toc .navBreadcrumb{display:none}.navBreadcrumb h2{padding:0 10px}.separateOnPageNav .docsNavContainer{flex:0 0 240px}}.navBreadcrumb a,.navBreadcrumb span{border:0;color:#393939}@media only screen and (max-width:735px){.anchor{top:-144px}}@media only screen and (min-width:1024px){.toc{padding:40px 0}}.toc 
section{padding:0;position:relative}.toc section .navGroups{display:none;padding:48px 20px 60px}.toc .toggleNav{color:#393939;position:relative}.toc .toggleNav .navToggle{cursor:pointer;height:32px;margin-right:10px;position:relative;text-align:left;width:18px}.hamburger-menu{position:absolute;top:6px;width:100%}.line1,.line2,.line3{width:100%;height:3px;background-color:#393939;margin:3px 0;transition:.4s;border-radius:10px}.docsSliderActive .hamburger-menu{top:12px}.docsSliderActive .line1{position:absolute;top:50%;transform:rotate(-45deg)}.docsSliderActive .line2{display:none}.docsSliderActive .line3{position:absolute;top:50%;transform:rotate(45deg)}.toggleNav h2 i{padding:0 4px}.toc .toggleNav .navGroup{margin-bottom:16px}.toc .toggleNav .subNavGroup{margin-bottom:0}.toc .toggleNav .navGroup .navGroupCategoryTitle{color:#393939;font-size:18px;font-weight:500;line-height:1.2em;margin-bottom:8px;margin-top:0}.toc .toggleNav .navGroup .navGroupSubcategoryTitle{color:#393939;font-size:14px;font-weight:500;line-height:1.5;margin-bottom:0;margin-top:0;padding:4px 0}.toc .toggleNav .navGroup .navListItem{margin:0}.toc .toggleNav .navGroup h3 i:not(:empty){box-sizing:border-box;color:rgba(57,57,57,.5);display:inline-block;height:16px;margin-right:10px;text-align:center;transition:color .2s;width:16px}.toc .toggleNav ul{padding:0 8px}.docsSliderActive .toc .toggleNav ul{padding-left:0}.toc .toggleNav ul li{list-style-type:none;padding:0}.toc .toggleNav ul li a{border:none;color:#717171;display:block;font-size:14px;padding:4px 0;transition:color .3s}.toc .toggleNav ul li.navListItemActive a,.toc .toggleNav ul li a:focus,.toc .toggleNav ul li a:hover{color:#812ce5}.docsSliderActive .toc .navBreadcrumb,.tocActive .navBreadcrumb{border-bottom:1px solid #ccc;margin-bottom:20px;position:fixed;width:100%}.toc .toggleNav .navBreadcrumb h2{border:0;flex-grow:1;font-size:16px;font-weight:600;line-height:32px;margin:0;padding:0}.docsSliderActive .toc section .navGroups{display:block;padding-top:60px}.tocToggler{cursor:pointer;height:32px;line-height:32px;margin-right:-10px;padding:0 10px}.icon-toc{box-sizing:border-box;display:inline-block;line-height:normal;position:relative;top:-1px;vertical-align:middle}.icon-toc,.icon-toc:after,.icon-toc:before{background-color:currentColor;border:1px solid;border-radius:50%;box-sizing:border-box;height:4px;width:4px}.icon-toc:after,.icon-toc:before{content:"";position:absolute}.icon-toc:before{left:-1px;top:-7px}.icon-toc:after{left:-1px;top:5px}.tocActive .icon-toc{border-radius:0;height:16px;transform:rotate(45deg);width:3px}.tocActive .icon-toc:before{border-radius:0;height:3px;left:50%;top:50%;transform:translate(-50%,-50%);width:16px}.tocActive .icon-toc:after{content:""}@media only screen and (min-width:1024px){.docMainWrapper{display:flex;flex-flow:row nowrap}.docMainWrapper .wrapper{padding-top:0;padding-left:0;padding-right:0}}.onPageNav{display:none;margin-bottom:40px}.onPageNav::-webkit-scrollbar{width:7px}.onPageNav::-webkit-scrollbar-track{background:#f1f1f1;border-radius:10px}.onPageNav::-webkit-scrollbar-thumb{background:#888;border-radius:10px}.onPageNav::-webkit-scrollbar-thumb:hover{background:#555}.onPageNav a{color:#717171}.onPageNav .toc-headings>li>a.active,.onPageNav .toc-headings>li>a.hover{font-weight:600;color:#812ce5}.onPageNav ul{list-style:none}.onPageNav ul li{font-size:12px;line-height:16px;padding-bottom:8px}.onPageNav ul ul{padding:8px 0 0 20px}.onPageNav ul ul li{padding-bottom:5px}@media only screen and (min-width:1024px){.toc 
section .navGroups{display:block;padding:8px 0 0}.navBreadcrumb h2{padding:0 10px}}@supports ((position: -webkit-sticky) or (position: sticky)){@media only screen and (max-width:1023px){.tocActive .onPageNav{background:#fff;bottom:0;display:block;left:0;overflow-y:auto;-ms-scroll-chaining:none;overscroll-behavior:contain;padding:0 20px;position:fixed;right:0;top:148px;z-index:10;margin-bottom:0}.tocActive .singleRowMobileNav .onPageNav{top:98px}.tocActive .navBreadcrumb h2,.tocActive .navToggle{visibility:hidden}.tocActive .onPageNav>.toc-headings{padding:12px 0}}@media only screen and (min-width:1024px){.separateOnPageNav .headerWrapper.wrapper,.separateOnPageNav .wrapper{max-width:1400px}.separateOnPageNav .toc{width:auto}.separateOnPageNav.sideNavVisible .navPusher .mainContainer{flex:1 auto;max-width:100%;min-width:0}.onPageNav{align-self:flex-start;display:block;flex:0 0 240px;max-height:calc(100vh - 90px);overflow-y:auto;position:-webkit-sticky;position:sticky;top:90px}.onPageNav>.toc-headings{border-left:1px solid #e0e0e0;padding:10px 0 2px 15px}.tocToggler{display:none}}}.blog .wrapper{max-width:1100px}.blogContainer .posts .post{border-bottom:1px solid #e0e0e0;border-radius:3px;margin-bottom:20px;padding-bottom:20px}.blogContainer .postHeader{margin-bottom:10px}.blogContainer .postHeaderTitle{margin-top:0}.blogContainer .postHeader p.post-meta{margin-bottom:10px;padding:0}.blogContainer .postHeader .authorBlock{display:flex}.blogContainer .postHeader .post-authorName{color:rgba(57,57,57,.7);display:flex;flex-direction:column;font-size:14px;font-weight:400;justify-content:center;margin-right:10px;margin-top:0;margin-bottom:0;padding:0}.blogContainer .postHeader .authorPhoto{border-radius:50%;height:30px;overflow:hidden;width:30px}.blogContainer .postHeader .authorPhoto.authorPhotoBig{height:50px;width:50px}.blog-recent{margin:20px 0}.blog-recent>a{float:left}@media only screen and (max-width:735px){.blog-recent{height:40px}}.blogSocialSection{display:block;padding:36px 0}.blogSocialSection .blogSocialSectionItem{padding-bottom:5px}.fb-like{display:block;margin-bottom:20px;width:100%}.more-users{margin:0 auto;max-width:560px;text-align:center}.productShowcaseSection{padding:0 20px;text-align:center}.productShowcaseSection.paddingTop{padding-top:20px}.productShowcaseSection.paddingBottom{padding-bottom:80px}.productShowcaseSection h2{color:#812ce5;font-size:30px;line-height:1em;margin-top:20px;padding:10px 0;text-align:center}.productShowcaseSection p{margin:0 auto;max-width:560px;padding:.8em 0}.productShowcaseSection .logos{align-items:center;display:flex;flex-flow:row wrap;justify-content:center;padding:20px}.productShowcaseSection .logos img{max-height:110px;padding:20px;width:110px}@media only screen and (max-width:735px){.productShowcaseSection .logos img{max-height:64px;padding:20px;width:64px}}.showcaseSection{margin:0 auto;max-width:900px}.showcaseSection,.showcaseSection .prose h1{text-align:center}.showcaseSection .prose{margin:0 auto;max-width:560px;text-align:center}.showcaseSection .logos{align-items:center;display:flex;flex-flow:row wrap;justify-content:center}.showcaseSection .logos img{max-height:128px;padding:20px;width:128px}@media only screen and (max-width:735px){.showcaseSection .logos img{max-height:64px;padding:20px;width:64px}}.nav-footer{background:#20232a;border:none;color:#202020;font-size:15px;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-weight:400;line-height:24px;padding-bottom:2em;padding-top:2em;position:relative}@media 
only screen and (min-width:1024px){.nav-footer{flex-shrink:0}}.nav-footer .sitemap{display:flex;justify-content:space-between;margin:0 auto 3em;max-width:1080px}.nav-footer .sitemap div{flex:1}.nav-footer .sitemap .nav-home{display:table;height:72px;margin:-12px 20px 0 0;opacity:.4;padding:10px;transition:opacity .15s ease-in-out;width:72px}.nav-footer .sitemap .nav-home:focus,.nav-footer .sitemap .nav-home:hover{opacity:1}@media only screen and (max-width:735px){.nav-footer .sitemap{display:flex;flex-direction:column;margin:0 2em 3em;width:calc(100% - 4em)}.nav-footer .sitemap>div{margin-bottom:18px}}.nav-footer .sitemap a{color:hsla(0,0%,100%,.6);display:block;margin:2px 0;padding:3px 0}.nav-footer .sitemap a:focus,.nav-footer .sitemap a:hover,.nav-footer .sitemap h5>a:focus,.nav-footer .sitemap h5>a:hover{color:#fff;text-decoration:none}.nav-footer .sitemap h5,.nav-footer .sitemap h6{margin:0 0 10px}.nav-footer .sitemap h5,.nav-footer .sitemap h5>a,.nav-footer .sitemap h6,.nav-footer .sitemap h6>a{color:#fff}.nav-footer .sitemap h5>a,.nav-footer .sitemap h6>a{margin:0 -10px}.nav-footer .fbOpenSource{display:block;margin:1em auto;opacity:.4;transition:opacity .15s ease-in-out;width:170px}.nav-footer .fbOpenSource:hover{opacity:1}.nav-footer .copyright{color:hsla(0,0%,100%,.4);text-align:center}.nav-footer .social{padding:5px 0}.tabs{border-top:1px solid #cfcfcf}.nav-tabs{display:flex;border-bottom:4px solid #e0e0e0;width:100%;padding:0;overflow-x:auto;white-space:nowrap;max-height:100%}.nav-tabs::-webkit-scrollbar{display:none}.tabs .tab-pane:focus{outline:none}.tabs .nav-tabs>div{font-size:14px;line-height:1.14286;padding:12px 16px;text-decoration:none;display:block;cursor:pointer}.tabs .nav-tabs>div.active{border-bottom:4px solid #812ce5}.tab-pane{display:none}.tab-pane.active{display:block}.tab-pane>pre{white-space:pre-wrap}.tab-pane>pre>code{margin-top:0;border-radius:0;box-shadow:none}html body{font-family:Montserrat,sans-serif;overflow-x:hidden}.fixedHeaderContainer{background-color:#222}.fixedHeaderContainer header .headerTitleWithLogo{display:block;color:#fff}.fixedHeaderContainer header .logo{height:50px}.fixedHeaderContainer header a:nth-child(2){position:absolute;right:0}.fixedHeaderContainer header a:nth-child(2) h3{font-size:14px}.fixedHeaderContainer header a:nth-child(2) h3:before{content:"v: "}.navigationSlider{margin-right:80px}.navigationSlider .slidingNav ul{background:#222}.navigationSlider .slidingNav ul li a{color:#c7d4fd}.navigationSlider .slidingNav ul li a:focus,.navigationSlider .slidingNav ul li a:hover{color:#fff;background-color:inherit}.navigationSlider .slidingNav ul li.siteNavGroupActive>a,.navigationSlider .slidingNav ul li.siteNavItemActive>a{background-color:inherit}.homeContainer{background:linear-gradient(#812ce5,#ffaf00);padding:25px 0}.splashLogo{display:block;margin:0 auto;width:65%}.projectTitle{color:#fff;font-variant:small-caps;font-weight:300}.promoSection .button{border:2px solid #fff;color:#fff;font-size:19px;margin:10px}.promoSection .button:hover{background:inherit;border:2px solid #fff;color:#fff}.landingPage{padding:0}.productShowcaseSection{padding:45px 20px 30px}div.productShowcaseSection{color:#6c6c6c;padding-top:40px}#quickstart{padding-top:80px}.productShowcaseSection>h2{font-variant:small-caps;font-weight:360;margin:0;padding:0;color:#5b1861}.productShowcaseSection p{font-weight:360}# Subtitles for key features .productShowcaseSection .blockContent>div span p{font-size:18px}.productShowcaseSection div.container{padding:10px 0 
40px}.productShowcaseSection img{height:100px}.gridBlock .fourByGridBlock img{max-width:200%}.productShowcaseSection li{padding:10px 0}.productShowcaseSection pre{margin:10px 0}.productShowcaseSection code{background:#fff}.container .wrapper .alignCenter h2{color:#222}div#quickstart{background:#efefef}div#quickstart ol{margin-bottom:0}.nav-footer{background-color:#222}.nav-footer .sitemap a{color:#c7d4fd}.nav-footer .sitemap a:hover{color:#fff}.social{text-align:center}a,a:hover,p a,p a:hover{color:#4872f9}.imageAlignTop .blockImage{margin-bottom:20px;max-width:200px}.tutorialBody{margin-top:-20px;color:#6c6c6c}.tutorialBody h1{margin:0}.tutorialBody h1,.tutorialBody h2,.tutorialBody h3{color:#222}.tutorialBody pre{font-family:IBM Plex Mono,monospace;font-size:14px;margin:0}.tutorialBody .input_prompt,.tutorialBody .output_prompt{color:#8b0000;font-size:12px}.tutorialBody .highlight{background:#f3f4f7;padding:10px 20px;border:1px solid #d3d3d3;border-radius:3px}.tutorialBody .cell{margin:20px}.tutorialBody .output_stderr{background-color:#fdede9}.tutorialBody .anchor-link{color:#d3d3d3}.tutorialBody iframe{width:100%;height:100vh}.tutorialButtonWrapper,.tutorialRuntime{margin:20px}.colabButtonWrapper,.tutorialButtonWrapper{float:left;margin:5px}.colabButtonWrapper img{padding-right:.25em}.colabButton{width:24px}.tutorialButtonsWrapper{display:flex;align-items:center;padding-bottom:15px}.tutorialButton svg{height:15px;margin-right:5px}.tutorialButton:hover{color:#4872f9;background-color:inherit}.wrapper{max-width:1400px}@media only screen and (min-device-width:360px) and (max-device-width:736px){.fixedHeaderContainer header a:nth-child(2){position:absolute;right:150px}.promoSection .button{font-size:12px;margin:3px}.inner h2{margin-top:0}.splashLogo{width:90%}.headerTitleWithLogo{display:block!important}.blockContent>div span p{margin-bottom:30px}.productShowcaseSection div.container{padding-top:0}.productShowcaseSection>h2{padding-bottom:20px}}@media only screen and (max-width:1023px){.fixedHeaderContainer header a:nth-child(2){position:absolute;right:200px}}.highlight .hll{background-color:#ffc}.highlight .c{color:#60a0b0;font-style:italic}.highlight .err{border:1px solid red}.highlight .k{color:#007020;font-weight:700}.highlight .o{color:#666}.highlight .cm{color:#60a0b0;font-style:italic}.highlight .cp{color:#007020}.highlight .c1{color:#60a0b0;font-style:italic}.highlight .cs{color:#60a0b0;background-color:#fff0f0}.highlight .gd{color:#a00000}.highlight .ge{font-style:italic}.highlight .gr{color:red}.highlight .gh{color:navy;font-weight:700}.highlight .gi{color:#00a000}.highlight .go{color:grey}.highlight .gp{color:#c65d09}.highlight .gp,.highlight .gs,.highlight .gu{font-weight:700}.highlight .gu{color:purple}.highlight .gt{color:#0040d0}.highlight .kc,.highlight .kd,.highlight .kn{color:#007020;font-weight:700}.highlight .kp{color:#007020}.highlight .kr{color:#007020;font-weight:700}.highlight .kt{color:#902000}.highlight .m{color:#40a070}.highlight .na,.highlight .s{color:#4070a0}.highlight .nb{color:#007020}.highlight .nc{color:#0e84b5;font-weight:700}.highlight .no{color:#60add5}.highlight .nd{color:#555;font-weight:700}.highlight .ni{color:#d55537;font-weight:700}.highlight .ne{color:#007020}.highlight .nf{color:#06287e}.highlight .nl{color:#002070;font-weight:700}.highlight .nn{color:#0e84b5;font-weight:700}.highlight .nt{color:#062873;font-weight:700}.highlight .nv{color:#bb60d5}.highlight .ow{color:#007020;font-weight:700}.highlight .w{color:#bbb}.highlight .mf,.highlight 
.mh,.highlight .mi,.highlight .mo{color:#40a070}.highlight .sb,.highlight .sc{color:#4070a0}.highlight .sd{color:#4070a0;font-style:italic}.highlight .s2{color:#4070a0}.highlight .se{color:#4070a0;font-weight:700}.highlight .sh{color:#4070a0}.highlight .si{color:#70a0d0;font-style:italic}.highlight .sx{color:#c65d09}.highlight .sr{color:#235388}.highlight .s1{color:#4070a0}.highlight .ss{color:#517918}.highlight .bp{color:#007020}.highlight .vc,.highlight .vg,.highlight .vi{color:#bb60d5}.highlight .il{color:#40a070}
\ No newline at end of file
+a,abbr,acronym,address,applet,article,aside,audio,b,big,blockquote,body,canvas,caption,center,cite,code,dd,del,details,dfn,div,dl,dt,em,embed,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,html,i,iframe,img,ins,kbd,label,legend,li,mark,menu,nav,object,ol,output,p,pre,q,ruby,s,samp,section,small,span,strike,strong,sub,summary,sup,table,tbody,td,tfoot,th,thead,time,tr,tt,u,ul,var,video{border:0;font:inherit;font-size:100%;margin:0;padding:0;vertical-align:baseline}body{color:#24292e;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol;font-size:16px;line-height:1.5;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%;word-wrap:break-word}*{box-sizing:border-box}b,strong{font-weight:600}em,i{font-style:italic}[type=checkbox]{box-sizing:border-box;padding:0}a,a:hover{color:#812ce5;text-decoration:none}a:active,a:hover{outline-width:0}a:not([href]){color:inherit;text-decoration:none}p{margin-bottom:1em;margin-top:0}h1,h2,h3,h4,h5,h6{color:inherit;font-weight:600;line-height:1.25;margin-bottom:16px;margin-top:1.5em}h1{font-size:32px}h2{font-size:24px}h3{font-size:20px}h4{font-size:16px}h5{font-size:14px}h6{font-size:13.6px}ol,ul{margin-bottom:1em;margin-top:0;padding-left:2em}ol ol,ul ol{list-style-type:lower-roman}ol ol,ol ul,ul ol,ul ul{margin-bottom:0;margin-top:0}ol ol ol,ol ul ol,ul ol ol,ul ul ol{list-style-type:lower-alpha}li{word-wrap:break-all}li>p{margin-top:1em}li+li{margin-top:.25em}img{border-style:none;box-sizing:content-box;max-width:100%}img[align=right]{padding-left:1.25em}img[align=left]{padding-right:1.25em}table{border-collapse:collapse;border-spacing:0;display:block;margin-bottom:16px;margin-top:0;overflow:auto;width:100%}table tr{background-color:transparent;border-top:1px solid #dfe2e5}table tr:nth-child(2n){background-color:#f6f8fa}table td,table th{border:1px solid #dfe2e5;padding:6px 13px}table th{background-color:inherit;font-weight:600}table td,table th{color:inherit}blockquote{color:#6a737d;font-size:16px;margin:0 0 16px;padding:0 1em}blockquote>:first-child{margin-top:0}blockquote>:last-child{margin-bottom:0}code{background-color:rgba(27,31,35,.05);border-radius:3px;color:inherit;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:85%;margin:0;padding:3.2px 6.4px}pre{margin-bottom:16px}pre code{background-color:transparent;border:0;display:inline;font-size:85%;line-height:inherit;margin:0;max-width:auto;overflow:visible;padding:0;white-space:pre;word-break:normal;word-wrap:normal}kbd{background-color:#fafbfc;border:1px solid #d1d5da;border-bottom-color:#c6cbd1;border-radius:3px;box-shadow:inset 0 -1px 0 #c6cbd1;color:#444d56;display:inline-block;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:68.75%;line-height:10px;padding:3px 5px;vertical-align:middle}hr{border:1px solid #dfe2e5;box-sizing:content-box;margin:1.5em
0;overflow:hidden;padding:0}hr:after,hr:before{content:"";display:table}hr:after{clear:both}body{background-color:#fff;min-height:100vh;text-rendering:optimizeLegibility}@media only screen and (min-width:736px){body{display:flex;flex-direction:column}}article:after,article:before{content:"";display:table}article:after{clear:both}article>:first-child{margin-top:0}article>:last-child{margin-bottom:0}article iframe,article p img{display:block;margin-left:auto;margin-right:auto;max-width:100%}.anchor{display:block;position:relative;top:-80px}.hash-link{line-height:1;margin-left:-20px;opacity:0;padding-right:4px;transition:opacity .3s}.hash-link:hover{opacity:1!important;transition:none}.hash-link .hash-link-icon{vertical-align:middle}.button{border:1px solid #812ce5;border-radius:3px;color:#812ce5;display:inline-block;font-size:14px;font-weight:400;line-height:1.2em;padding:10px;text-decoration:none!important;text-transform:uppercase;transition:background .3s,color .3s}.button:hover{background:#812ce5;color:#fff}h1:hover .hash-link,h2:hover .hash-link,h3:hover .hash-link,h4:hover .hash-link{opacity:.5;transition:none}blockquote{background-color:rgba(255,229,100,.3);border-left:8px solid #ffe564;padding:15px 30px 15px 15px}.wrapper{margin:0 auto;max-width:1100px;padding:0 20px}.wrapper blockquote>p:first-child{padding-top:0}.center{display:block}.center,.homeContainer{text-align:center}.homeContainer .homeWrapper{padding:2em 10px}.homeContainer .homeWrapper .wrapper{margin:0 auto;max-width:900px;padding:0 20px}.homeContainer .homeWrapper .projectLogo img{height:100px;margin-bottom:0}.homeContainer .homeWrapper #project_title{font-size:300%;letter-spacing:-.08em;line-height:1em;margin-bottom:80px}.homeContainer .homeWrapper #project_tagline{font-size:200%;letter-spacing:-.04em;line-height:1em}.projectLogo{display:none;pointer-events:none}.projectLogo img{height:100px;margin-bottom:0}.projectIntro{margin:40px 0}.projectTitle{color:#812ce5;font-size:250%;line-height:1em}.projectTitle>small{display:block;font-weight:400;font-size:50%;line-height:1em;margin:.7em 0 1.3em}@media only screen and (min-width:480px){.projectTitle{font-size:300%;margin:.3em 0}.projectLogo img{height:200px;margin-bottom:10px}.homeContainer .homeWrapper{padding-left:10px;padding-right:10px}}@media only screen and (min-width:736px){.homeContainer .homeWrapper{position:relative}.homeContainer .homeWrapper #inner{max-width:600px;padding-right:40px}}@media only screen and (min-width:1200px){.homeContainer .homeWrapper #inner{max-width:750px}.homeContainer .homeWrapper .projectLogo{align-items:center;bottom:0;display:flex;justify-content:flex-end;left:0;padding:2em 100px 4em;position:absolute;right:0;top:0}.homeContainer .homeWrapper .projectLogo img{height:100%;max-height:250px}}@media only screen and (min-width:1500px){.homeContainer .homeWrapper #inner{max-width:1100px;padding-bottom:40px;padding-top:40px}.wrapper{max-width:1400px}}.mainContainer{flex:1 1 0%;max-width:100%;padding:40px 0}.mainContainer .wrapper{text-align:left}.mainContainer .wrapper .allShareBlock{padding:10px 0}.mainContainer .wrapper .allShareBlock .pluginBlock{margin:12px 0;padding:0}.mainContainer .wrapper .post{position:relative}.mainContainer .wrapper .post.basicPost{margin-top:30px}.mainContainer .wrapper .post .postHeader{margin-bottom:16px}.mainContainer .wrapper .post .postHeaderTitle{margin-top:0;padding:0}.docsContainer .wrapper .post .postHeader:before,.docsContainer .wrapper .post 
.postHeaderTitle:before{content:"";display:block;height:90px;margin-top:-90px;visibility:hidden;pointer-events:none}.mainContainer .wrapper .post .postSocialPlugins{padding-top:1em}.mainContainer .wrapper .post .docPagination{background:#812ce5;bottom:0;left:0;position:absolute;right:0}.mainContainer .wrapper .post .docPagination .pager{display:inline-block;width:50%}.mainContainer .wrapper .post .docPagination .pagingNext{float:right;text-align:right}.mainContainer .wrapper .post .docPagination a{border:none;color:#fff;display:block;padding:4px 12px}.mainContainer .wrapper .post .docPagination a:hover{background-color:#f9f9f9;color:#393939}.mainContainer .wrapper .post .docPagination a .pagerLabel{display:inline}.mainContainer .wrapper .post .docPagination a .pagerTitle{display:none}@media only screen and (min-width:480px){.mainContainer .wrapper .post .docPagination a .pagerLabel{display:none}.mainContainer .wrapper .post .docPagination a .pagerTitle{display:inline}}@media only screen and (min-width:1024px){.mainContainer .wrapper .post{display:block}.mainContainer .wrapper .posts .post{width:100%}}@media only screen and (max-width:1023px){.docsContainer .wrapper .post .postHeader:before,.docsContainer .wrapper .post .postHeaderTitle:before{content:"";display:block;height:200px;margin-top:-200px;visibility:hidden;pointer-events:none}}.fixedHeaderContainer{background:#812ce5;color:#fff;min-height:50px;padding:8px 0;position:fixed;width:100%;z-index:9999;transform:translateZ(0)}@media only screen and (min-width:1024px){.fixedHeaderContainer{flex-shrink:0}}.fixedHeaderContainer a{align-items:center;border:0;color:#fff;display:flex;flex-flow:row nowrap;height:34px;z-index:10000}.fixedHeaderContainer header{display:flex;flex-flow:row nowrap;position:relative;text-align:left}.fixedHeaderContainer header img{height:100%;margin-right:10px}.fixedHeaderContainer header .headerTitle{font-size:1.25em;margin:0}.fixedHeaderContainer header .headerTitleWithLogo{font-size:1.25em;line-height:18px;margin:0;position:relative;z-index:9999}.fixedHeaderContainer header h3{color:#fff;font-size:16px;margin:0 0 0 10px;text-decoration:underline}@media (max-width:480px){.headerTitle{font-size:17px}.headerTitleWithLogo{display:none!important}}.promoSection{display:flex;flex-flow:column wrap;font-size:125%;line-height:1.6em;position:relative;z-index:99}.promoSection .promoRow{padding:10px 0}.promoSection .promoRow .pluginWrapper{display:block}.promoSection .promoRow .pluginWrapper.ghStarWrapper,.promoSection .promoRow .pluginWrapper.ghWatchWrapper{height:28px}.promoSection .promoRow .pluginRowBlock{display:flex;flex-wrap:wrap;justify-content:center;margin:0 -2px}.promoSection .promoRow .pluginRowBlock .pluginWrapper{padding:0 2px}.promoSection .promoRow .pluginRowBlock iframe{margin-left:2px;margin-top:5px}input[type=search]{-moz-appearance:none;-webkit-appearance:none}.navSearchWrapper{align-items:center;align-self:center;display:flex;justify-content:center;padding-left:10px;position:absolute;right:10px;top:10px}.navSearchWrapper:before{border:3px solid #e5e5e5;border-radius:50%;content:" ";display:block;height:6px;left:15px;position:absolute;top:50%;transform:translateY(-58%);width:6px;z-index:1}.navSearchWrapper:after{background:#e5e5e5;content:" ";height:7px;left:24px;position:absolute;top:55%;transform:rotate(-45deg);width:3px;z-index:1}.navSearchWrapper .aa-dropdown-menu{background:#f9f9f9;border:3px solid 
rgba(57,57,57,.25);color:#393939;font-size:14px;left:auto!important;line-height:1.2em;right:0!important}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--category-header{background:#812ce5;color:#fff;font-size:14px;font-weight:400}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--highlight{background-color:#812ce5;color:#fff}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column .algolia-docsearch-suggestion--highlight,.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--title .algolia-docsearch-suggestion--highlight{color:#812ce5}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column,.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion__secondary{border-color:rgba(57,57,57,.3)}input#search_input_react{background-color:rgba(0,0,0,.2);border:none;border-radius:20px;color:#fff;font-size:14px;font-weight:300;line-height:20px;outline:none;padding-left:25px;position:relative;transition:width .5s ease;width:170px}.navSearchWrapper:before{left:24px}.navSearchWrapper:after{left:35px}input#search_input_react:active,input#search_input_react:focus{color:#fff;width:220px}.navigationSlider .slidingNav .navSearchWrapper .algolia-docsearch-footer a{height:auto}@media only screen and (max-width:735px){.navSearchWrapper{width:40%}}input::-moz-placeholder{color:#e5e5e5}input:-ms-input-placeholder{color:#e5e5e5}input::placeholder{color:#e5e5e5}.hljs{padding:1.25rem 1.5rem}.gridBlock{padding:0}.gridBlock>*{box-sizing:border-box}.gridBlock .fourByGridBlock img,.gridBlock .threeByGridBlock img,.gridBlock .twoByGridBlock img{max-width:100%}.gridBlock .gridClear{clear:both}@media only screen and (max-width:735px){.gridBlock .fourByGridBlock{flex:1 0 26%}}@media only screen and (min-width:736px){.gridBlock{display:flex;flex-direction:row;flex-wrap:wrap}.gridBlock>*{margin:0 12px}.gridBlock>:first-child{margin-left:0}.gridBlock>:last-child{margin-right:0}.gridBlock .twoByGridBlock{flex:1 0 40%}.gridBlock .threeByGridBlock{flex:1 0 26%}.gridBlock .fourByGridBlock{flex:1 0 20%}h2+.gridBlock{padding-top:20px}}@media only screen and (min-width:1400px){.gridBlock{display:flex;flex-direction:row;flex-wrap:wrap}}.alignCenter{text-align:center}.alignRight{text-align:right}.imageAlignSide{display:flex;flex-flow:row wrap}.blockImage{max-width:730px}.imageAlignSide .blockImage{flex:0 1 500px;max-width:500px}@media only screen and (max-width:735px){.imageAlignSide .blockImage{display:none}}.imageAlignSide .blockContent{flex:1 1}.imageAlignBottom .blockImage{margin:0 auto 20px;max-width:730px}.imageAlignBottom.alignCenter .blockImage{margin-left:auto;margin-right:auto}.imageAlignTop .blockImage{max-width:80px}.imageAlignTop.alignCenter .blockImage{margin-left:auto;margin-right:auto}.imageAlignRight .blockImage{margin-left:40px}.imageAlignLeft .blockImage{margin-right:40px}.container .gridBlock .blockContent p{padding:0}.container .wrapper .alignCenter h2{text-align:center}.container .wrapper .imageAlignSide h2{text-align:left}.container .wrapper .imageAlignSide p{margin:0 0 40px;max-width:560px}.highlightBackground{background:rgba(153,66,79,.7);color:#fff}.highlightBackground a{font-weight:800}.container.highlightBackground .wrapper h1,.container.highlightBackground .wrapper h2,.container.highlightBackground .wrapper h3,.container.highlightBackground .wrapper h4,.container.highlightBackground .wrapper h5,.highlightBackground 
a{border-color:#fff;color:#fff}.lightBackground{background:#f7f7f7}.darkBackground{background:grey;color:#fff}.darkBackground a,.darkBackground code{color:#d6b3b8}.container.darkBackground .wrapper h1,.container.darkBackground .wrapper h2,.container.darkBackground .wrapper h3,.container.darkBackground .wrapper h4,.container.darkBackground .wrapper h5{border-color:#fff;color:#fff}.container.paddingAll{padding:40px}.container.paddingBottom{padding-bottom:80px}.container.paddingLeft{padding-left:40px}.container.paddingRight{padding-right:40px}.container.paddingTop{padding-top:80px}@media only screen and (max-width:735px){.container.paddingBottom{padding-bottom:40px}.container.paddingTop{padding-top:20px}}@media only screen and (max-width:1023px){.responsiveList .blockContent{position:relative}.responsiveList .blockContent>div{padding-left:20px}.responsiveList .blockContent:before{content:"\2022";position:absolute}}.navigationSlider .navSlideout{cursor:pointer;padding-top:4px;position:absolute;right:10px;top:0;transition:top .3s;z-index:101}.navigationSlider .slidingNav{bottom:auto;box-sizing:border-box;left:0;position:fixed;right:0;top:0}.navigationSlider .slidingNav.slidingNavActive{height:auto;padding-top:42px;width:300px}.navigationSlider .slidingNav ul{background:#ffaf00;box-sizing:border-box;color:#fff;display:flex;flex-wrap:nowrap;list-style:none;margin-top:50px;padding:0;width:100%}.navigationSlider .slidingNav.slidingNavActive ul{display:block}.navigationSlider .slidingNav ul li{flex:1 1 auto;margin:0;text-align:center;white-space:nowrap}.navigationSlider .slidingNav ul li a{align-items:center;box-sizing:border-box;color:#812ce5;color:inherit;display:flex;font-size:.9em;height:auto;height:50px;justify-content:center;margin:0;padding:10px;transition:background-color .3s}.navigationSlider .slidingNav ul li.siteNavGroupActive>a,.navigationSlider .slidingNav ul li.siteNavItemActive>a,.navigationSlider .slidingNav ul li>a:focus,.navigationSlider .slidingNav ul li>a:hover{background-color:#812ce5}.languages-icon{width:20px}#languages-dropdown{pointer-events:none;position:absolute;width:100%}#languages-dropdown.visible{display:flex}#languages-dropdown.hide{display:none}#languages-dropdown-items{background-color:#812ce5;display:flex;flex-direction:column;min-width:120px;pointer-events:all}#languages li{display:block}.navPusher{left:0;min-height:100%;padding-top:100px;position:relative;z-index:99}.singleRowMobileNav.navPusher{padding-top:50px}.navPusher:after{background:rgba(0,0,0,.4);content:"";height:0;opacity:0;position:absolute;right:0;top:0;transition:opacity .5s,width .1s .5s,height .1s .5s;width:0}@media screen and (min-width:1024px){.navPusher{display:flex;flex-direction:column;min-height:calc(100vh - 50px);padding-top:50px}.navPusher,.navPusher>:first-child{flex-grow:1}}.sliderActive .navPusher:after{height:100%;opacity:1;transition:opacity .5s;width:100%;z-index:100}@media only screen and (max-width:1024px){.reactNavSearchWrapper input#search_input_react{background-color:rgba(242,196,178,.25);border:none;border-radius:20px;box-sizing:border-box;color:#393939;font-size:14px;line-height:20px;outline:none;padding-left:38px;position:relative;transition:background-color .2s cubic-bezier(.68,-.55,.265,1.55),width .2s cubic-bezier(.68,-.55,.265,1.55),color .2s ease;width:100%;height:30px}.reactNavSearchWrapper input#search_input_react:active,.reactNavSearchWrapper input#search_input_react:focus{background-color:#812ce5;color:#fff}.reactNavSearchWrapper 
.algolia-docsearch-suggestion--subcategory-inline{display:none}.reactNavSearchWrapper>span{width:100%}.reactNavSearchWrapper .aa-dropdown-menu{font-size:12px;line-height:2em;padding:0;border-width:1px;min-width:500px}.reactNavSearchWrapper .algolia-docsearch-suggestion__secondary{border-top:none}.aa-suggestions{min-height:140px;max-height:60vh;-webkit-overflow-scrolling:touch;overflow-y:scroll}#languages-dropdown{left:0;top:50px}#languages-dropdown-items{background-color:#812ce5;display:flex;flex-direction:row}}@media only screen and (min-width:1024px){.navSearchWrapper{padding-left:10px;position:relative;right:auto;top:auto}.reactNavSearchWrapper input#search_input_react{height:100%;padding-top:8px;padding-bottom:8px;padding-left:38px}.navSearchWrapper .algolia-autocomplete{display:block}.navigationSlider{height:34px;margin-left:auto;position:relative}.navigationSlider .navSlideout{display:none}.navigationSlider nav.slidingNav{background:none;height:auto;position:relative;right:auto;top:auto;width:auto}.navigationSlider .slidingNav ul{background:none;display:flex;flex-flow:row nowrap;margin:0;padding:0;width:auto}.navigationSlider .slidingNav ul li a{border:0;color:hsla(0,0%,100%,.8);display:flex;font-size:16px;font-size:1em;font-weight:300;height:32px;line-height:1.2em;margin:0;padding:6px 10px}.navigationSlider .slidingNav ul li.siteNavGroupActive a,.navigationSlider .slidingNav ul li.siteNavItemActive a,.navigationSlider .slidingNav ul li a:hover{color:#fff}}@media only screen and (max-width:735px){.navigationSlider .slidingNav ul{overflow-x:auto}.navigationSlider .slidingNav ul::-webkit-scrollbar{display:none}.reactNavSearchWrapper .aa-dropdown-menu{min-width:400px}}@media only screen and (max-width:475px){.reactNavSearchWrapper .aa-dropdown-menu{min-width:300px}}.docMainWrapper .wrapper{padding-left:0;padding-right:0;padding-top:10px}@media only screen and (min-width:1024px){.docMainWrapper{width:100%}.docMainWrapper>*{margin:0 24px}.docMainWrapper>:first-child{margin-left:0}.docMainWrapper>:last-child{margin-right:0}.docMainWrapper .mainContainer{min-width:0}}.edit-page-link{float:right;font-size:10px;font-weight:400;margin-top:3px;text-decoration:none}@media only screen and (max-width:1023px){.edit-page-link{display:none}}.docLastUpdate{font-size:13px;font-style:italic;margin:20px 0;text-align:right}.docs-prevnext{margin:20px 0}.docs-prevnext:after{clear:both;content:" ";display:table}.docs-next{float:right}.docs-prev{float:left}@media only screen and (max-width:735px){.docs-next{clear:both;float:left}.docs-next,.docs-prev{margin:10px 0}.arrow-next{float:right;margin-left:10px}.arrow-prev{float:left;margin-right:10px}.function-name-prevnext{width:200px;display:inline-block;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}}.hide{display:none}.collapsible{cursor:pointer}.collapsible .arrow{float:right;margin-right:8px;margin-top:-4px;transform:rotate(90deg);transition:transform .2s linear}.collapsible .arrow.rotate{transform:rotate(180deg)}@media only screen and (max-width:1023px){.docsNavContainer{background:#fff;left:0;position:fixed;width:100%;z-index:100}}@media only screen and (min-width:1024px){.docsNavContainer{flex:0 0 240px;height:calc(100vh - 50px);position:sticky;overflow-y:auto;top:50px}}.docsSliderActive.docsNavContainer{box-sizing:border-box;height:100%;-webkit-overflow-scrolling:touch;overflow-y:auto;-ms-scroll-chaining:none;overscroll-behavior:contain;padding-bottom:50px}.docsNavContainer .toc 
.navBreadcrumb{background-color:#f1f1f1;box-sizing:border-box;display:flex;flex-flow:row nowrap;font-size:12px;height:48px;overflow:hidden;padding:8px 20px}.docsNavContainer .toc .navWrapper{padding:0}@media only screen and (min-width:1024px){.docsNavContainer .toc .navBreadcrumb{display:none}.navBreadcrumb h2{padding:0 10px}.separateOnPageNav .docsNavContainer{flex:0 0 240px}}.navBreadcrumb a,.navBreadcrumb span{border:0;color:#393939}@media only screen and (max-width:735px){.anchor{top:-144px}}@media only screen and (min-width:1024px){.toc{padding:40px 0}}.toc section{padding:0;position:relative}.toc section .navGroups{display:none;padding:48px 20px 60px}.toc .toggleNav{color:#393939;position:relative}.toc .toggleNav .navToggle{cursor:pointer;height:32px;margin-right:10px;position:relative;text-align:left;width:18px}.hamburger-menu{position:absolute;top:6px;width:100%}.line1,.line2,.line3{width:100%;height:3px;background-color:#393939;margin:3px 0;transition:.4s;border-radius:10px}.docsSliderActive .hamburger-menu{top:12px}.docsSliderActive .line1{position:absolute;top:50%;transform:rotate(-45deg)}.docsSliderActive .line2{display:none}.docsSliderActive .line3{position:absolute;top:50%;transform:rotate(45deg)}.toggleNav h2 i{padding:0 4px}.toc .toggleNav .navGroup{margin-bottom:16px}.toc .toggleNav .subNavGroup{margin-bottom:0}.toc .toggleNav .navGroup .navGroupCategoryTitle{color:#393939;font-size:18px;font-weight:500;line-height:1.2em;margin-bottom:8px;margin-top:0}.toc .toggleNav .navGroup .navGroupSubcategoryTitle{color:#393939;font-size:14px;font-weight:500;line-height:1.5;margin-bottom:0;margin-top:0;padding:4px 0}.toc .toggleNav .navGroup .navListItem{margin:0}.toc .toggleNav .navGroup h3 i:not(:empty){box-sizing:border-box;color:rgba(57,57,57,.5);display:inline-block;height:16px;margin-right:10px;text-align:center;transition:color .2s;width:16px}.toc .toggleNav ul{padding:0 8px}.docsSliderActive .toc .toggleNav ul{padding-left:0}.toc .toggleNav ul li{list-style-type:none;padding:0}.toc .toggleNav ul li a{border:none;color:#717171;display:block;font-size:14px;padding:4px 0;transition:color .3s}.toc .toggleNav ul li.navListItemActive a,.toc .toggleNav ul li a:focus,.toc .toggleNav ul li a:hover{color:#812ce5}.docsSliderActive .toc .navBreadcrumb,.tocActive .navBreadcrumb{border-bottom:1px solid #ccc;margin-bottom:20px;position:fixed;width:100%}.toc .toggleNav .navBreadcrumb h2{border:0;flex-grow:1;font-size:16px;font-weight:600;line-height:32px;margin:0;padding:0}.docsSliderActive .toc section .navGroups{display:block;padding-top:60px}.tocToggler{cursor:pointer;height:32px;line-height:32px;margin-right:-10px;padding:0 10px}.icon-toc{box-sizing:border-box;display:inline-block;line-height:normal;position:relative;top:-1px;vertical-align:middle}.icon-toc,.icon-toc:after,.icon-toc:before{background-color:currentColor;border:1px solid;border-radius:50%;box-sizing:border-box;height:4px;width:4px}.icon-toc:after,.icon-toc:before{content:"";position:absolute}.icon-toc:before{left:-1px;top:-7px}.icon-toc:after{left:-1px;top:5px}.tocActive .icon-toc{border-radius:0;height:16px;transform:rotate(45deg);width:3px}.tocActive .icon-toc:before{border-radius:0;height:3px;left:50%;top:50%;transform:translate(-50%,-50%);width:16px}.tocActive .icon-toc:after{content:""}@media only screen and (min-width:1024px){.docMainWrapper{display:flex;flex-flow:row nowrap}.docMainWrapper 
.wrapper{padding-top:0;padding-left:0;padding-right:0}}.onPageNav{display:none;margin-bottom:40px}.onPageNav::-webkit-scrollbar{width:7px}.onPageNav::-webkit-scrollbar-track{background:#f1f1f1;border-radius:10px}.onPageNav::-webkit-scrollbar-thumb{background:#888;border-radius:10px}.onPageNav::-webkit-scrollbar-thumb:hover{background:#555}.onPageNav a{color:#717171}.onPageNav .toc-headings>li>a.active,.onPageNav .toc-headings>li>a.hover{font-weight:600;color:#812ce5}.onPageNav ul{list-style:none}.onPageNav ul li{font-size:12px;line-height:16px;padding-bottom:8px}.onPageNav ul ul{padding:8px 0 0 20px}.onPageNav ul ul li{padding-bottom:5px}@media only screen and (min-width:1024px){.toc section .navGroups{display:block;padding:8px 0 0}.navBreadcrumb h2{padding:0 10px}}@supports (position:sticky){@media only screen and (max-width:1023px){.tocActive .onPageNav{background:#fff;bottom:0;display:block;left:0;overflow-y:auto;-ms-scroll-chaining:none;overscroll-behavior:contain;padding:0 20px;position:fixed;right:0;top:148px;z-index:10;margin-bottom:0}.tocActive .singleRowMobileNav .onPageNav{top:98px}.tocActive .navBreadcrumb h2,.tocActive .navToggle{visibility:hidden}.tocActive .onPageNav>.toc-headings{padding:12px 0}}@media only screen and (min-width:1024px){.separateOnPageNav .headerWrapper.wrapper,.separateOnPageNav .wrapper{max-width:1400px}.separateOnPageNav .toc{width:auto}.separateOnPageNav.sideNavVisible .navPusher .mainContainer{flex:1 auto;max-width:100%;min-width:0}.onPageNav{align-self:flex-start;display:block;flex:0 0 240px;max-height:calc(100vh - 90px);overflow-y:auto;position:sticky;top:90px}.onPageNav>.toc-headings{border-left:1px solid #e0e0e0;padding:10px 0 2px 15px}.tocToggler{display:none}}}.blog .wrapper{max-width:1100px}.blogContainer .posts .post{border-bottom:1px solid #e0e0e0;border-radius:3px;margin-bottom:20px;padding-bottom:20px}.blogContainer .postHeader{margin-bottom:10px}.blogContainer .postHeaderTitle{margin-top:0}.blogContainer .postHeader p.post-meta{margin-bottom:10px;padding:0}.blogContainer .postHeader .authorBlock{display:flex}.blogContainer .postHeader .post-authorName{color:rgba(57,57,57,.7);display:flex;flex-direction:column;font-size:14px;font-weight:400;justify-content:center;margin-right:10px;margin-top:0;margin-bottom:0;padding:0}.blogContainer .postHeader .authorPhoto{border-radius:50%;height:30px;overflow:hidden;width:30px}.blogContainer .postHeader .authorPhoto.authorPhotoBig{height:50px;width:50px}.blog-recent{margin:20px 0}.blog-recent>a{float:left}@media only screen and (max-width:735px){.blog-recent{height:40px}}.blogSocialSection{display:block;padding:36px 0}.blogSocialSection .blogSocialSectionItem{padding-bottom:5px}.fb-like{display:block;margin-bottom:20px;width:100%}.more-users{margin:0 auto;max-width:560px;text-align:center}.productShowcaseSection{padding:0 20px;text-align:center}.productShowcaseSection.paddingTop{padding-top:20px}.productShowcaseSection.paddingBottom{padding-bottom:80px}.productShowcaseSection h2{color:#812ce5;font-size:30px;line-height:1em;margin-top:20px;padding:10px 0;text-align:center}.productShowcaseSection p{margin:0 auto;max-width:560px;padding:.8em 0}.productShowcaseSection .logos{align-items:center;display:flex;flex-flow:row wrap;justify-content:center;padding:20px}.productShowcaseSection .logos img{max-height:110px;padding:20px;width:110px}@media only screen and (max-width:735px){.productShowcaseSection .logos img{max-height:64px;padding:20px;width:64px}}.showcaseSection{margin:0 
auto;max-width:900px}.showcaseSection,.showcaseSection .prose h1{text-align:center}.showcaseSection .prose{margin:0 auto;max-width:560px;text-align:center}.showcaseSection .logos{align-items:center;display:flex;flex-flow:row wrap;justify-content:center}.showcaseSection .logos img{max-height:128px;padding:20px;width:128px}@media only screen and (max-width:735px){.showcaseSection .logos img{max-height:64px;padding:20px;width:64px}}.nav-footer{background:#20232a;border:none;color:#202020;font-size:15px;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-weight:400;line-height:24px;padding-bottom:2em;padding-top:2em;position:relative}@media only screen and (min-width:1024px){.nav-footer{flex-shrink:0}}.nav-footer .sitemap{display:flex;justify-content:space-between;margin:0 auto 3em;max-width:1080px}.nav-footer .sitemap div{flex:1}.nav-footer .sitemap .nav-home{display:table;height:72px;margin:-12px 20px 0 0;opacity:.4;padding:10px;transition:opacity .15s ease-in-out;width:72px}.nav-footer .sitemap .nav-home:focus,.nav-footer .sitemap .nav-home:hover{opacity:1}@media only screen and (max-width:735px){.nav-footer .sitemap{display:flex;flex-direction:column;margin:0 2em 3em;width:calc(100% - 4em)}.nav-footer .sitemap>div{margin-bottom:18px}}.nav-footer .sitemap a{color:hsla(0,0%,100%,.6);display:block;margin:2px 0;padding:3px 0}.nav-footer .sitemap a:focus,.nav-footer .sitemap a:hover,.nav-footer .sitemap h5>a:focus,.nav-footer .sitemap h5>a:hover{color:#fff;text-decoration:none}.nav-footer .sitemap h5,.nav-footer .sitemap h6{margin:0 0 10px}.nav-footer .sitemap h5,.nav-footer .sitemap h5>a,.nav-footer .sitemap h6,.nav-footer .sitemap h6>a{color:#fff}.nav-footer .sitemap h5>a,.nav-footer .sitemap h6>a{margin:0 -10px}.nav-footer .fbOpenSource{display:block;margin:1em auto;opacity:.4;transition:opacity .15s ease-in-out;width:170px}.nav-footer .fbOpenSource:hover{opacity:1}.nav-footer .copyright{color:hsla(0,0%,100%,.4);text-align:center}.nav-footer .social{padding:5px 0}.tabs{border-top:1px solid #cfcfcf}.nav-tabs{display:flex;border-bottom:4px solid #e0e0e0;width:100%;padding:0;overflow-x:auto;white-space:nowrap;max-height:100%}.nav-tabs::-webkit-scrollbar{display:none}.tabs .tab-pane:focus{outline:none}.tabs .nav-tabs>div{font-size:14px;line-height:1.14286;padding:12px 16px;text-decoration:none;display:block;cursor:pointer}.tabs .nav-tabs>div.active{border-bottom:4px solid #812ce5}.tab-pane{display:none}.tab-pane.active{display:block}.tab-pane>pre{white-space:pre-wrap}.tab-pane>pre>code{margin-top:0;border-radius:0;box-shadow:none}html body{font-family:Montserrat,sans-serif;overflow-x:hidden}.fixedHeaderContainer{background-color:#222}.fixedHeaderContainer header .headerTitleWithLogo{display:block;color:#fff}.fixedHeaderContainer header .logo{height:50px}.fixedHeaderContainer header a:nth-child(2){position:absolute;right:0}.fixedHeaderContainer header a:nth-child(2) h3{font-size:14px}.fixedHeaderContainer header a:nth-child(2) h3:before{content:"v: "}.navigationSlider{margin-right:80px}.navigationSlider .slidingNav ul{background:#222}.navigationSlider .slidingNav ul li a{color:#c7d4fd}.navigationSlider .slidingNav ul li a:focus,.navigationSlider .slidingNav ul li a:hover{color:#fff;background-color:inherit}.navigationSlider .slidingNav ul li.siteNavGroupActive>a,.navigationSlider .slidingNav ul li.siteNavItemActive>a{background-color:inherit}.homeContainer{background:linear-gradient(#812ce5,#ffaf00);padding:25px 0}.splashLogo{display:block;margin:0 
auto;width:65%}.projectTitle{color:#fff;font-variant:small-caps;font-weight:300}.promoSection .button{border:2px solid #fff;color:#fff;font-size:19px;margin:10px}.promoSection .button:hover{background:inherit;border:2px solid #fff;color:#fff}.landingPage{padding:0}.productShowcaseSection{padding:45px 20px 30px}div.productShowcaseSection{color:#6c6c6c;padding-top:40px}#quickstart{padding-top:80px}.productShowcaseSection>h2{font-variant:small-caps;font-weight:360;margin:0;padding:0;color:#5b1861}.productShowcaseSection p{font-weight:360}# Subtitles for key features .productShowcaseSection .blockContent>div span p{font-size:18px}.productShowcaseSection div.container{padding:10px 0 40px}.productShowcaseSection img{height:100px}.gridBlock .fourByGridBlock img{max-width:200%}.productShowcaseSection li{padding:10px 0}.productShowcaseSection pre{margin:10px 0}.productShowcaseSection code{background:#fff}.container .wrapper .alignCenter h2{color:#222}div#quickstart{background:#efefef}div#quickstart ol{margin-bottom:0}.nav-footer{background-color:#222}.nav-footer .sitemap a{color:#c7d4fd}.nav-footer .sitemap a:hover{color:#fff}.social{text-align:center}a,a:hover,p a,p a:hover{color:#4872f9}.imageAlignTop .blockImage{margin-bottom:20px;max-width:200px}.tutorialBody{margin-top:-20px;color:#6c6c6c}.tutorialBody h1{margin:0}.tutorialBody h1,.tutorialBody h2,.tutorialBody h3{color:#222}.tutorialBody pre{font-family:IBM Plex Mono,monospace;font-size:14px;margin:0}.tutorialBody .input_prompt,.tutorialBody .output_prompt{color:#8b0000;font-size:12px}.tutorialBody .highlight{background:#f3f4f7;padding:10px 20px;border:1px solid #d3d3d3;border-radius:3px}.tutorialBody .cell{margin:20px}.tutorialBody .output_stderr{background-color:#fdede9}.tutorialBody .anchor-link{color:#d3d3d3}.tutorialBody iframe{width:100%;height:100vh}.tutorialButtonWrapper,.tutorialRuntime{margin:20px}.colabButtonWrapper,.tutorialButtonWrapper{float:left;margin:5px}.colabButtonWrapper img{padding-right:.25em}.colabButton{width:24px}.tutorialButtonsWrapper{display:flex;align-items:center;padding-bottom:15px}.tutorialButton svg{height:15px;margin-right:5px}.tutorialButton:hover{color:#4872f9;background-color:inherit}.wrapper{max-width:1400px}@media only screen and (min-device-width:360px) and (max-device-width:736px){.fixedHeaderContainer header a:nth-child(2){position:absolute;right:150px}.promoSection .button{font-size:12px;margin:3px}.inner h2{margin-top:0}.splashLogo{width:90%}.headerTitleWithLogo{display:block!important}.blockContent>div span p{margin-bottom:30px}.productShowcaseSection div.container{padding-top:0}.productShowcaseSection>h2{padding-bottom:20px}}@media only screen and (max-width:1023px){.fixedHeaderContainer header a:nth-child(2){position:absolute;right:200px}}.highlight .hll{background-color:#ffc}.highlight .c{color:#60a0b0;font-style:italic}.highlight .err{border:1px solid red}.highlight .k{color:#007020;font-weight:700}.highlight .o{color:#666}.highlight .cm{color:#60a0b0;font-style:italic}.highlight .cp{color:#007020}.highlight .c1{color:#60a0b0;font-style:italic}.highlight .cs{color:#60a0b0;background-color:#fff0f0}.highlight .gd{color:#a00000}.highlight .ge{font-style:italic}.highlight .gr{color:red}.highlight .gh{color:navy;font-weight:700}.highlight .gi{color:#00a000}.highlight .go{color:grey}.highlight .gp{color:#c65d09}.highlight .gp,.highlight .gs,.highlight .gu{font-weight:700}.highlight .gu{color:purple}.highlight .gt{color:#0040d0}.highlight .kc,.highlight .kd,.highlight 
.kn{color:#007020;font-weight:700}.highlight .kp{color:#007020}.highlight .kr{color:#007020;font-weight:700}.highlight .kt{color:#902000}.highlight .m{color:#40a070}.highlight .na,.highlight .s{color:#4070a0}.highlight .nb{color:#007020}.highlight .nc{color:#0e84b5;font-weight:700}.highlight .no{color:#60add5}.highlight .nd{color:#555;font-weight:700}.highlight .ni{color:#d55537;font-weight:700}.highlight .ne{color:#007020}.highlight .nf{color:#06287e}.highlight .nl{color:#002070;font-weight:700}.highlight .nn{color:#0e84b5;font-weight:700}.highlight .nt{color:#062873;font-weight:700}.highlight .nv{color:#bb60d5}.highlight .ow{color:#007020;font-weight:700}.highlight .w{color:#bbb}.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:#40a070}.highlight .sb,.highlight .sc{color:#4070a0}.highlight .sd{color:#4070a0;font-style:italic}.highlight .s2{color:#4070a0}.highlight .se{color:#4070a0;font-weight:700}.highlight .sh{color:#4070a0}.highlight .si{color:#70a0d0;font-style:italic}.highlight .sx{color:#c65d09}.highlight .sr{color:#235388}.highlight .s1{color:#4070a0}.highlight .ss{color:#517918}.highlight .bp{color:#007020}.highlight .vc,.highlight .vg,.highlight .vi{color:#bb60d5}.highlight .il{color:#40a070} \ No newline at end of file diff --git a/docs/batching.html b/docs/batching.html index 0fc3fd58..27f5f70b 100644 --- a/docs/batching.html +++ b/docs/batching.html @@ -1,4 +1,4 @@ -batching · PyTorch3D

Batching

-

In deep learning, every optimization step operates on multiple input examples for robust training. Thus, efficient batching is crucial. For image inputs, batching is straighforward; N images are resized to the same height and width and stacked as a 4 dimensional tensor of shape N x 3 x H x W. For meshes, batching is less straighforward.

+

In deep learning, every optimization step operates on multiple input examples for robust training. Thus, efficient batching is crucial. For image inputs, batching is straightforward; N images are resized to the same height and width and stacked as a 4 dimensional tensor of shape N x 3 x H x W. For meshes, batching is less straightforward.

[figure: batch_intro]

Batch modes for meshes

Assume you want to construct a batch containing two meshes, with mesh1 = (v1: V1 x 3, f1: F1 x 3) containing V1 vertices and F1 faces, and mesh2 = (v2: V2 x 3, f2: F2 x 3) with V2 (!= V1) vertices and F2 (!= F1) faces. The Meshes data structure provides three different ways to batch heterogeneous meshes. If meshes = Meshes(verts = [v1, v2], faces = [f1, f2]) is an instantiation of the data structure, then
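
For concreteness, a minimal sketch of this construction and of reading the batch back in its three forms (the tensor values below are made-up placeholders, not taken from this page):

    import torch
    from pytorch3d.structures import Meshes

    # Two illustrative meshes with different numbers of vertices and faces.
    v1 = torch.rand(4, 3)                                 # (V1, 3) float vertices
    f1 = torch.tensor([[0, 1, 2], [0, 2, 3]])             # (F1, 3) integer faces
    v2 = torch.rand(5, 3)                                 # (V2, 3)
    f2 = torch.tensor([[0, 1, 2], [1, 2, 3], [2, 3, 4]])  # (F2, 3)

    meshes = Meshes(verts=[v1, v2], faces=[f1, f2])
    verts_list = meshes.verts_list()      # list of per-mesh (Vi, 3) tensors
    verts_padded = meshes.verts_padded()  # (2, max(V1, V2), 3) tensor, zero padded
    verts_packed = meshes.verts_packed()  # (V1 + V2, 3) tensor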

@@ -74,6 +74,6 @@

[figure: batch_modes]

Use cases for batch modes

-

The need for different mesh batch modes is inherent to the way pytorch operators are implemented. To fully utilize the optimized pytorch ops, the Meshes data structure allows for efficient conversion between the different batch modes. This is crucial when aiming for a fast and efficient training cycle. An example of this is Mesh R-CNN. Here, in the same forward pass different parts of the network assume different inputs, which are computed by converting between the different batch modes. In particular, vert_align assumes a padded input tensor while immediately after graph_conv assumes a packed input tensor.

+

The need for different mesh batch modes is inherent to the way PyTorch operators are implemented. To fully utilize the optimized PyTorch ops, the Meshes data structure allows for efficient conversion between the different batch modes. This is crucial when aiming for a fast and efficient training cycle. An example of this is Mesh R-CNN. Here, in the same forward pass different parts of the network assume different inputs, which are computed by converting between the different batch modes. In particular, vert_align assumes a padded input tensor while immediately after graph_conv assumes a packed input tensor.
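
One possible pattern, shown only as a rough sketch continuing the meshes object from the earlier example: the image feature tensor, the layer sizes, the use of the GraphConv module in place of a functional graph_conv, and the padded-to-packed conversion are illustrative assumptions rather than code taken from this page.

    import torch
    from pytorch3d.ops import GraphConv, vert_align

    img_feats = torch.rand(2, 16, 32, 32)                        # (N, C, H, W) image features (made up)

    # vert_align samples image features at the padded vertex locations -> (N, V_max, C).
    vert_feats_padded = vert_align(img_feats, meshes.verts_padded())

    # Graph convolution consumes packed vertices and packed edges, so convert padded -> packed.
    idx = meshes.verts_padded_to_packed_idx()
    vert_feats_packed = vert_feats_padded.reshape(-1, 16)[idx]   # (V1 + V2, 16)
    gconv = GraphConv(input_dim=16, output_dim=32)
    out = gconv(vert_feats_packed, meshes.edges_packed())        # (V1 + V2, 32)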

[figure: meshrcnn]

-
Last updated by Nikhila Ravi
\ No newline at end of file +
Last updated by Jeremy Reizenstein
Data loadersCubify
\ No newline at end of file diff --git a/docs/batching/index.html b/docs/batching/index.html index 0fc3fd58..27f5f70b 100644 --- a/docs/batching/index.html +++ b/docs/batching/index.html @@ -1,4 +1,4 @@ -batching · PyTorch3D

Batching

-

In deep learning, every optimization step operates on multiple input examples for robust training. Thus, efficient batching is crucial. For image inputs, batching is straighforward; N images are resized to the same height and width and stacked as a 4 dimensional tensor of shape N x 3 x H x W. For meshes, batching is less straighforward.

+

In deep learning, every optimization step operates on multiple input examples for robust training. Thus, efficient batching is crucial. For image inputs, batching is straightforward; N images are resized to the same height and width and stacked as a 4 dimensional tensor of shape N x 3 x H x W. For meshes, batching is less straightforward.

[figure: batch_intro]

Batch modes for meshes

Assume you want to construct a batch containing two meshes, with mesh1 = (v1: V1 x 3, f1: F1 x 3) containing V1 vertices and F1 faces, and mesh2 = (v2: V2 x 3, f2: F2 x 3) with V2 (!= V1) vertices and F2 (!= F1) faces. The Meshes data structure provides three different ways to batch heterogeneous meshes. If meshes = Meshes(verts = [v1, v2], faces = [f1, f2]) is an instantiation of the data structure, then

@@ -74,6 +74,6 @@

[figure: batch_modes]

Use cases for batch modes

-

The need for different mesh batch modes is inherent to the way pytorch operators are implemented. To fully utilize the optimized pytorch ops, the Meshes data structure allows for efficient conversion between the different batch modes. This is crucial when aiming for a fast and efficient training cycle. An example of this is Mesh R-CNN. Here, in the same forward pass different parts of the network assume different inputs, which are computed by converting between the different batch modes. In particular, vert_align assumes a padded input tensor while immediately after graph_conv assumes a packed input tensor.

+

The need for different mesh batch modes is inherent to the way PyTorch operators are implemented. To fully utilize the optimized PyTorch ops, the Meshes data structure allows for efficient conversion between the different batch modes. This is crucial when aiming for a fast and efficient training cycle. An example of this is Mesh R-CNN. Here, in the same forward pass different parts of the network assume different inputs, which are computed by converting between the different batch modes. In particular, vert_align assumes a padded input tensor while immediately after graph_conv assumes a packed input tensor.

[figure: meshrcnn]

-
Last updated by Nikhila Ravi
\ No newline at end of file +
Last updated by Jeremy Reizenstein
Data loadersCubify
\ No newline at end of file diff --git a/docs/cameras.html b/docs/cameras.html index c20f27f5..9316934d 100644 --- a/docs/cameras.html +++ b/docs/cameras.html @@ -1,4 +1,4 @@ -cameras · PyTorch3D
\ No newline at end of file diff --git a/docs/io/index.html b/docs/io/index.html new file mode 100644 index 00000000..df982b44 --- /dev/null +++ b/docs/io/index.html @@ -0,0 +1,30 @@ +io · PyTorch3D
\ No newline at end of file diff --git a/docs/meshes_io.html b/docs/meshes_io.html index 593b64f1..82e92203 100644 --- a/docs/meshes_io.html +++ b/docs/meshes_io.html @@ -1,4 +1,4 @@ -meshes_io · PyTorch3D

Meshes and IO

The Meshes object represents a batch of triangulated meshes, and is central to -much of the functionality of pytorch3d. There is no insistence that each mesh in +much of the functionality of PyTorch3D. There is no insistence that each mesh in the batch has the same number of vertices or faces. When available, it can store other data which pertains to the mesh, for example face normals, face areas and textures.

Two common file formats for storing single meshes are ".obj" and ".ply" files, -and pytorch3d has functions for reading these.

+and PyTorch3D has functions for reading these.

OBJ

Obj files have a standard way to store extra information about a mesh. Given an obj file, it can be read with
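
For reference, a minimal sketch of such a call (the filename is a placeholder):

    from pytorch3d.io import load_obj

    # Returns vertex positions, a namedtuple of face index tensors, and auxiliary
    # data such as normals, texture coordinates and material properties.
    verts, faces, aux = load_obj("model.obj")
    faces_idx = faces.verts_idx   # (F, 3) LongTensor indexing into verts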

@@ -105,7 +105,7 @@ entire mesh e.g.

The load_objs_as_meshes function provides this procedure.
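
A hedged sketch of that convenience path (the file names and device below are illustrative):

    import torch
    from pytorch3d.io import load_objs_as_meshes

    device = torch.device("cpu")
    # Builds a single Meshes batch, including textures when present, from one or more .obj files.
    meshes = load_objs_as_meshes(["model1.obj", "model2.obj"], device=device)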

PLY

-

Ply files are flexible in the way they store additional information, pytorch3d +

Ply files are flexible in the way they store additional information. PyTorch3D provides a function just to read the vertices and faces from a ply file. The call

    verts, faces = load_ply(filename)
@@ -116,4 +116,4 @@ are not triangles will be split into triangles. A Meshes object containing a
 single mesh can be created from this data using

    meshes = Meshes(verts=[verts], faces=[faces])
 
-
Last updated by Jeremy Reizenstein
\ No newline at end of file +
Last updated by Jeremy Reizenstein
Why PyTorch3DData loaders
\ No newline at end of file diff --git a/docs/meshes_io/index.html b/docs/meshes_io/index.html index 593b64f1..82e92203 100644 --- a/docs/meshes_io/index.html +++ b/docs/meshes_io/index.html @@ -1,4 +1,4 @@ -meshes_io · PyTorch3D

Meshes and IO

The Meshes object represents a batch of triangulated meshes, and is central to -much of the functionality of pytorch3d. There is no insistence that each mesh in +much of the functionality of PyTorch3D. There is no insistence that each mesh in the batch has the same number of vertices or faces. When available, it can store other data which pertains to the mesh, for example face normals, face areas and textures.

Two common file formats for storing single meshes are ".obj" and ".ply" files, -and pytorch3d has functions for reading these.

+and PyTorch3D has functions for reading these.

OBJ

Obj files have a standard way to store extra information about a mesh. Given an obj file, it can be read with

@@ -105,7 +105,7 @@ entire mesh e.g.

The load_objs_as_meshes function provides this procedure.

PLY

-

Ply files are flexible in the way they store additional information, pytorch3d +

Ply files are flexible in the way they store additional information. PyTorch3D provides a function just to read the vertices and faces from a ply file. The call

    verts, faces = load_ply(filename)
@@ -116,4 +116,4 @@ are not triangles will be split into triangles. A Meshes object containing a
 single mesh can be created from this data using

    meshes = Meshes(verts=[verts], faces=[faces])
 
-
Last updated by Jeremy Reizenstein
\ No newline at end of file +
Last updated by Jeremy Reizenstein
Why PyTorch3DData loaders
\ No newline at end of file diff --git a/docs/renderer.html b/docs/renderer.html index 9ac5945f..dd6d6f43 100644 --- a/docs/renderer.html +++ b/docs/renderer.html @@ -1,4 +1,4 @@ -renderer · PyTorch3D

Why PyTorch3D

Our goal with PyTorch3D is to help accelerate research at the intersection of deep learning and 3D. 3D data is more complex than 2D images, and while working on projects such as Mesh R-CNN and C3DPO, we encountered several challenges, including 3D data representation, batching, and speed. We have developed many useful operators and abstractions for working on 3D deep learning and want to share them with the community to drive novel research in this area.

In PyTorch3D we have included efficient 3D operators, heterogeneous batching capabilities, and a modular differentiable rendering API to equip researchers in this field with a much-needed toolkit for implementing cutting-edge research with complex 3D inputs.

-
Last updated by Patrick Labatut
\ No newline at end of file +
Last updated by Patrick Labatut
Loading from file
\ No newline at end of file diff --git a/docs/why_pytorch3d/index.html b/docs/why_pytorch3d/index.html index 8ab61de1..6bf93047 100644 --- a/docs/why_pytorch3d/index.html +++ b/docs/why_pytorch3d/index.html @@ -1,4 +1,4 @@ -why_pytorch3d · PyTorch3D

Why PyTorch3D

Our goal with PyTorch3D is to help accelerate research at the intersection of deep learning and 3D. 3D data is more complex than 2D images, and while working on projects such as Mesh R-CNN and C3DPO, we encountered several challenges, including 3D data representation, batching, and speed. We have developed many useful operators and abstractions for working on 3D deep learning and want to share them with the community to drive novel research in this area.

In PyTorch3D we have included efficient 3D operators, heterogeneous batching capabilities, and a modular differentiable rendering API to equip researchers in this field with a much-needed toolkit for implementing cutting-edge research with complex 3D inputs.

-
Last updated by Patrick Labatut
\ No newline at end of file +
Last updated by Patrick Labatut
Loading from file
\ No newline at end of file diff --git a/en/help.html b/en/help.html index a58adf36..8ab419de 100644 --- a/en/help.html +++ b/en/help.html @@ -1,4 +1,4 @@ -PyTorch3D · A library for deep learning with 3D data
\ No newline at end of file +
\ No newline at end of file diff --git a/en/users/index.html b/en/users/index.html index 611eefdb..1b72287e 100644 --- a/en/users/index.html +++ b/en/users/index.html @@ -1,4 +1,4 @@ -PyTorch3D · A library for deep learning with 3D data
\ No newline at end of file +
\ No newline at end of file diff --git a/files/bundle_adjustment.ipynb b/files/bundle_adjustment.ipynb index 38d02c9c..a0501074 100644 --- a/files/bundle_adjustment.ipynb +++ b/files/bundle_adjustment.ipynb @@ -41,7 +41,7 @@ "Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows:\n", "![Solution](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/data/bundle_adjustment_final.png?raw=1)\n", "\n", - "In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \\in SO(3); T \\in \\mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exponential_map`) of the axis-angle representation of the rotation `log_R_absolute`.\n", + "In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \\in SO(3); T \\in \\mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exp_map`) of the axis-angle representation of the rotation `log_R_absolute`.\n", "\n", "Note that the solution to this problem could only be recovered up to an unknown global rigid transformation $g_{glob} \\in SE(3)$. Thus, for simplicity, we assume knowledge of the absolute extrinsics of the first camera $g_0$. We set $g_0$ as a trivial camera $g_0 = (I, \\vec{0})$.\n" ] @@ -63,7 +63,7 @@ "id": "WAHR1LMJmP-h" }, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -80,19 +80,25 @@ }, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -116,7 +122,7 @@ "# imports\n", "import torch\n", "from pytorch3d.transforms.so3 import (\n", - " so3_exponential_map,\n", + " so3_exp_map,\n", " so3_relative_angle,\n", ")\n", "from pytorch3d.renderer.cameras import (\n", @@ -322,7 +328,7 @@ "\n", "As mentioned earlier, `log_R_absolute` is the axis angle representation of the rotation part of our absolute cameras. We can obtain the 3x3 rotation matrix `R_absolute` that corresponds to `log_R_absolute` with:\n", "\n", - "`R_absolute = so3_exponential_map(log_R_absolute)`\n" + "`R_absolute = so3_exp_map(log_R_absolute)`\n" ] }, { @@ -372,7 +378,7 @@ " # compute the absolute camera rotations as \n", " # an exponential map of the logarithms (=axis-angles)\n", " # of the absolute rotations\n", - " R_absolute = so3_exponential_map(log_R_absolute * camera_mask)\n", + " R_absolute = so3_exp_map(log_R_absolute * camera_mask)\n", "\n", " # get the current absolute cameras\n", " cameras_absolute = SfMPerspectiveCameras(\n", @@ -381,7 +387,7 @@ " device = device,\n", " )\n", "\n", - " # compute the relative cameras as a compositon of the absolute cameras\n", + " # compute the relative cameras as a composition of the absolute cameras\n", " cameras_relative_composed = \\\n", " get_relative_camera(cameras_absolute, relative_edges)\n", "\n", diff --git a/files/bundle_adjustment.py b/files/bundle_adjustment.py index 836a659f..6ac5fa84 100644 --- a/files/bundle_adjustment.py +++ b/files/bundle_adjustment.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -28,31 +28,37 @@ # Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows: # ![Solution](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/data/bundle_adjustment_final.png?raw=1) # -# In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \in SO(3); T \in \mathbb{R}^3$. 
In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exponential_map`) of the axis-angle representation of the rotation `log_R_absolute`. +# In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \in SO(3); T \in \mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exp_map`) of the axis-angle representation of the rotation `log_R_absolute`. # # Note that the solution to this problem could only be recovered up to an unknown global rigid transformation $g_{glob} \in SE(3)$. Thus, for simplicity, we assume knowledge of the absolute extrinsics of the first camera $g_0$. We set $g_0$ as a trivial camera $g_0 = (I, \vec{0})$. # # ## 0. Install and Import Modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -65,7 +71,7 @@ else: # imports import torch from pytorch3d.transforms.so3 import ( - so3_exponential_map, + so3_exp_map, so3_relative_angle, ) from pytorch3d.renderer.cameras import ( @@ -197,7 +203,7 @@ def get_relative_camera(cams, edges): # # As mentioned earlier, `log_R_absolute` is the axis angle representation of the rotation part of our absolute cameras. 
We can obtain the 3x3 rotation matrix `R_absolute` that corresponds to `log_R_absolute` with: # -# `R_absolute = so3_exponential_map(log_R_absolute)` +# `R_absolute = so3_exp_map(log_R_absolute)` # # In[ ]: @@ -236,7 +242,7 @@ for it in range(n_iter): # compute the absolute camera rotations as # an exponential map of the logarithms (=axis-angles) # of the absolute rotations - R_absolute = so3_exponential_map(log_R_absolute * camera_mask) + R_absolute = so3_exp_map(log_R_absolute * camera_mask) # get the current absolute cameras cameras_absolute = SfMPerspectiveCameras( @@ -245,7 +251,7 @@ for it in range(n_iter): device = device, ) - # compute the relative cameras as a compositon of the absolute cameras + # compute the relative cameras as a composition of the absolute cameras cameras_relative_composed = get_relative_camera(cameras_absolute, relative_edges) # compare the composed cameras with the ground truth relative cameras diff --git a/files/camera_position_optimization_with_differentiable_rendering.ipynb b/files/camera_position_optimization_with_differentiable_rendering.ipynb index d84db529..f5f72441 100644 --- a/files/camera_position_optimization_with_differentiable_rendering.ipynb +++ b/files/camera_position_optimization_with_differentiable_rendering.ipynb @@ -50,7 +50,7 @@ "id": "qkX7DiM6rmeM" }, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -67,19 +67,25 @@ }, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -217,9 +223,9 @@ "source": [ "### Create a renderer\n", "\n", - "A **renderer** in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest. \n", + "A **renderer** in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest. \n", "\n", - "For optimizing the camera position we will use a renderer which produces a **silhouette** of the object only and does not apply any **lighting** or **shading**. 
We will also initialize another renderer which applies full **phong shading** and use this for visualizing the outputs. " + "For optimizing the camera position we will use a renderer which produces a **silhouette** of the object only and does not apply any **lighting** or **shading**. We will also initialize another renderer which applies full **Phong shading** and use this for visualizing the outputs. " ] }, { @@ -260,7 +266,7 @@ ")\n", "\n", "\n", - "# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.\n", + "# We will also create a Phong renderer. This is simpler and only needs to render one face per pixel.\n", "raster_settings = RasterizationSettings(\n", " image_size=256, \n", " blur_radius=0.0, \n", @@ -316,15 +322,15 @@ "R, T = look_at_view_transform(distance, elevation, azimuth, device=device)\n", "\n", "# Render the teapot providing the values of R and T. \n", - "silhouete = silhouette_renderer(meshes_world=teapot_mesh, R=R, T=T)\n", + "silhouette = silhouette_renderer(meshes_world=teapot_mesh, R=R, T=T)\n", "image_ref = phong_renderer(meshes_world=teapot_mesh, R=R, T=T)\n", "\n", - "silhouete = silhouete.cpu().numpy()\n", + "silhouette = silhouette.cpu().numpy()\n", "image_ref = image_ref.cpu().numpy()\n", "\n", "plt.figure(figsize=(10, 10))\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(silhouete.squeeze()[..., 3]) # only plot the alpha channel of the RGBA image\n", + "plt.imshow(silhouette.squeeze()[..., 3]) # only plot the alpha channel of the RGBA image\n", "plt.grid(False)\n", "plt.subplot(1, 2, 2)\n", "plt.imshow(image_ref.squeeze())\n", @@ -371,7 +377,7 @@ " def forward(self):\n", " \n", " # Render the image using the updated camera position. Based on the new position of the \n", - " # camer we calculate the rotation and translation matrices\n", + " # camera we calculate the rotation and translation matrices\n", " R = look_at_rotation(self.camera_position[None, :], device=self.device) # (1, 3, 3)\n", " T = -torch.bmm(R.transpose(1, 2), self.camera_position[None, :, None])[:, :, 0] # (1, 3)\n", " \n", @@ -514,7 +520,6 @@ " plt.figure()\n", " plt.imshow(image[..., :3])\n", " plt.title(\"iter: %d, loss: %0.2f\" % (i, loss.data))\n", - " plt.grid(\"off\")\n", " plt.axis(\"off\")\n", " \n", "writer.close()" diff --git a/files/camera_position_optimization_with_differentiable_rendering.py b/files/camera_position_optimization_with_differentiable_rendering.py index 6bef854a..c52b1c7a 100644 --- a/files/camera_position_optimization_with_differentiable_rendering.py +++ b/files/camera_position_optimization_with_differentiable_rendering.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -22,24 +22,30 @@ # ## 0. Install and import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -121,9 +127,9 @@ teapot_mesh = Meshes( # ### Create a renderer # -# A **renderer** in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest. +# A **renderer** in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest. # -# For optimizing the camera position we will use a renderer which produces a **silhouette** of the object only and does not apply any **lighting** or **shading**. We will also initialize another renderer which applies full **phong shading** and use this for visualizing the outputs. +# For optimizing the camera position we will use a renderer which produces a **silhouette** of the object only and does not apply any **lighting** or **shading**. We will also initialize another renderer which applies full **Phong shading** and use this for visualizing the outputs. # In[ ]: @@ -156,7 +162,7 @@ silhouette_renderer = MeshRenderer( ) -# We will also create a phong renderer. This is simpler and only needs to render one face per pixel. +# We will also create a Phong renderer. This is simpler and only needs to render one face per pixel. raster_settings = RasterizationSettings( image_size=256, blur_radius=0.0, @@ -193,15 +199,15 @@ azimuth = 0.0 # No rotation so the camera is positioned on the +Z axis. R, T = look_at_view_transform(distance, elevation, azimuth, device=device) # Render the teapot providing the values of R and T. 
-silhouete = silhouette_renderer(meshes_world=teapot_mesh, R=R, T=T) +silhouette = silhouette_renderer(meshes_world=teapot_mesh, R=R, T=T) image_ref = phong_renderer(meshes_world=teapot_mesh, R=R, T=T) -silhouete = silhouete.cpu().numpy() +silhouette = silhouette.cpu().numpy() image_ref = image_ref.cpu().numpy() plt.figure(figsize=(10, 10)) plt.subplot(1, 2, 1) -plt.imshow(silhouete.squeeze()[..., 3]) # only plot the alpha channel of the RGBA image +plt.imshow(silhouette.squeeze()[..., 3]) # only plot the alpha channel of the RGBA image plt.grid(False) plt.subplot(1, 2, 2) plt.imshow(image_ref.squeeze()) @@ -233,7 +239,7 @@ class Model(nn.Module): def forward(self): # Render the image using the updated camera position. Based on the new position of the - # camer we calculate the rotation and translation matrices + # camera we calculate the rotation and translation matrices R = look_at_rotation(self.camera_position[None, :], device=self.device) # (1, 3, 3) T = -torch.bmm(R.transpose(1, 2), self.camera_position[None, :, None])[:, :, 0] # (1, 3) @@ -313,7 +319,6 @@ for i in loop: plt.figure() plt.imshow(image[..., :3]) plt.title("iter: %d, loss: %0.2f" % (i, loss.data)) - plt.grid("off") plt.axis("off") writer.close() diff --git a/files/dataloaders_ShapeNetCore_R2N2.ipynb b/files/dataloaders_ShapeNetCore_R2N2.ipynb index 7dc0f4d0..bf6389f4 100644 --- a/files/dataloaders_ShapeNetCore_R2N2.ipynb +++ b/files/dataloaders_ShapeNetCore_R2N2.ipynb @@ -33,7 +33,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -42,19 +42,25 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -184,7 +190,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can retrieve a model by indexing into the loaded dataset. For both ShapeNetCore and R2N2, we can examine the category this model belongs to (in the form of a synset id, equivalend to wnid described in ImageNet's API: http://image-net.org/download-API), its model id, and its vertices and faces." + "We can retrieve a model by indexing into the loaded dataset. 
For both ShapeNetCore and R2N2, we can examine the category this model belongs to (in the form of a synset id, equivalent to wnid described in ImageNet's API: http://image-net.org/download-API), its model id, and its vertices and faces." ] }, { @@ -248,11 +254,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Training deep learning models, usually requires passing in batches of inputs. The `torch.utils.data.DataLoader` from Pytorch helps us do this. PyTorch3D provides a function `collate_batched_meshes` to group the input meshes into a single `Meshes` object which represents the batch. The `Meshes` datastructure can then be used directly by other PyTorch3D ops which might be part of the deep learning model (e.g. `graph_conv`).\n", + "Training deep learning models, usually requires passing in batches of inputs. The `torch.utils.data.DataLoader` from PyTorch helps us do this. PyTorch3D provides a function `collate_batched_meshes` to group the input meshes into a single `Meshes` object which represents the batch. The `Meshes` datastructure can then be used directly by other PyTorch3D ops which might be part of the deep learning model (e.g. `graph_conv`).\n", "\n", "For R2N2, if all the models in the batch have the same number of views, the views, rotation matrices, translation matrices, intrinsic matrices and voxels will also be stacked into batched tensors.\n", "\n", - "**NOTE**: All models in the `val` split of R2N2 have 24 views, but there are 8 models that split their 24 views between `train` and `test` splits, in which case `collate_batched_meshes` will only be able to join the matrices, views and voxels as lists. However, this can be avoided by laoding only one view of each model by setting `return_all_views = False`." + "**NOTE**: All models in the `val` split of R2N2 have 24 views, but there are 8 models that split their 24 views between `train` and `test` splits, in which case `collate_batched_meshes` will only be able to join the matrices, views and voxels as lists. However, this can be avoided by loading only one view of each model by setting `return_all_views = False`." ] }, { @@ -289,7 +295,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3. Render ShapeNetCore models with PyTorch3D's differntiable renderer" + "## 3. Render ShapeNetCore models with PyTorch3D's differentiable renderer" ] }, { @@ -444,7 +450,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we will visualize PyTorch3d's renderings:" + "Next, we will visualize PyTorch3D's renderings:" ] }, { diff --git a/files/dataloaders_ShapeNetCore_R2N2.py b/files/dataloaders_ShapeNetCore_R2N2.py index f875130a..0ee336c1 100644 --- a/files/dataloaders_ShapeNetCore_R2N2.py +++ b/files/dataloaders_ShapeNetCore_R2N2.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -17,24 +17,30 @@ # ## 0. Install and import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -120,7 +126,7 @@ SPLITS_PATH = "None" r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH, return_voxels=True) -# We can retrieve a model by indexing into the loaded dataset. For both ShapeNetCore and R2N2, we can examine the category this model belongs to (in the form of a synset id, equivalend to wnid described in ImageNet's API: http://image-net.org/download-API), its model id, and its vertices and faces. +# We can retrieve a model by indexing into the loaded dataset. For both ShapeNetCore and R2N2, we can examine the category this model belongs to (in the form of a synset id, equivalent to wnid described in ImageNet's API: http://image-net.org/download-API), its model id, and its vertices and faces. # In[ ]: @@ -155,11 +161,11 @@ image_grid(r2n2_renderings.numpy(), rows=1, cols=2, rgb=True) # ## 2. Use the datasets with `torch.utils.data.DataLoader` -# Training deep learning models, usually requires passing in batches of inputs. The `torch.utils.data.DataLoader` from Pytorch helps us do this. PyTorch3D provides a function `collate_batched_meshes` to group the input meshes into a single `Meshes` object which represents the batch. The `Meshes` datastructure can then be used directly by other PyTorch3D ops which might be part of the deep learning model (e.g. `graph_conv`). +# Training deep learning models, usually requires passing in batches of inputs. The `torch.utils.data.DataLoader` from PyTorch helps us do this. PyTorch3D provides a function `collate_batched_meshes` to group the input meshes into a single `Meshes` object which represents the batch. The `Meshes` datastructure can then be used directly by other PyTorch3D ops which might be part of the deep learning model (e.g. `graph_conv`). # # For R2N2, if all the models in the batch have the same number of views, the views, rotation matrices, translation matrices, intrinsic matrices and voxels will also be stacked into batched tensors. # -# **NOTE**: All models in the `val` split of R2N2 have 24 views, but there are 8 models that split their 24 views between `train` and `test` splits, in which case `collate_batched_meshes` will only be able to join the matrices, views and voxels as lists. However, this can be avoided by laoding only one view of each model by setting `return_all_views = False`. 
+# **NOTE**: All models in the `val` split of R2N2 have 24 views, but there are 8 models that split their 24 views between `train` and `test` splits, in which case `collate_batched_meshes` will only be able to join the matrices, views and voxels as lists. However, this can be avoided by loading only one view of each model by setting `return_all_views = False`. # In[ ]: @@ -180,7 +186,7 @@ batch_renderings = r2n2_batch["images"] # (N, V, H, W, 3), and in this case V is image_grid(batch_renderings.squeeze().numpy(), rows=3, cols=4, rgb=True) -# ## 3. Render ShapeNetCore models with PyTorch3D's differntiable renderer +# ## 3. Render ShapeNetCore models with PyTorch3D's differentiable renderer # Both `ShapeNetCore` and `R2N2` dataloaders have customized `render` functions that support rendering models by specifying their model ids, categories or indices using PyTorch3D's differentiable renderer implementation. @@ -272,7 +278,7 @@ original_rendering = r2n2_dataset[6,[1,2]]["images"] image_grid(original_rendering.numpy(), rows=1, cols=2, rgb=True) -# Next, we will visualize PyTorch3d's renderings: +# Next, we will visualize PyTorch3D's renderings: # In[ ]: diff --git a/files/deform_source_mesh_to_target_mesh.ipynb b/files/deform_source_mesh_to_target_mesh.ipynb index cf676ee6..ecd884b7 100644 --- a/files/deform_source_mesh_to_target_mesh.ipynb +++ b/files/deform_source_mesh_to_target_mesh.ipynb @@ -68,7 +68,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -81,19 +81,25 @@ }, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -400,10 +406,10 @@ " loop.set_description('total_loss = %.6f' % loss)\n", " \n", " # Save the losses for plotting\n", - " chamfer_losses.append(loss_chamfer)\n", - " edge_losses.append(loss_edge)\n", - " normal_losses.append(loss_normal)\n", - " laplacian_losses.append(loss_laplacian)\n", + " chamfer_losses.append(float(loss_chamfer.detach().cpu()))\n", + " edge_losses.append(float(loss_edge.detach().cpu()))\n", + " normal_losses.append(float(loss_normal.detach().cpu()))\n", + " laplacian_losses.append(float(loss_laplacian.detach().cpu()))\n", " \n", " # Plot mesh\n", " if i % plot_period == 
0:\n", diff --git a/files/deform_source_mesh_to_target_mesh.py b/files/deform_source_mesh_to_target_mesh.py index 7752ebcb..8afb908e 100644 --- a/files/deform_source_mesh_to_target_mesh.py +++ b/files/deform_source_mesh_to_target_mesh.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -34,24 +34,30 @@ # ## 0. Install and Import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -237,10 +243,10 @@ for i in loop: loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting - chamfer_losses.append(loss_chamfer) - edge_losses.append(loss_edge) - normal_losses.append(loss_normal) - laplacian_losses.append(loss_laplacian) + chamfer_losses.append(float(loss_chamfer.detach().cpu())) + edge_losses.append(float(loss_edge.detach().cpu())) + normal_losses.append(float(loss_normal.detach().cpu())) + laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: diff --git a/files/fit_textured_mesh.ipynb b/files/fit_textured_mesh.ipynb index fe045b2f..736a832a 100644 --- a/files/fit_textured_mesh.ipynb +++ b/files/fit_textured_mesh.ipynb @@ -46,7 +46,7 @@ "id": "okLalbR_g7NS" }, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -59,19 +59,25 @@ }, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -91,7 +97,6 @@ "import os\n", "import torch\n", "import matplotlib.pyplot as plt\n", - "from skimage.io import imread\n", "\n", "from pytorch3d.utils import ico_sphere\n", "import numpy as np\n", @@ -187,11 +192,11 @@ "source": [ "### 1. Load a mesh and texture file\n", "\n", - "Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. \n", + "Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object. \n", "\n", "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n", "\n", - "**TexturesVertex** is an auxillary datastructure for storing vertex rgb texture information about meshes. \n", + "**TexturesVertex** is an auxiliary datastructure for storing vertex rgb texture information about meshes. \n", "\n", "**Meshes** has several class methods which are used throughout the rendering pipeline." ] @@ -255,7 +260,7 @@ "N = verts.shape[0]\n", "center = verts.mean(0)\n", "scale = max((verts - center).abs().max(0)[0])\n", - "mesh.offset_verts_(-center.expand(N, 3))\n", + "mesh.offset_verts_(-center)\n", "mesh.scale_verts_((1.0 / float(scale)));" ] }, @@ -309,7 +314,7 @@ "# purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to \n", "# rasterize_meshes.py for explanations of these parameters. We also leave \n", "# bin_size and max_faces_per_bin to their default values of None, which sets \n", - "# their values using huristics and ensures that the faster coarse-to-fine \n", + "# their values using heuristics and ensures that the faster coarse-to-fine \n", "# rasterization method is used. Refer to docs/notes/renderer.md for an \n", "# explanation of the difference between naive and coarse-to-fine rasterization. \n", "raster_settings = RasterizationSettings(\n", @@ -318,8 +323,8 @@ " faces_per_pixel=1, \n", ")\n", "\n", - "# Create a phong renderer by composing a rasterizer and a shader. The textured \n", - "# phong shader will interpolate the texture uv coordinates for each vertex, \n", + "# Create a Phong renderer by composing a rasterizer and a shader. 
The textured \n", + "# Phong shader will interpolate the texture uv coordinates for each vertex, \n", "# sample from a texture image and apply the Phong lighting model\n", "renderer = MeshRenderer(\n", " rasterizer=MeshRasterizer(\n", @@ -380,7 +385,7 @@ "id": "gOb4rYx65E8z" }, "source": [ - "Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We contruct a soft silhouette shader to render this alpha channel." + "Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel." ] }, { @@ -449,7 +454,8 @@ " target_image=target_rgb[1], title='', \n", " silhouette=False):\n", " inds = 3 if silhouette else range(3)\n", - " predicted_images = renderer(predicted_mesh)\n", + " with torch.no_grad():\n", + " predicted_images = renderer(predicted_mesh)\n", " plt.figure(figsize=(20, 10))\n", " plt.subplot(1, 2, 1)\n", " plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy())\n", @@ -457,7 +463,6 @@ " plt.subplot(1, 2, 2)\n", " plt.imshow(target_image.cpu().detach().numpy())\n", " plt.title(title)\n", - " plt.grid(\"off\")\n", " plt.axis(\"off\")\n", "\n", "# Plot losses as a function of optimization iteration\n", @@ -601,7 +606,7 @@ "id": "QLc9zK8lEqFS" }, "source": [ - "We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the sillhouettes of the target images:" + "We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images:" ] }, { @@ -640,7 +645,8 @@ " sum_loss = torch.tensor(0.0, device=device)\n", " for k, l in loss.items():\n", " sum_loss += l * losses[k][\"weight\"]\n", - " losses[k][\"values\"].append(l)\n", + " losses[k][\"values\"].append(float(l.detach().cpu()))\n", + "\n", " \n", " # Print the losses\n", " loop.set_description(\"total_loss = %.6f\" % sum_loss)\n", @@ -824,7 +830,7 @@ " sum_loss = torch.tensor(0.0, device=device)\n", " for k, l in loss.items():\n", " sum_loss += l * losses[k][\"weight\"]\n", - " losses[k][\"values\"].append(l)\n", + " losses[k][\"values\"].append(float(l.detach().cpu()))\n", " \n", " # Print the losses\n", " loop.set_description(\"total_loss = %.6f\" % sum_loss)\n", diff --git a/files/fit_textured_mesh.py b/files/fit_textured_mesh.py index f28c219e..618b581c 100644 --- a/files/fit_textured_mesh.py +++ b/files/fit_textured_mesh.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -17,24 +17,30 @@ # ## 0. Install and Import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -47,7 +53,6 @@ else: import os import torch import matplotlib.pyplot as plt -from skimage.io import imread from pytorch3d.utils import ico_sphere import numpy as np @@ -105,11 +110,11 @@ from plot_image_grid import image_grid # ### 1. Load a mesh and texture file # -# Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. +# Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object. # # **Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. # -# **TexturesVertex** is an auxillary datastructure for storing vertex rgb texture information about meshes. +# **TexturesVertex** is an auxiliary datastructure for storing vertex rgb texture information about meshes. # # **Meshes** has several class methods which are used throughout the rendering pipeline. @@ -150,7 +155,7 @@ verts = mesh.verts_packed() N = verts.shape[0] center = verts.mean(0) scale = max((verts - center).abs().max(0)[0]) -mesh.offset_verts_(-center.expand(N, 3)) +mesh.offset_verts_(-center) mesh.scale_verts_((1.0 / float(scale))); @@ -190,7 +195,7 @@ camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...], # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to # rasterize_meshes.py for explanations of these parameters. We also leave # bin_size and max_faces_per_bin to their default values of None, which sets -# their values using huristics and ensures that the faster coarse-to-fine +# their values using heuristics and ensures that the faster coarse-to-fine # rasterization method is used. Refer to docs/notes/renderer.md for an # explanation of the difference between naive and coarse-to-fine rasterization. raster_settings = RasterizationSettings( @@ -199,8 +204,8 @@ raster_settings = RasterizationSettings( faces_per_pixel=1, ) -# Create a phong renderer by composing a rasterizer and a shader. The textured -# phong shader will interpolate the texture uv coordinates for each vertex, +# Create a Phong renderer by composing a rasterizer and a shader. 
The textured +# Phong shader will interpolate the texture uv coordinates for each vertex, # sample from a texture image and apply the Phong lighting model renderer = MeshRenderer( rasterizer=MeshRasterizer( @@ -239,7 +244,7 @@ image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True) plt.show() -# Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We contruct a soft silhouette shader to render this alpha channel. +# Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel. # In[ ]: @@ -285,7 +290,8 @@ def visualize_prediction(predicted_mesh, renderer=renderer_silhouette, target_image=target_rgb[1], title='', silhouette=False): inds = 3 if silhouette else range(3) - predicted_images = renderer(predicted_mesh) + with torch.no_grad(): + predicted_images = renderer(predicted_mesh) plt.figure(figsize=(20, 10)) plt.subplot(1, 2, 1) plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy()) @@ -293,7 +299,6 @@ def visualize_prediction(predicted_mesh, renderer=renderer_silhouette, plt.subplot(1, 2, 2) plt.imshow(target_image.cpu().detach().numpy()) plt.title(title) - plt.grid("off") plt.axis("off") # Plot losses as a function of optimization iteration @@ -385,7 +390,7 @@ deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True) optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) -# We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the sillhouettes of the target images: +# We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images: # In[ ]: @@ -416,7 +421,8 @@ for i in loop: sum_loss = torch.tensor(0.0, device=device) for k, l in loss.items(): sum_loss += l * losses[k]["weight"] - losses[k]["values"].append(l) + losses[k]["values"].append(float(l.detach().cpu())) + # Print the losses loop.set_description("total_loss = %.6f" % sum_loss) @@ -547,7 +553,7 @@ for i in loop: sum_loss = torch.tensor(0.0, device=device) for k, l in loss.items(): sum_loss += l * losses[k]["weight"] - losses[k]["values"].append(l) + losses[k]["values"].append(float(l.detach().cpu())) # Print the losses loop.set_description("total_loss = %.6f" % sum_loss) diff --git a/files/render_colored_points.ipynb b/files/render_colored_points.ipynb index 6aa206a6..f191c7df 100644 --- a/files/render_colored_points.ipynb +++ b/files/render_colored_points.ipynb @@ -32,7 +32,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -41,19 +41,25 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -70,7 +76,6 @@ "import torch\n", "import torch.nn.functional as F\n", "import matplotlib.pyplot as plt\n", - "from skimage.io import imread\n", "\n", "# Util function for loading point clouds|\n", "import numpy as np\n", @@ -151,7 +156,7 @@ "source": [ "## Create a renderer\n", "\n", - "A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest.\n", + "A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest.\n", "\n", "In this example we will first create a **renderer** which uses an **orthographic camera**, and applies **alpha compositing**. Then we learn how to vary different components using the modular API. 
\n", "\n", @@ -196,7 +201,6 @@ "images = renderer(point_cloud)\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\")\n", "plt.axis(\"off\");" ] }, @@ -223,7 +227,6 @@ "\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\")\n", "plt.axis(\"off\");" ] }, @@ -271,7 +274,6 @@ "images = renderer(point_cloud)\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\")\n", "plt.axis(\"off\");" ] }, @@ -297,7 +299,6 @@ "images = renderer(point_cloud)\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\")\n", "plt.axis(\"off\");" ] }, @@ -330,7 +331,6 @@ " bg_col=torch.tensor([0.0, 1.0, 0.0, 1.0], dtype=torch.float32, device=device))\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\")\n", "plt.axis(\"off\");" ] }, diff --git a/files/render_colored_points.py b/files/render_colored_points.py index 0c54351d..9370f804 100644 --- a/files/render_colored_points.py +++ b/files/render_colored_points.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -16,24 +16,30 @@ # ## Import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -47,7 +53,6 @@ import os import torch import torch.nn.functional as F import matplotlib.pyplot as plt -from skimage.io import imread # Util function for loading point clouds| import numpy as np @@ -108,7 +113,7 @@ point_cloud = Pointclouds(points=[verts], features=[rgb]) # ## Create a renderer # -# A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest. +# A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest. # # In this example we will first create a **renderer** which uses an **orthographic camera**, and applies **alpha compositing**. 
Then we learn how to vary different components using the modular API. # @@ -146,7 +151,6 @@ renderer = PointsRenderer( images = renderer(point_cloud) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off") plt.axis("off"); @@ -165,7 +169,6 @@ images = renderer(point_cloud) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off") plt.axis("off"); @@ -202,7 +205,6 @@ renderer = PointsRenderer( images = renderer(point_cloud) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off") plt.axis("off"); @@ -220,7 +222,6 @@ renderer = PointsRenderer( images = renderer(point_cloud) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off") plt.axis("off"); @@ -245,7 +246,6 @@ images = renderer(point_cloud, gamma=(1e-4,), bg_col=torch.tensor([0.0, 1.0, 0.0, 1.0], dtype=torch.float32, device=device)) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off") plt.axis("off"); diff --git a/files/render_densepose.ipynb b/files/render_densepose.ipynb index 264499d3..068694d3 100644 --- a/files/render_densepose.ipynb +++ b/files/render_densepose.ipynb @@ -39,7 +39,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If torch, torchvision and PyTorch3D are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -48,19 +48,25 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -86,12 +92,11 @@ "import os\n", "import torch\n", "import matplotlib.pyplot as plt\n", - "from skimage.io import imread\n", "import numpy as np\n", "\n", "# libraries for reading data from files\n", "from scipy.io import loadmat\n", - "from pytorch3d.io.utils import _read_image\n", + "from PIL import Image\n", "import pickle\n", "\n", "# Data structures and functions for rendering\n", @@ -178,13 +183,15 @@ " data = pickle.load(f, encoding='latin1') \n", " v_template = torch.Tensor(data['v_template']).to(device) # (6890, 3)\n", "ALP_UV = loadmat(data_filename)\n", - "tex = torch.from_numpy(_read_image(file_name=tex_filename, format='RGB') / 255. 
).unsqueeze(0).to(device)\n", + "with Image.open(tex_filename) as image:\n", + " np_image = np.asarray(image.convert(\"RGB\")).astype(np.float32)\n", + "tex = torch.from_numpy(np_image / 255.)[None].to(device)\n", "\n", - "verts = torch.from_numpy((ALP_UV[\"All_vertices\"]).astype(int)).squeeze().to(device) # (7829, 1)\n", + "verts = torch.from_numpy((ALP_UV[\"All_vertices\"]).astype(int)).squeeze().to(device) # (7829,)\n", "U = torch.Tensor(ALP_UV['All_U_norm']).to(device) # (7829, 1)\n", "V = torch.Tensor(ALP_UV['All_V_norm']).to(device) # (7829, 1)\n", "faces = torch.from_numpy((ALP_UV['All_Faces'] - 1).astype(int)).to(device) # (13774, 3)\n", - "face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze()" + "face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze() # (13774,)" ] }, { @@ -196,7 +203,6 @@ "# Display the texture image\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(tex.squeeze(0).cpu())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -222,6 +228,9 @@ " part = rows * i + j + 1 # parts are 1-indexed in face_indices\n", " offset_per_part[part] = (u, v)\n", "\n", + "U_norm = U.clone()\n", + "V_norm = V.clone()\n", + "\n", "# iterate over faces and offset the corresponding vertex u and v values\n", "for i in range(len(faces)):\n", " face_vert_idxs = faces[i]\n", @@ -232,15 +241,15 @@ " # vertices are reused, but we don't want to offset multiple times\n", " if vert_idx.item() not in already_offset:\n", " # offset u value\n", - " U[vert_idx] = U[vert_idx] / cols + offset_u\n", + " U_norm[vert_idx] = U[vert_idx] / cols + offset_u\n", " # offset v value\n", " # this also flips each part locally, as each part is upside down\n", - " V[vert_idx] = (1 - V[vert_idx]) / rows + offset_v\n", + " V_norm[vert_idx] = (1 - V[vert_idx]) / rows + offset_v\n", " # add vertex to our set tracking offsetted vertices\n", " already_offset.add(vert_idx.item())\n", "\n", "# invert V values\n", - "U_norm, V_norm = U, 1 - V" + "V_norm = 1 - V_norm" ] }, { @@ -257,10 +266,7 @@ "# Therefore when initializing the Meshes class,\n", "# we need to map each of the vertices referenced by the DensePose faces (in verts, which is the \"All_vertices\" field)\n", "# to the correct xyz coordinate in the SMPL template mesh.\n", - "v_template_extended = torch.stack(list(map(lambda vert: v_template[vert-1], verts))).unsqueeze(0).to(device) # (1, 7829, 3)\n", - "\n", - "# add a batch dimension to faces\n", - "faces = faces.unsqueeze(0)" + "v_template_extended = v_template[verts-1][None] # (1, 7829, 3)" ] }, { @@ -271,7 +277,7 @@ "\n", "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.\n", "\n", - "**TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes." + "**TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes." ] }, { @@ -280,8 +286,8 @@ "metadata": {}, "outputs": [], "source": [ - "texture = TexturesUV(maps=tex, faces_uvs=faces, verts_uvs=verts_uv)\n", - "mesh = Meshes(v_template_extended, faces, texture)" + "texture = TexturesUV(maps=tex, faces_uvs=faces[None], verts_uvs=verts_uv)\n", + "mesh = Meshes(v_template_extended, faces[None], texture)" ] }, { @@ -314,7 +320,7 @@ "# Place a point light in front of the person. \n", "lights = PointLights(device=device, location=[[0.0, 0.0, 2.0]])\n", "\n", - "# Create a phong renderer by composing a rasterizer and a shader. 
The textured phong shader will \n", + "# Create a Phong renderer by composing a rasterizer and a shader. The textured Phong shader will \n", "# interpolate the texture uv coordinates for each vertex, sample from a texture image and \n", "# apply the Phong lighting model\n", "renderer = MeshRenderer(\n", @@ -346,7 +352,6 @@ "images = renderer(mesh)\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -387,7 +392,6 @@ "source": [ "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, diff --git a/files/render_densepose.py b/files/render_densepose.py index 2a78ed5c..544c95d5 100644 --- a/files/render_densepose.py +++ b/files/render_densepose.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -20,24 +20,30 @@ # ## Import modules -# If torch, torchvision and PyTorch3D are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -57,12 +63,11 @@ get_ipython().system('pip install chumpy') import os import torch import matplotlib.pyplot as plt -from skimage.io import imread import numpy as np # libraries for reading data from files from scipy.io import loadmat -from pytorch3d.io.utils import _read_image +from PIL import Image import pickle # Data structures and functions for rendering @@ -133,13 +138,15 @@ with open(verts_filename, 'rb') as f: data = pickle.load(f, encoding='latin1') v_template = torch.Tensor(data['v_template']).to(device) # (6890, 3) ALP_UV = loadmat(data_filename) -tex = torch.from_numpy(_read_image(file_name=tex_filename, format='RGB') / 255. 
).unsqueeze(0).to(device) +with Image.open(tex_filename) as image: + np_image = np.asarray(image.convert("RGB")).astype(np.float32) +tex = torch.from_numpy(np_image / 255.)[None].to(device) -verts = torch.from_numpy((ALP_UV["All_vertices"]).astype(int)).squeeze().to(device) # (7829, 1) +verts = torch.from_numpy((ALP_UV["All_vertices"]).astype(int)).squeeze().to(device) # (7829,) U = torch.Tensor(ALP_UV['All_U_norm']).to(device) # (7829, 1) V = torch.Tensor(ALP_UV['All_V_norm']).to(device) # (7829, 1) faces = torch.from_numpy((ALP_UV['All_Faces'] - 1).astype(int)).to(device) # (13774, 3) -face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze() +face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze() # (13774,) # In[ ]: @@ -148,7 +155,6 @@ face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze() # Display the texture image plt.figure(figsize=(10, 10)) plt.imshow(tex.squeeze(0).cpu()) -plt.grid("off"); plt.axis("off"); @@ -166,6 +172,9 @@ for i, u in enumerate(np.linspace(0, 1, cols, endpoint=False)): part = rows * i + j + 1 # parts are 1-indexed in face_indices offset_per_part[part] = (u, v) +U_norm = U.clone() +V_norm = V.clone() + # iterate over faces and offset the corresponding vertex u and v values for i in range(len(faces)): face_vert_idxs = faces[i] @@ -176,15 +185,15 @@ for i in range(len(faces)): # vertices are reused, but we don't want to offset multiple times if vert_idx.item() not in already_offset: # offset u value - U[vert_idx] = U[vert_idx] / cols + offset_u + U_norm[vert_idx] = U[vert_idx] / cols + offset_u # offset v value # this also flips each part locally, as each part is upside down - V[vert_idx] = (1 - V[vert_idx]) / rows + offset_v + V_norm[vert_idx] = (1 - V[vert_idx]) / rows + offset_v # add vertex to our set tracking offsetted vertices already_offset.add(vert_idx.item()) # invert V values -U_norm, V_norm = U, 1 - V +V_norm = 1 - V_norm # In[ ]: @@ -198,23 +207,20 @@ verts_uv = torch.cat([U_norm[None],V_norm[None]], dim=2) # (1, 7829, 2) # Therefore when initializing the Meshes class, # we need to map each of the vertices referenced by the DensePose faces (in verts, which is the "All_vertices" field) # to the correct xyz coordinate in the SMPL template mesh. -v_template_extended = torch.stack(list(map(lambda vert: v_template[vert-1], verts))).unsqueeze(0).to(device) # (1, 7829, 3) - -# add a batch dimension to faces -faces = faces.unsqueeze(0) +v_template_extended = v_template[verts-1][None] # (1, 7829, 3) # ### Create our textured mesh # # **Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. # -# **TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes. +# **TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes. # In[ ]: -texture = TexturesUV(maps=tex, faces_uvs=faces, verts_uvs=verts_uv) -mesh = Meshes(v_template_extended, faces, texture) +texture = TexturesUV(maps=tex, faces_uvs=faces[None], verts_uvs=verts_uv) +mesh = Meshes(v_template_extended, faces[None], texture) # ## Create a renderer @@ -239,7 +245,7 @@ raster_settings = RasterizationSettings( # Place a point light in front of the person. lights = PointLights(device=device, location=[[0.0, 0.0, 2.0]]) -# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will +# Create a Phong renderer by composing a rasterizer and a shader. 
The textured Phong shader will # interpolate the texture uv coordinates for each vertex, sample from a texture image and # apply the Phong lighting model renderer = MeshRenderer( @@ -263,7 +269,6 @@ renderer = MeshRenderer( images = renderer(mesh) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off"); plt.axis("off"); @@ -293,7 +298,6 @@ images = renderer(mesh, lights=lights, cameras=cameras) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off"); plt.axis("off"); diff --git a/files/render_textured_meshes.ipynb b/files/render_textured_meshes.ipynb index c28acbf0..2f916b26 100644 --- a/files/render_textured_meshes.ipynb +++ b/files/render_textured_meshes.ipynb @@ -47,7 +47,7 @@ "id": "okLalbR_g7NS" }, "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" + "Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:" ] }, { @@ -64,19 +64,25 @@ }, "outputs": [], "source": [ - "!pip install torch torchvision\n", "import os\n", "import sys\n", "import torch\n", - "if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n", - " !pip install pytorch3d\n", - "else:\n", - " need_pytorch3d=False\n", - " try:\n", - " import pytorch3d\n", - " except ModuleNotFoundError:\n", - " need_pytorch3d=True\n", - " if need_pytorch3d:\n", + "need_pytorch3d=False\n", + "try:\n", + " import pytorch3d\n", + "except ModuleNotFoundError:\n", + " need_pytorch3d=True\n", + "if need_pytorch3d:\n", + " if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n", + " # We try to install PyTorch3D via a released wheel.\n", + " version_str=\"\".join([\n", + " f\"py3{sys.version_info.minor}_cu\",\n", + " torch.version.cuda.replace(\".\",\"\"),\n", + " f\"_pyt{torch.__version__[0:5:2]}\"\n", + " ])\n", + " !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n", + " else:\n", + " # We try to install PyTorch3D from source.\n", " !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n", " !tar xzf 1.10.0.tar.gz\n", " os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n", @@ -96,7 +102,6 @@ "import os\n", "import torch\n", "import matplotlib.pyplot as plt\n", - "from skimage.io import imread\n", "\n", "# Util function for loading meshes\n", "from pytorch3d.io import load_objs_as_meshes, load_obj\n", @@ -185,11 +190,11 @@ "source": [ "### 1. Load a mesh and texture file\n", "\n", - "Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. \n", + "Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object. \n", "\n", "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n", "\n", - "**TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes. \n", + "**TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes. \n", "\n", "**Meshes** has several class methods which are used throughout the rendering pipeline." 
] @@ -277,7 +282,6 @@ "plt.figure(figsize=(7,7))\n", "texture_image=mesh.textures.maps_padded()\n", "plt.imshow(texture_image.squeeze().cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -296,7 +300,6 @@ "source": [ "plt.figure(figsize=(7,7))\n", "texturesuv_image_matplotlib(mesh.textures, subsample=None)\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -311,7 +314,7 @@ "\n", "A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest.\n", "\n", - "In this example we will first create a **renderer** which uses a **perspective camera**, a **point light** and applies **phong shading**. Then we learn how to vary different components using the modular API. " + "In this example we will first create a **renderer** which uses a **perspective camera**, a **point light** and applies **Phong shading**. Then we learn how to vary different components using the modular API. " ] }, { @@ -346,7 +349,7 @@ "# -z direction. \n", "lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])\n", "\n", - "# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will \n", + "# Create a Phong renderer by composing a rasterizer and a shader. The textured Phong shader will \n", "# interpolate the texture uv coordinates for each vertex, sample from a texture image and \n", "# apply the Phong lighting model\n", "renderer = MeshRenderer(\n", @@ -399,7 +402,6 @@ "images = renderer(mesh)\n", "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -412,7 +414,7 @@ "source": [ "## 4. Move the light behind the object and re-render\n", "\n", - "We can pass arbirary keyword arguments to the `rasterizer`/`shader` via the call to the `renderer` so the renderer does not need to be reinitialized if any of the settings change/\n", + "We can pass arbitrary keyword arguments to the `rasterizer`/`shader` via the call to the `renderer` so the renderer does not need to be reinitialized if any of the settings change/\n", "\n", "In this case, we can simply update the location of the lights and pass them into the call to the renderer. \n", "\n", @@ -450,7 +452,6 @@ "source": [ "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -514,7 +515,6 @@ "source": [ "plt.figure(figsize=(10, 10))\n", "plt.imshow(images[0, ..., :3].cpu().numpy())\n", - "plt.grid(\"off\");\n", "plt.axis(\"off\");" ] }, @@ -573,7 +573,7 @@ }, "outputs": [], "source": [ - "# We can pass arbirary keyword arguments to the rasterizer/shader via the renderer\n", + "# We can pass arbitrary keyword arguments to the rasterizer/shader via the renderer\n", "# so the renderer does not need to be reinitialized if any of the settings change.\n", "images = renderer(meshes, cameras=cameras, lights=lights)" ] diff --git a/files/render_textured_meshes.py b/files/render_textured_meshes.py index 4df1dc25..93b9b2f4 100644 --- a/files/render_textured_meshes.py +++ b/files/render_textured_meshes.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python + # coding: utf-8 # In[ ]: @@ -18,24 +18,30 @@ # ## 0. Install and Import modules -# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: +# Ensure `torch` and `torchvision` are installed. 
If `pytorch3d` is not installed, install it using the following cell: # In[ ]: -get_ipython().system('pip install torch torchvision') import os import sys import torch -if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): - get_ipython().system('pip install pytorch3d') -else: - need_pytorch3d=False - try: - import pytorch3d - except ModuleNotFoundError: - need_pytorch3d=True - if need_pytorch3d: +need_pytorch3d=False +try: + import pytorch3d +except ModuleNotFoundError: + need_pytorch3d=True +if need_pytorch3d: + if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): + # We try to install PyTorch3D via a released wheel. + version_str="".join([ + f"py3{sys.version_info.minor}_cu", + torch.version.cuda.replace(".",""), + f"_pyt{torch.__version__[0:5:2]}" + ]) + get_ipython().system('pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html') + else: + # We try to install PyTorch3D from source. get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz') get_ipython().system('tar xzf 1.10.0.tar.gz') os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" @@ -48,7 +54,6 @@ else: import os import torch import matplotlib.pyplot as plt -from skimage.io import imread # Util function for loading meshes from pytorch3d.io import load_objs_as_meshes, load_obj @@ -96,11 +101,11 @@ from plot_image_grid import image_grid # ### 1. Load a mesh and texture file # -# Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. +# Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object. # # **Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. # -# **TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes. +# **TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes. # # **Meshes** has several class methods which are used throughout the rendering pipeline. @@ -142,7 +147,6 @@ mesh = load_objs_as_meshes([obj_filename], device=device) plt.figure(figsize=(7,7)) texture_image=mesh.textures.maps_padded() plt.imshow(texture_image.squeeze().cpu().numpy()) -plt.grid("off"); plt.axis("off"); @@ -153,7 +157,6 @@ plt.axis("off"); plt.figure(figsize=(7,7)) texturesuv_image_matplotlib(mesh.textures, subsample=None) -plt.grid("off"); plt.axis("off"); @@ -161,7 +164,7 @@ plt.axis("off"); # # A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest. # -# In this example we will first create a **renderer** which uses a **perspective camera**, a **point light** and applies **phong shading**. Then we learn how to vary different components using the modular API. +# In this example we will first create a **renderer** which uses a **perspective camera**, a **point light** and applies **Phong shading**. Then we learn how to vary different components using the modular API. # In[ ]: @@ -188,7 +191,7 @@ raster_settings = RasterizationSettings( # -z direction. lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]]) -# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will +# Create a Phong renderer by composing a rasterizer and a shader. 
The textured Phong shader will # interpolate the texture uv coordinates for each vertex, sample from a texture image and # apply the Phong lighting model renderer = MeshRenderer( @@ -214,13 +217,12 @@ renderer = MeshRenderer( images = renderer(mesh) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off"); plt.axis("off"); # ## 4. Move the light behind the object and re-render # -# We can pass arbirary keyword arguments to the `rasterizer`/`shader` via the call to the `renderer` so the renderer does not need to be reinitialized if any of the settings change/ +# We can pass arbitrary keyword arguments to the `rasterizer`/`shader` via the call to the `renderer` so the renderer does not need to be reinitialized if any of the settings change/ # # In this case, we can simply update the location of the lights and pass them into the call to the renderer. # @@ -239,7 +241,6 @@ images = renderer(mesh, lights=lights) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off"); plt.axis("off"); @@ -277,7 +278,6 @@ images = renderer(mesh, lights=lights, materials=materials, cameras=cameras) plt.figure(figsize=(10, 10)) plt.imshow(images[0, ..., :3].cpu().numpy()) -plt.grid("off"); plt.axis("off"); @@ -315,7 +315,7 @@ lights.location = torch.tensor([[0.0, 0.0, -3.0]], device=device) # In[ ]: -# We can pass arbirary keyword arguments to the rasterizer/shader via the renderer +# We can pass arbitrary keyword arguments to the rasterizer/shader via the renderer # so the renderer does not need to be reinitialized if any of the settings change. images = renderer(meshes, cameras=cameras, lights=lights) diff --git a/help.html b/help.html index e293fce6..44455d6b 100644 --- a/help.html +++ b/help.html @@ -1,4 +1,4 @@ -PyTorch3D · A library for deep learning with 3D data

Welcome to the PyTorch3D Tutorials

Here you can learn about the structure and applications of Pytorch3D from examples which are in the form of ipython notebooks.

Run interactively

At the top of each example you can find a button named "Run in Google Colab" which will open the notebook in Google Colaboratory where you can run the code directly in the browser with access to GPU support - it looks like this:

You can modify the code and experiment with varying different settings. Remember to install the latest stable version of PyTorch3D and its dependencies. Code to do this with pip is provided in each notebook.

Run locally

There is also a button to download the notebook and source code to run it locally.

\ No newline at end of file +

Welcome to the PyTorch3D Tutorials

Here you can learn about the structure and applications of PyTorch3D from examples in the form of IPython notebooks.

Run interactively

At the top of each example you can find a button named "Run in Google Colab" which will open the notebook in Google Colaboratory, where you can run the code directly in the browser with access to GPU support. It looks like this:

You can modify the code and experiment with different settings. Remember to install the latest stable version of PyTorch3D and its dependencies. Code to do this with pip is provided in each notebook.

Run locally

There is also a button to download the notebook and source code to run it locally.

\ No newline at end of file diff --git a/tutorials/render_colored_points.html b/tutorials/render_colored_points.html index 62fdd918..d16cbc7d 100644 --- a/tutorials/render_colored_points.html +++ b/tutorials/render_colored_points.html @@ -1,4 +1,4 @@ -PyTorch3D · A library for deep learning with 3D data
\ No newline at end of file +
\ No newline at end of file diff --git a/users/index.html b/users/index.html index 51da0487..3cebe847 100644 --- a/users/index.html +++ b/users/index.html @@ -1,4 +1,4 @@ -PyTorch3D · A library for deep learning with 3D data
\ No newline at end of file +
\ No newline at end of file
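
The install cells updated throughout this patch all derive the same wheel-selector string from the running Python, CUDA and PyTorch versions before querying the dl.fbaipublicfiles.com wheel index. As a minimal standalone sketch (not part of the patch itself, and assuming a CUDA build of PyTorch, since torch.version.cuda is None on CPU-only builds), the selector and the resulting pip command can be previewed like this:

import sys
import torch

# Reproduce the selector used by the install cells above,
# e.g. Python 3.9 + CUDA 10.2 + PyTorch 1.9.0 -> "py39_cu102_pyt190".
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),   # "10.2" -> "102"
    f"_pyt{torch.__version__[0:5:2]}",     # "1.9.0" (or "1.9.0+cu102") -> "190"
])
print(
    "pip install pytorch3d -f "
    f"https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html"
)

Running this inside a matching Colab runtime prints the exact command the notebooks execute; on unsupported Python/CUDA/PyTorch combinations the notebooks above instead fall back to building from source with CUB.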
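
Several hunks above also change the loss bookkeeping from appending the loss tensors themselves to appending float(l.detach().cpu()). A short sketch of that pattern (with a hypothetical quadratic loss standing in for the tutorials' chamfer/edge/normal/laplacian terms) shows the intent: the history lists end up holding plain Python numbers rather than live tensors, so they stop referencing GPU memory and autograd state and can be plotted directly.

import torch

weights = torch.randn(100, requires_grad=True)
optimizer = torch.optim.SGD([weights], lr=0.1)

loss_history = []
for step in range(5):
    optimizer.zero_grad()
    loss = (weights ** 2).mean()   # stand-in for the tutorials' loss terms
    loss.backward()
    optimizer.step()
    # Appending `loss` itself would keep a tensor with a grad_fn (and, on
    # GPU runs, device memory) alive for as long as the list exists;
    # detaching and converting to float keeps only the scalar value,
    # which is all the plotting code needs.
    loss_history.append(float(loss.detach().cpu()))

print(loss_history)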