Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-12-20 06:10:34 +08:00)
move LinearWithRepeat to pytorch3d
Summary: Move this simple layer from the NeRF project into pytorch3d.

Reviewed By: shapovalov

Differential Revision: D34126972

fbshipit-source-id: a9c6d6c3c1b662c1b844ea5d1b982007d4df83e6
committed by Facebook GitHub Bot
parent ef21a6f6aa
commit 2a1de3b610

pytorch3d/common/linear_with_repeat.py  93  (new file)
@@ -0,0 +1,93 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Tuple

import torch
import torch.nn.functional as F
from torch.nn import Parameter, init


class LinearWithRepeat(torch.nn.Module):
    """
    if x has shape (..., k, n1)
    and y has shape (..., n2)
    then
    LinearWithRepeat(n1 + n2, out_features).forward((x,y))
    is equivalent to
    Linear(n1 + n2, out_features).forward(
        torch.cat([x, y.unsqueeze(-2).expand(..., k, n2)], dim=-1)
    )

    Or visually:
    Given the following, for each ray,

                feature ->

    ray         xxxxxxxx
    position    xxxxxxxx
      |         xxxxxxxx
      v         xxxxxxxx


    and
                yyyyyyyy

    where the y's do not depend on the position
    but only on the ray,
    we want to evaluate a Linear layer on both
    types of data at every position.

    It's as if we constructed

                xxxxxxxxyyyyyyyy
                xxxxxxxxyyyyyyyy
                xxxxxxxxyyyyyyyy
                xxxxxxxxyyyyyyyy

    and sent that through the Linear.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        """
        Copied from torch.nn.Linear.
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(
            torch.empty((out_features, in_features), **factory_kwargs)
        )
        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """
        Copied from torch.nn.Linear.
        """
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        n1 = input[0].shape[-1]
        output1 = F.linear(input[0], self.weight[:, :n1], self.bias)
        output2 = F.linear(input[1], self.weight[:, n1:], None)
        return output1 + output2.unsqueeze(-2)
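For context, a minimal usage sketch (not part of the commit) that checks the equivalence stated in the docstring; the batch sizes and tensor names below are illustrative assumptions.

# Sketch: verify LinearWithRepeat against the plain-Linear formulation.
import torch
import torch.nn.functional as F

from pytorch3d.common.linear_with_repeat import LinearWithRepeat

batch, k, n1, n2, out_features = 2, 4, 8, 3, 16
x = torch.randn(batch, k, n1)  # per-position features along each ray
y = torch.randn(batch, n2)     # per-ray features, shared across positions

layer = LinearWithRepeat(n1 + n2, out_features)
out = layer((x, y))            # shape: (batch, k, out_features)

# Reference from the docstring: expand y along the position axis,
# concatenate, and apply an ordinary linear map with the same weights.
reference = F.linear(
    torch.cat([x, y.unsqueeze(-2).expand(batch, k, n2)], dim=-1),
    layer.weight,
    layer.bias,
)
assert torch.allclose(out, reference, atol=1e-6)

The payoff of the split in forward is that y is multiplied by its slice of the weight matrix once per ray rather than once per position, so the expanded concatenation is never materialized.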
@@ -73,8 +73,8 @@ from .points import (
 from .utils import (
     TensorProperties,
     convert_to_tensors_and_broadcast,
-    ndc_to_grid_sample_coords,
     ndc_grid_sample,
+    ndc_to_grid_sample_coords,
 )
@@ -8,7 +8,7 @@
 import copy
 import inspect
 import warnings
-from typing import Any, Optional, Union, Tuple
+from typing import Any, Optional, Tuple, Union

 import numpy as np
 import torch