mirror of https://github.com/facebookresearch/pytorch3d.git
synced 2025-07-30 18:32:49 +08:00
test fixes and lints
Summary:
- followup to the recent pyre change D63415925
- make tests remove temporary files
- weights_only=True in torch.load
- lint fixes

3 test fixes from VRehnberg in https://github.com/facebookresearch/pytorch3d/issues/1914:
- imageio channels fix
- frozen decorator in test_config
- load_blobs positional

Reviewed By: MichaelRamamonjisoa

Differential Revision: D66162167

fbshipit-source-id: 7737e174691b62f1708443a4fae07343cec5bfeb
This commit is contained in:
parent c17e6f947a
commit e20cbe9b0e
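The recurring change across the diff below is passing weights_only=True to torch.load so checkpoints are loaded with the restricted unpickler. A minimal sketch of the pattern, assuming a PyTorch version that supports the weights_only argument; the checkpoint path and contents are made up for illustration:

import torch

# Hypothetical checkpoint containing only tensors and primitive values.
checkpoint_path = "example_checkpoint.pth"
torch.save({"weights": torch.zeros(3), "epoch": 7}, checkpoint_path)

# Plain torch.load(checkpoint_path) unpickles without restrictions; passing
# weights_only=True (as this commit does throughout) limits loading to tensors
# and other allowlisted types, so arbitrary pickled objects are rejected.
loaded = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
print(loaded["weights"].shape, loaded["epoch"])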
@@ -36,5 +36,5 @@ then
 echo "Running pyre..."
 echo "To restart/kill pyre server, run 'pyre restart' or 'pyre kill' in fbcode/"
-( cd ~/fbsource/fbcode; pyre -l vision/fair/pytorch3d/ )
+( cd ~/fbsource/fbcode; arc pyre check //vision/fair/pytorch3d/... )
 fi
@@ -32,7 +32,6 @@ requirements:
 build:
 string: py{{py}}_{{ environ['CU_VERSION'] }}_pyt{{ environ['PYTORCH_VERSION_NODOT']}}
-# script: LD_LIBRARY_PATH=$PREFIX/lib:$BUILD_PREFIX/lib:$LD_LIBRARY_PATH python setup.py install --single-version-externally-managed --record=record.txt # [not win]
 script: python setup.py install --single-version-externally-managed --record=record.txt # [not win]
 script_env:
 - CUDA_HOME
@@ -57,7 +56,6 @@ test:
 - pandas
 - sqlalchemy
 commands:
-#pytest .
 python -m unittest discover -v -s tests -t .
@@ -116,7 +116,9 @@ class ImplicitronModelFactory(ModelFactoryBase):
 "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
 }
 model_state_dict = torch.load(
-model_io.get_model_path(model_path), map_location=map_location
+model_io.get_model_path(model_path),
+map_location=map_location,
+weights_only=True,
 )
 try:
@@ -241,7 +241,7 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
 map_location = {
 "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
 }
-optimizer_state = torch.load(opt_path, map_location)
+optimizer_state = torch.load(opt_path, map_location, weights_only=True)
 else:
 raise FileNotFoundError(f"Optimizer state {opt_path} does not exist.")
 return optimizer_state
@@ -84,9 +84,9 @@ def get_nerf_datasets(
 if autodownload and any(not os.path.isfile(p) for p in (cameras_path, image_path)):
 # Automatically download the data files if missing.
-download_data((dataset_name,), data_root=data_root)
+download_data([dataset_name], data_root=data_root)
-train_data = torch.load(cameras_path)
+train_data = torch.load(cameras_path, weights_only=True)
 n_cameras = train_data["cameras"]["R"].shape[0]
 _image_max_image_pixels = Image.MAX_IMAGE_PIXELS
@@ -63,7 +63,7 @@ def main(cfg: DictConfig):
 raise ValueError(f"Model checkpoint {checkpoint_path} does not exist!")
 print(f"Loading checkpoint {checkpoint_path}.")
-loaded_data = torch.load(checkpoint_path)
+loaded_data = torch.load(checkpoint_path, weights_only=True)
 # Do not load the cached xy grid.
 # - this allows setting an arbitrary evaluation image size.
 state_dict = {
@@ -77,7 +77,7 @@ def main(cfg: DictConfig):
 # Resume training if requested.
 if cfg.resume and os.path.isfile(checkpoint_path):
 print(f"Resuming from checkpoint {checkpoint_path}.")
-loaded_data = torch.load(checkpoint_path)
+loaded_data = torch.load(checkpoint_path, weights_only=True)
 model.load_state_dict(loaded_data["model"])
 stats = pickle.loads(loaded_data["stats"])
 print(f" => resuming from epoch {stats.epoch}.")
@@ -106,7 +106,7 @@ class ResNetFeatureExtractor(FeatureExtractorBase):
 self.layers = torch.nn.ModuleList()
 self.proj_layers = torch.nn.ModuleList()
 for stage in range(self.max_stage):
-stage_name = f"layer{stage+1}"
+stage_name = f"layer{stage + 1}"
 feature_name = self._get_resnet_stage_feature_name(stage)
 if (stage + 1) in self.stages:
 if (
@@ -139,7 +139,7 @@ class ResNetFeatureExtractor(FeatureExtractorBase):
 self.stages = set(self.stages) # convert to set for faster "in"
 def _get_resnet_stage_feature_name(self, stage) -> str:
-return f"res_layer_{stage+1}"
+return f"res_layer_{stage + 1}"
 def _resnet_normalize_image(self, img: torch.Tensor) -> torch.Tensor:
 return (img - self._resnet_mean) / self._resnet_std
@@ -111,10 +111,10 @@ def load_model(fl, map_location: Optional[dict]):
 flstats = get_stats_path(fl)
 flmodel = get_model_path(fl)
 flopt = get_optimizer_path(fl)
-model_state_dict = torch.load(flmodel, map_location=map_location)
+model_state_dict = torch.load(flmodel, map_location=map_location, weights_only=True)
 stats = load_stats(flstats)
 if os.path.isfile(flopt):
-optimizer = torch.load(flopt, map_location=map_location)
+optimizer = torch.load(flopt, map_location=map_location, weights_only=True)
 else:
 optimizer = None
@@ -163,9 +163,8 @@ def _read_chunks(
 if binary_data is not None:
 binary_data = np.frombuffer(binary_data, dtype=np.uint8)
-# pyre-fixme[7]: Expected `Optional[Tuple[Dict[str, typing.Any],
-#  ndarray[typing.Any, typing.Any]]]` but got `Tuple[typing.Any,
-#  Optional[ndarray[typing.Any, dtype[typing.Any]]]]`.
+assert binary_data is not None
 return json_data, binary_data
@@ -1246,13 +1246,10 @@ def _save_ply(
 return
 color_np_type = np.ubyte if colors_as_uint8 else np.float32
-verts_dtype = [("verts", np.float32, 3)]
+verts_dtype: list = [("verts", np.float32, 3)]
 if verts_normals is not None:
 verts_dtype.append(("normals", np.float32, 3))
 if verts_colors is not None:
-# pyre-fixme[6]: For 1st argument expected `Tuple[str,
-#  Type[floating[_32Bit]], int]` but got `Tuple[str,
-#  Type[Union[floating[_32Bit], unsignedinteger[typing.Any]]], int]`.
 verts_dtype.append(("colors", color_np_type, 3))
 vert_data = np.zeros(verts.shape[0], dtype=verts_dtype)
@@ -168,7 +168,7 @@ def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.T
 position of the clipping planes.
 Returns:
-faces_culled: An boolean tensor of size F specifying whether or not each face should be
+faces_culled: boolean tensor of size F specifying whether or not each face should be
 culled.
 """
 clipping_planes = (
@@ -726,15 +726,17 @@ class TexturesUV(TexturesBase):
 for each face
 verts_uvs: (N, V, 2) tensor giving the uv coordinates per vertex
 (a FloatTensor with values between 0 and 1).
-maps_ids: Used if there are to be multiple maps per face. This can be either a list of map_ids [(F,)]
+maps_ids: Used if there are to be multiple maps per face.
+This can be either a list of map_ids [(F,)]
 or a long tensor of shape (N, F) giving the id of the texture map
 for each face. If maps_ids is present, the maps has an extra dimension M
 (so maps_padded is (N, M, H, W, C) and maps_list has elements of
 shape (M, H, W, C)).
 Specifically, the color
-of a vertex V is given by an average of maps_padded[i, maps_ids[i, f], u, v, :]
+of a vertex V is given by an average of
+maps_padded[i, maps_ids[i, f], u, v, :]
 over u and v integers adjacent to
-_verts_uvs_padded[i, _faces_uvs_padded[i, f, 0], :] .
+_verts_uvs_padded[i, _faces_uvs_padded[i, f, 0], :] .
 align_corners: If true, the extreme values 0 and 1 for verts_uvs
 indicate the centers of the edge pixels in the maps.
 padding_mode: padding mode for outside grid values
@@ -1237,7 +1239,8 @@ class TexturesUV(TexturesBase):
 texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2)
 return texels
 else:
-# We have maps_ids_padded: (N, F), textures_map: (N, M, Hi, Wi, C),fragmenmts.pix_to_face: (N, Ho, Wo, K)
+# We have maps_ids_padded: (N, F), textures_map: (N, M, Hi, Wi, C),
+# fragments.pix_to_face: (N, Ho, Wo, K)
 # Get pixel_to_map_ids: (N, K, Ho, Wo) by indexing pix_to_face into maps_ids
 N, M, H_in, W_in, C = texture_maps.shape # 3 for RGB
@@ -1827,7 +1830,7 @@ class TexturesVertex(TexturesBase):
 representation) which overlap the pixel.
 Returns:
-texels: An texture per pixel of shape (N, H, W, K, C).
+texels: A texture per pixel of shape (N, H, W, K, C).
 There will be one C dimensional value for each element in
 fragments.pix_to_face.
 """
@@ -246,7 +246,7 @@ class TestConfig(unittest.TestCase):
 enable_get_default_args(Foo)
-@dataclass()
+@dataclass(frozen=True)
 class Bar:
 aa: int = 9
 bb: int = 9
@@ -87,7 +87,9 @@ class CanineFrameDataBuilder(
 sequence_annotation: types.SequenceAnnotation,
 load_blobs: bool = True,
 ) -> CanineFrameData:
-frame_data = super().build(frame_annotation, sequence_annotation, load_blobs)
+frame_data = super().build(
+frame_annotation, sequence_annotation, load_blobs=load_blobs
+)
 frame_data.num_dogs = frame_annotation.num_dogs or 101
 frame_data.magnetic_field_average_flux_density = (
 frame_annotation.magnetic_field.average_flux_density
@@ -76,7 +76,7 @@ class TestForward(unittest.TestCase):
 "test_out",
 "test_forward_TestForward_test_bg_weight_hits.png",
 ),
-(hits * 255.0).cpu().to(torch.uint8).numpy(),
+(hits * 255.0).cpu().to(torch.uint8).squeeze(2).numpy(),
 )
 self.assertEqual(hits[500, 500, 0].item(), 1.0)
 self.assertTrue(
@@ -139,7 +139,7 @@ class TestForward(unittest.TestCase):
 "test_out",
 "test_forward_TestForward_test_basic_3chan_hits.png",
 ),
-(hits * 255.0).cpu().to(torch.uint8).numpy(),
+(hits * 255.0).cpu().to(torch.uint8).squeeze(2).numpy(),
 )
 self.assertEqual(hits[500, 500, 0].item(), 1.0)
 self.assertTrue(
@@ -194,7 +194,7 @@ class TestForward(unittest.TestCase):
 "test_out",
 "test_forward_TestForward_test_basic_1chan.png",
 ),
-(result * 255.0).cpu().to(torch.uint8).numpy(),
+(result * 255.0).cpu().to(torch.uint8).squeeze(2).numpy(),
 )
 imageio.imsave(
 path.join(
@@ -202,7 +202,7 @@ class TestForward(unittest.TestCase):
 "test_out",
 "test_forward_TestForward_test_basic_1chan_hits.png",
 ),
-(hits * 255.0).cpu().to(torch.uint8).numpy(),
+(hits * 255.0).cpu().to(torch.uint8).squeeze(2).numpy(),
 )
 self.assertEqual(hits[500, 500, 0].item(), 1.0)
 self.assertTrue(
@@ -264,7 +264,7 @@ class TestForward(unittest.TestCase):
 "test_out",
 "test_forward_TestForward_test_basic_8chan_hits.png",
 ),
-(hits * 255.0).cpu().to(torch.uint8).numpy(),
+(hits * 255.0).cpu().to(torch.uint8).squeeze(2).numpy(),
 )
 self.assertEqual(hits[500, 500, 0].item(), 1.0)
 self.assertTrue(
@@ -43,7 +43,7 @@ class TestBuild(unittest.TestCase):
 tutorials = sorted(tutorials_dir.glob("*.ipynb"))
 for tutorial in tutorials:
-with open(tutorial) as f:
+with open(tutorial, encoding="utf8") as f:
 json.load(f)
 @unittest.skipIf(in_conda_build or in_re_worker, "In conda build, or RE worker")
@@ -53,7 +53,7 @@ def _write(mesh, path, **kwargs) -> None:
 io.save_mesh(mesh, path, **kwargs)
 with open(path, "rb") as f:
-_, stored_length = _read_header(f)
+_, stored_length = _read_header(f) # pyre-ignore
 assert stored_length == os.path.getsize(path)
@@ -191,14 +191,14 @@ class TestMeshGltfIO(TestCaseMixin, unittest.TestCase):
 mesh = _load(glb, device=device)
 # save the mesh to a glb file
-glb = DATA_DIR / "cow_write.glb"
-_write(mesh, glb)
+glb_reload = DATA_DIR / "cow_write.glb"
+_write(mesh, glb_reload)
 # load again
-glb_reload = DATA_DIR / "cow_write.glb"
 self.assertTrue(glb_reload.is_file())
 device = torch.device("cuda:0")
 mesh_reload = _load(glb_reload, device=device)
+glb_reload.unlink()
 # assertions
 self.assertEqual(mesh_reload.faces_packed().shape, (5856, 3))
@@ -232,6 +232,7 @@ class TestMeshGltfIO(TestCaseMixin, unittest.TestCase):
 # reload the ico_sphere
 device = torch.device("cuda:0")
 mesh_reload = _load(glb, device=device, include_textures=False)
+glb.unlink()
 self.assertClose(
 ico_sphere_mesh.verts_padded().cpu(),
@@ -299,9 +300,9 @@ class TestMeshGltfIO(TestCaseMixin, unittest.TestCase):
 _write(mesh, glb)
 # reload the mesh glb file saved in TexturesVertex format
-glb = DATA_DIR / "cow_write_texturesvertex.glb"
+self.assertTrue(glb.is_file())
 mesh_dash = _load(glb, device=device)
+glb.unlink()
 self.assertEqual(len(mesh_dash), 1)
 self.assertEqual(mesh_dash.faces_packed().shape, (5856, 3))
@@ -381,3 +382,4 @@ class TestMeshGltfIO(TestCaseMixin, unittest.TestCase):
 glb = DATA_DIR / "example_write_texturesvertex.glb"
 _write(mesh, glb)
+glb.unlink()
@@ -196,7 +196,7 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
 Generate a batch of `batch_size` cube meshes.
 """
-device = torch.device(device)
+device_ = torch.device(device)
 verts, faces = [], []
@@ -213,7 +213,7 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
 [0.0, 0.0, 1.0],
 ],
 dtype=torch.float32,
-device=device,
+device=device_,
 )
 verts.append(v)
 faces.append(
@@ -233,7 +233,7 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
 [0, 1, 6],
 ],
 dtype=torch.int64,
-device=device,
+device=device_,
 )
 )
@@ -316,7 +316,7 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
 outfile = (
 outdir
 + f"/rgb_{interp_mode}"
-+ f"_{str(volume_size).replace(' ','')}"
++ f"_{str(volume_size).replace(' ', '')}"
 + f"_{vidx:003d}_sldim{slice_dim}.png"
 )
 im.save(outfile)
@@ -639,4 +639,4 @@ class TestRaysampling(TestCaseMixin, unittest.TestCase):
 origin1, origin2, rtol=1e-4, atol=1e-4
 ) == (id1 == id2), (origin1, origin2, id1, id2)
 assert not torch.allclose(dir1, dir2), (dir1, dir2)
-self.assertClose(len1, len2), (len1, len2)
+self.assertClose(len1, len2)
@@ -689,7 +689,7 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
 outfile = (
 outdir
 + f"/rgb_{sample_mode}"
-+ f"_{str(volume_size).replace(' ','')}"
++ f"_{str(volume_size).replace(' ', '')}"
 + f"_{imidx:003d}"
 )
 if image_ is image: