Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-08-02 03:42:50 +08:00)
Fix test evaluation for Blender data
Summary: Blender data doesn't have depths or crops.

Reviewed By: shapovalov

Differential Revision: D38345583

fbshipit-source-id: a19300daf666bbfd799d0038aeefa14641c559d7
parent 3a063f5976
commit 760305e044
@@ -8,9 +8,9 @@ data_source_ImplicitronDataSource_args:
   dataset_map_provider_class_type: BlenderDatasetMapProvider
   dataset_map_provider_BlenderDatasetMapProvider_args:
     base_dir: ${oc.env:BLENDER_DATASET_ROOT}
-    n_known_frames_for_test: null
     object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
     path_manager_factory_class_type: PathManagerFactory
+    n_known_frames_for_test: null
     path_manager_factory_PathManagerFactory_args:
       silence_logs: true

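The ${oc.env:...} values above are OmegaConf environment-variable interpolations, so the dataset location and object class come from the shell environment. A minimal sketch of how they resolve (standalone keys and hypothetical values, not the trainer's own loading code):

import os

from omegaconf import OmegaConf

# Hypothetical values for the two environment variables the config expects.
os.environ.setdefault("BLENDER_DATASET_ROOT", "/data/nerf_synthetic")
os.environ.setdefault("BLENDER_SINGLESEQ_CLASS", "lego")

cfg = OmegaConf.create(
    {
        "base_dir": "${oc.env:BLENDER_DATASET_ROOT}",
        "object_name": "${oc.env:BLENDER_SINGLESEQ_CLASS}",
    }
)
# The built-in oc.env resolver substitutes the variables when values are accessed.
print(cfg.base_dir, cfg.object_name)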
@@ -20,8 +20,13 @@ model_factory_ImplicitronModelFactory_args:
       n_rays_per_image_sampled_from_mask: 4096
       scene_extent: 2.0
     renderer_MultiPassEmissionAbsorptionRenderer_args:
+      density_noise_std_train: 1.0
+      n_pts_per_ray_fine_training: 128
+      n_pts_per_ray_fine_evaluation: 128
       raymarcher_EmissionAbsorptionRaymarcher_args:
         blend_output: true
+        bg_color:
+        - 1.0
     loss_weights:
       loss_rgb_mse: 1.0
       loss_prev_stage_rgb_mse: 1.0
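Background handling is the point of the raymarcher settings above: NeRF's Blender synthetic scenes are rendered on a white background, so the rendered color has to be composited onto bg_color rather than onto black. A minimal sketch of that compositing step with dummy tensors (plain PyTorch, assuming the usual alpha-compositing convention rather than quoting the implicitron renderer):

import torch

# Toy per-ray raymarcher outputs: a (non-premultiplied) color and an opacity.
rgb = torch.rand(4096, 3)    # foreground color per ray
alpha = torch.rand(4096, 1)  # accumulated opacity per ray
bg_color = torch.ones(3)     # white background, matching bg_color: 1.0 above

# "Over" compositing of the foreground onto the constant background.
composited = rgb * alpha + bg_color * (1.0 - alpha)
print(composited.shape)  # torch.Size([4096, 3])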
@@ -30,9 +35,12 @@ model_factory_ImplicitronModelFactory_args:
       loss_autodecoder_norm: 0.00

 optimizer_factory_ImplicitronOptimizerFactory_args:
-  exponential_lr_step_size: 2000
+  exponential_lr_step_size: 3001
   lr_policy: Exponential
+
 training_loop_ImplicitronTrainingLoop_args:
-  max_epochs: 2000
-  visualize_interval: 0
-  validation_interval: 30
+  max_epochs: 3001
+  metric_print_interval: 100
+  store_checkpoints_purge: 3
+  test_when_finished: true
+  validation_interval: 100
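For reference, one common reading of an Exponential policy with a step size is that the learning rate decays by a fixed factor every exponential_lr_step_size epochs; whether that matches ImplicitronOptimizerFactory exactly is an assumption here, so the snippet below is only an illustrative LambdaLR sketch:

import torch

gamma = 0.1                      # illustrative decay factor (assumption)
exponential_lr_step_size = 3001  # value from the config above

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=5e-4)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lambda epoch: gamma ** (epoch / exponential_lr_step_size)
)

for _ in range(3):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # slightly below 5e-4 after 3 epochs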
@@ -161,7 +161,7 @@ class TestExperiment(unittest.TestCase):


 class TestNerfRepro(unittest.TestCase):
-    @unittest.skip("This test reproduces full NERF training.")
+    @unittest.skip("This runs full NeRF training on Blender data.")
     def test_nerf_blender(self):
         # Train vanilla NERF.
         # Set env vars BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS first!
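Because the test only makes sense when BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS are set, a guard like the following is one way to encode that requirement (a sketch with a made-up test class, not the decorator used in the repository):

import os
import unittest


class TestBlenderEnv(unittest.TestCase):
    @unittest.skipUnless(
        os.environ.get("BLENDER_DATASET_ROOT")
        and os.environ.get("BLENDER_SINGLESEQ_CLASS"),
        "BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS must be set",
    )
    def test_env_is_configured(self):
        # Only verifies that the configured dataset root exists on disk.
        self.assertTrue(os.path.isdir(os.environ["BLENDER_DATASET_ROOT"]))


if __name__ == "__main__":
    unittest.main()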
@@ -242,14 +242,24 @@ def eval_batch(
+    if frame_data.depth_map is None or frame_data.depth_map.sum() <= 0:
+        warnings.warn("Empty or missing depth map in evaluation!")
+
+    if frame_data.mask_crop is None:
+        warnings.warn("mask_crop is None, assuming the whole image is valid.")
+
     # threshold the masks to make ground truth binary masks
-    mask_fg, mask_crop = [
-        (getattr(frame_data, k) >= mask_thr) for k in ("fg_probability", "mask_crop")
-    ]
+    # pyre-ignore [58]
+    mask_fg = frame_data.fg_probability >= mask_thr
+    mask_crop = (
+        frame_data.mask_crop
+        if frame_data.mask_crop is not None
+        # pyre-ignore [6]
+        else torch.ones_like(mask_fg)
+    )

     image_rgb_masked = mask_background(
         # pyre-fixme[6]: Expected `Tensor` for 1st param but got
         #  `Optional[torch.Tensor]`.
         frame_data.image_rgb,
         # pyre-ignore [6]
         mask_fg,
         bg_color=bg_color,
     )
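The replacement logic above can be exercised on its own; here is a small sketch with dummy tensors (variable names are illustrative and not the FrameData API):

import warnings

import torch

mask_thr = 0.5
fg_probability = torch.rand(1, 1, 128, 128)  # soft foreground probabilities
mask_crop = None                             # Blender frames carry no crop mask

if mask_crop is None:
    warnings.warn("mask_crop is None, assuming the whole image is valid.")

mask_fg = fg_probability >= mask_thr  # binary foreground mask (torch.bool)
mask_crop = mask_crop if mask_crop is not None else torch.ones_like(mask_fg)
print(mask_fg.dtype, mask_crop.all().item())  # torch.bool True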
@@ -274,7 +284,7 @@ def eval_batch(

     results["iou"] = iou(
         cloned_render["mask_render"],
-        mask_fg,
+        mask_fg,  # pyre-ignore [6]
         mask=mask_crop,
     )

@@ -297,7 +307,7 @@ def eval_batch(
                 results[metric_name].item(), metric_name, loss_mask_now
             )

-        if name_postfix == "_fg":
+        if name_postfix == "_fg" and frame_data.depth_map is not None:
             # only record depth metrics for the foreground
             _, abs_ = eval_depth(
                 cloned_render["depth_render"],
@@ -313,9 +323,7 @@ def eval_batch(
             if visualize:
                 visualizer.show_depth(abs_.mean().item(), name_postfix, loss_mask_now)
                 if break_after_visualising:
-                    import pdb  # noqa: B602
-
-                    pdb.set_trace()
+                    breakpoint()  # noqa: B601

     if lpips_model is not None:
         im1, im2 = [
@@ -126,6 +126,7 @@ class ImplicitronEvaluator(EvaluatorBase):
         )

         results = category_result["results"]
+        evaluate.pretty_print_nvs_metrics(results)
         if dump_to_json:
             _dump_to_json(epoch, exp_dir, results)

@@ -140,7 +141,6 @@ def _dump_to_json(
             r["eval_epoch"] = int(epoch)
     logger.info("Evaluation results")

-    evaluate.pretty_print_nvs_metrics(results)
     if exp_dir is None:
         raise ValueError("Cannot save results to json without a specified save path.")
     with open(os.path.join(exp_dir, "results_test.json"), "w") as f:
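The exp_dir check above makes the failure explicit before any file is opened. A self-contained sketch of the same pattern (hypothetical helper name, mirroring the lines shown rather than importing the module):

import json
import os
from typing import Any, Dict, List, Optional


def dump_results(
    results: List[Dict[str, Any]], exp_dir: Optional[str], epoch: Optional[int]
) -> None:
    # Tag every result row with the evaluation epoch, when one is known.
    if epoch is not None:
        for r in results:
            r["eval_epoch"] = int(epoch)
    if exp_dir is None:
        raise ValueError("Cannot save results to json without a specified save path.")
    with open(os.path.join(exp_dir, "results_test.json"), "w") as f:
        json.dump(results, f)


dump_results([{"psnr": 30.1}], exp_dir=".", epoch=3001)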
@@ -156,6 +156,7 @@ def _get_eval_frame_data(frame_data: Any) -> Any:
         frame_data.image_rgb
     )[:, None, None, None]
     for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
-        value_masked = getattr(frame_data_for_eval, k).clone() * is_known
+        value = getattr(frame_data_for_eval, k)
+        value_masked = value.clone() * is_known if value is not None else None
         setattr(frame_data_for_eval, k, value_masked)
     return frame_data_for_eval
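The None-safe masking introduced above generalizes to any container whose optional fields may be missing; a small sketch with made-up field names (not the FrameData class):

from dataclasses import dataclass
from typing import Optional

import torch


@dataclass
class Frames:
    image_rgb: torch.Tensor
    depth_map: Optional[torch.Tensor]  # None for Blender-style data


frames = Frames(image_rgb=torch.rand(2, 3, 8, 8), depth_map=None)
is_known = torch.tensor([1.0, 0.0])[:, None, None, None]  # keep frame 0, zero frame 1

for k in ("image_rgb", "depth_map"):
    value = getattr(frames, k)
    value_masked = value.clone() * is_known if value is not None else None
    setattr(frames, k, value_masked)

print(frames.image_rgb.shape, frames.depth_map)  # torch.Size([2, 3, 8, 8]) None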