Fix test evaluation for Blender data

Summary: Blender data doesn't have depths or crops.

Reviewed By: shapovalov

Differential Revision: D38345583

fbshipit-source-id: a19300daf666bbfd799d0038aeefa14641c559d7
This commit is contained in:
Krzysztof Chalupka 2022-08-02 12:40:21 -07:00 committed by Facebook GitHub Bot
parent 3a063f5976
commit 760305e044
4 changed files with 33 additions and 16 deletions

View File

@ -8,9 +8,9 @@ data_source_ImplicitronDataSource_args:
dataset_map_provider_class_type: BlenderDatasetMapProvider
dataset_map_provider_BlenderDatasetMapProvider_args:
base_dir: ${oc.env:BLENDER_DATASET_ROOT}
n_known_frames_for_test: null
object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
path_manager_factory_class_type: PathManagerFactory
n_known_frames_for_test: null
path_manager_factory_PathManagerFactory_args:
silence_logs: true
@ -20,8 +20,13 @@ model_factory_ImplicitronModelFactory_args:
n_rays_per_image_sampled_from_mask: 4096
scene_extent: 2.0
renderer_MultiPassEmissionAbsorptionRenderer_args:
density_noise_std_train: 1.0
n_pts_per_ray_fine_training: 128
n_pts_per_ray_fine_evaluation: 128
raymarcher_EmissionAbsorptionRaymarcher_args:
blend_output: true
bg_color:
- 1.0
loss_weights:
loss_rgb_mse: 1.0
loss_prev_stage_rgb_mse: 1.0
@ -30,9 +35,12 @@ model_factory_ImplicitronModelFactory_args:
loss_autodecoder_norm: 0.00
optimizer_factory_ImplicitronOptimizerFactory_args:
exponential_lr_step_size: 2000 exponential_lr_step_size: 3001
lr_policy: Exponential
training_loop_ImplicitronTrainingLoop_args:
max_epochs: 2000 max_epochs: 3001
visualize_interval: 0 metric_print_interval: 100
validation_interval: 30 store_checkpoints_purge: 3
test_when_finished: true
validation_interval: 100

View File

@ -161,7 +161,7 @@ class TestExperiment(unittest.TestCase):
class TestNerfRepro(unittest.TestCase):
@unittest.skip("This test reproduces full NERF training.") @unittest.skip("This runs full NeRF training on Blender data.")
def test_nerf_blender(self):
# Train vanilla NERF.
# Set env vars BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS first!

View File

@ -242,14 +242,24 @@ def eval_batch(
if frame_data.depth_map is None or frame_data.depth_map.sum() <= 0:
warnings.warn("Empty or missing depth map in evaluation!")
if frame_data.mask_crop is None:
warnings.warn("mask_crop is None, assuming the whole image is valid.")
# threshold the masks to make ground truth binary masks
mask_fg, mask_crop = [ # pyre-ignore [58]
(getattr(frame_data, k) >= mask_thr) for k in ("fg_probability", "mask_crop") mask_fg = frame_data.fg_probability >= mask_thr
] mask_crop = (
frame_data.mask_crop
if frame_data.mask_crop is not None
# pyre-ignore [6]
else torch.ones_like(mask_fg)
)
image_rgb_masked = mask_background(
# pyre-fixme[6]: Expected `Tensor` for 1st param but got
# `Optional[torch.Tensor]`.
frame_data.image_rgb,
# pyre-ignore [6]
mask_fg,
bg_color=bg_color,
)
@ -274,7 +284,7 @@ def eval_batch(
results["iou"] = iou(
cloned_render["mask_render"],
mask_fg, mask_fg, # pyre-ignore [6]
mask=mask_crop,
)
@ -297,7 +307,7 @@ def eval_batch(
results[metric_name].item(), metric_name, loss_mask_now
)
if name_postfix == "_fg": if name_postfix == "_fg" and frame_data.depth_map is not None:
# only record depth metrics for the foreground
_, abs_ = eval_depth(
cloned_render["depth_render"],
@ -313,9 +323,7 @@ def eval_batch(
if visualize:
visualizer.show_depth(abs_.mean().item(), name_postfix, loss_mask_now)
if break_after_visualising:
import pdb # noqa: B602 breakpoint() # noqa: B601
pdb.set_trace()
if lpips_model is not None:
im1, im2 = [

View File

@ -126,6 +126,7 @@ class ImplicitronEvaluator(EvaluatorBase):
)
results = category_result["results"]
evaluate.pretty_print_nvs_metrics(results)
if dump_to_json:
_dump_to_json(epoch, exp_dir, results)
@ -140,7 +141,6 @@ def _dump_to_json(
r["eval_epoch"] = int(epoch)
logger.info("Evaluation results")
evaluate.pretty_print_nvs_metrics(results)
if exp_dir is None:
raise ValueError("Cannot save results to json without a specified save path.")
with open(os.path.join(exp_dir, "results_test.json"), "w") as f:
@ -156,6 +156,7 @@ def _get_eval_frame_data(frame_data: Any) -> Any:
frame_data.image_rgb
)[:, None, None, None]
for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
value_masked = getattr(frame_data_for_eval, k).clone() * is_known value = getattr(frame_data_for_eval, k)
value_masked = value.clone() * is_known if value is not None else None
setattr(frame_data_for_eval, k, value_masked)
return frame_data_for_eval