test runner for eval_demo

Summary:
Create a test runner for the eval_demo code. Debugging this code is useful for understanding datasets.

Introduces an environment variable, INTERACTIVE_TESTING, so that tests which are not intended for regular test runs are skipped unless it is set.
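The helper that reads this variable lives in common_testing and is not shown in the hunks below; as a minimal sketch, it could look roughly like the following, assuming INTERACTIVE_TESTING is treated as a simple truthy flag (the exact variable handling and accepted values are assumptions, not part of this diff):

import os


def interactive_testing_requested() -> bool:
    """
    Return True if tests that are only meant to be run interactively
    (e.g. ones which open Visdom windows) should be executed.

    Assumed behavior: the INTERACTIVE_TESTING environment variable acts
    as a flag; any non-empty value enables the interactive tests.
    """
    return bool(os.environ.get("INTERACTIVE_TESTING", ""))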

Reviewed By: shapovalov

Differential Revision: D35964016

fbshipit-source-id: ab0f93aff66b6cfeca942b14466cf81f7feb2224
Author: Jeremy Reizenstein
Date: 2022-05-06 08:31:19 -07:00
Committed by: Facebook GitHub Bot
Parent: 44cb00e468
Commit: ec9580a1d4
6 changed files with 93 additions and 15 deletions


@@ -26,11 +26,16 @@ if os.environ.get("FB_TEST", False):
 else:
     from common_resources import get_skateboard_data
+if os.environ.get("FB_TEST", False):
+    from common_testing import interactive_testing_requested
+else:
+    from tests.common_testing import interactive_testing_requested

 class TestDatasetVisualize(unittest.TestCase):
     def setUp(self):
         if os.environ.get("INSIDE_RE_WORKER") is not None:
             raise unittest.SkipTest("Visdom not available")
+        if not interactive_testing_requested():
+            return
         category = "skateboard"
         stack = contextlib.ExitStack()
         dataset_root, path_manager = stack.enter_context(get_skateboard_data())
@@ -94,8 +99,8 @@ class TestDatasetVisualize(unittest.TestCase):
     def test_one(self):
         """Test dataset visualization."""
         if os.environ.get("INSIDE_RE_WORKER") is not None:
             raise unittest.SkipTest("Visdom not available")
+        if not interactive_testing_requested():
+            return
         for max_frames in (16, -1):
             for load_dataset_point_cloud in (True, False):
                 for dataset_key in self.datasets:
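As a usage illustration (not part of this commit), any new interactive-only test could guard itself with the same pattern shown in the hunks above. The test class and method names below are hypothetical, and the import path simply mirrors the FB_TEST switch from the diff:

import os
import unittest

# Import path mirrors the pattern used in the diff above.
if os.environ.get("FB_TEST", False):
    from common_testing import interactive_testing_requested
else:
    from tests.common_testing import interactive_testing_requested


class TestSomethingInteractive(unittest.TestCase):  # hypothetical test class
    def test_show(self):
        # Do nothing in regular test runs; only proceed when interactive
        # testing has been requested via the INTERACTIVE_TESTING variable.
        if not interactive_testing_requested():
            return
        ...  # visualization code that a person inspects, e.g. via Visdom

Such a test then runs as a silent no-op in CI and only does real work when the developer sets INTERACTIVE_TESTING before invoking the test runner (the exact invocation depends on the environment).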