From 58be89c6d28884bc91d1511872dd9f0f02e31447 Mon Sep 17 00:00:00 2001
From: ben
Date: Thu, 7 May 2020 19:13:15 +1000
Subject: [PATCH 1/3] Fix minor typo in exception message

---
 benchbot_eval/evaluator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchbot_eval/evaluator.py b/benchbot_eval/evaluator.py
index 18cef42..64d6784 100644
--- a/benchbot_eval/evaluator.py
+++ b/benchbot_eval/evaluator.py
@@ -331,7 +331,7 @@ def _validate_results_set(results_set,
                             (results_set[0][0], task_str, f, s))
                 elif s != task_str:
                     raise ValueError(
-                        "Evaluator was configured only accept results for task "
+                        "Evaluator was configured to only accept results for task "
                         "'%s', but results file '%s' is for task '%s'" %
                         (required_task, f, s))
 

From 31a2e2c776cee4133edbb0dba69b3815e1f6e25e Mon Sep 17 00:00:00 2001
From: ben
Date: Mon, 11 May 2020 15:00:55 +1000
Subject: [PATCH 2/3] Improve aesthetics of URLs

---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 15dc27e..bee9f66 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-**NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip & run on results files if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions [here](https://github.com/RoboticVisionOrg/benchbot).**
+**NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip & run on results files if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions [here](https://github.com/roboticvisionorg/benchbot).**
 
 # BenchBot Evaluation
 
-BenchBot Evaluation is a library of functions used to evaluate the performance of a BenchBot system in two core semantic scene understanding tasks: semantic SLAM, and scene change detection. The easiest way to use this module is through the helper scripts provided with the [BenchBot software stack](https://github.com/RoboticVisionOrg/benchbot).
+BenchBot Evaluation is a library of functions used to evaluate the performance of a BenchBot system in two core semantic scene understanding tasks: semantic SLAM, and scene change detection. The easiest way to use this module is through the helper scripts provided with the [BenchBot software stack](https://github.com/roboticvisionorg/benchbot).
 
 ## Installing & performing evaluation with BenchBot Evaluation
 
@@ -88,7 +88,7 @@ Notes:
     }
     ```
 
-    The above dicts can be obtained at runtime through the `BenchBot.task_details` & `BenchBot.environment_details` [API properties](https://github.com/RoboticVisionOrg/benchbot_api).
+    The above dicts can be obtained at runtime through the `BenchBot.task_details` & `BenchBot.environment_details` [API properties](https://github.com/roboticvisionorg/benchbot_api).
 - For `'task_details'`:
     - `'type'` must be either `'semantic_slam'` or `'scd'`
     - `'control_mode'` must be either `'passive'` or `'active'`
@@ -110,7 +110,7 @@ Notes:
 
 ## Generating results for evaluation
 
-An algorithm attempting to solve a semantic scene understanding task only has to fill in the list of `'objects'` and the `'class_list'` field (only if a custom class list has been used); everything else can be pre-populated using the [provided BenchBot API methods](https://github.com/RoboticVisionOrg/benchbot_api). Using these helper methods, only a few lines of code is needed to create results that can be used with our evaluator:
+An algorithm attempting to solve a semantic scene understanding task only has to fill in the list of `'objects'` and the `'class_list'` field (only if a custom class list has been used); everything else can be pre-populated using the [provided BenchBot API methods](https://github.com/roboticvisionorg/benchbot_api). Using these helper methods, only a few lines of code are needed to create results that can be used with our evaluator:
 
 ```python
 from benchbot_api import BenchBot
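For context on the README paragraph touched by the last hunk above, here is a minimal sketch of the results-generation workflow it describes. The `empty_results()` helper name and the `results['results']['objects']` nesting are assumptions drawn from the BenchBot API documentation, not something these patches define:

```python
import json

from benchbot_api import BenchBot

# Connecting to a running BenchBot system gives access to helpers that
# pre-populate everything except the algorithm's own output
benchbot = BenchBot()

# Assumed helper: returns a results dict with 'task_details' &
# 'environment_details' already filled in from the running system
results = benchbot.empty_results()

# The algorithm only supplies its detected objects (plus a 'class_list'
# entry if a custom class list was used); exact nesting is an assumption
results['results']['objects'] = []  # one dict per proposed object

# Save to file, ready to be handed to the evaluator
with open('my_results.json', 'w') as f:
    json.dump(results, f)
```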
From 5e5121b42e44abc281dd96a17bc8cdba5b36de9e Mon Sep 17 00:00:00 2001
From: ben
Date: Mon, 11 May 2020 16:47:26 +1000
Subject: [PATCH 3/3] Reject multiple results for the same environment

---
 benchbot_eval/evaluator.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/benchbot_eval/evaluator.py b/benchbot_eval/evaluator.py
index 64d6784..494e420 100644
--- a/benchbot_eval/evaluator.py
+++ b/benchbot_eval/evaluator.py
@@ -335,15 +335,17 @@ def _validate_results_set(results_set,
                         "'%s', but results file '%s' is for task '%s'" %
                         (required_task, f, s))
 
-                env_strs.append(Evaluator._get_env_string(
-                    d['environment_details']))
-                if (required_envs is not None and
-                        env_strs[-1] not in required_envs):
+                s = Evaluator._get_env_string(d['environment_details'])
+                if (required_envs is not None and s not in required_envs):
                     raise ValueError(
                         "Evaluator was configured to require environments: %s. "
                         "Results file '%s' is for environment '%s' which is not "
-                        "in the list." %
-                        (", ".join(required_envs), f, env_strs[-1]))
+                        "in the list." % (", ".join(required_envs), f, s))
+                elif s in env_strs:
+                    raise ValueError(
+                        "Evaluator received multiple results for environment '%s'. "
+                        "Only one result is permitted per environment." % s)
+                env_strs.append(s)
 
     # Lastly, ensure we have all required environments if relevant
     if required_envs is not None:
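To see the behaviour this final patch adds, consider the following standalone sketch. It mirrors the new duplicate-environment check with a simplified `validate_unique_environments()` helper; in the real code the environment string comes from `Evaluator._get_env_string()`, the loop sits inside `_validate_results_set()`, and the environment names below are purely illustrative:

```python
def validate_unique_environments(results_set):
    """Raise if two results files target the same environment.

    'results_set' is a list of (filename, env_string) pairs, where
    env_string stands in for Evaluator._get_env_string()'s output.
    """
    env_strs = []
    for _, s in results_set:
        # Same check, and same error message, as the patched evaluator
        if s in env_strs:
            raise ValueError(
                "Evaluator received multiple results for environment '%s'. "
                "Only one result is permitted per environment." % s)
        env_strs.append(s)


# The second result for 'miniroom:1' now triggers the new ValueError
try:
    validate_unique_environments([('a.json', 'miniroom:1'),
                                  ('b.json', 'miniroom:2'),
                                  ('c.json', 'miniroom:1')])
except ValueError as e:
    print(e)
```

Note the reordering in the patch itself: the environment string is only appended to `env_strs` after both checks pass, so a file can fail for being outside `required_envs` or for duplicating an environment already seen, but never silently pollute the list.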