Python object_detection.eval_util.evaluator_options_from_eval_config() Examples
The following are 12 code examples of object_detection.eval_util.evaluator_options_from_eval_config(), each taken from the open-source project and source file noted in its header. You may also want to check out all available functions and classes of the module object_detection.eval_util.
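Before the examples, here is a minimal usage sketch showing how evaluator_options_from_eval_config() is typically paired with eval_util.get_evaluators(), as the tests below do. The import paths and the category list are assumptions made for illustration and may need adjusting to your own checkout of the TensorFlow Object Detection API.

# A minimal sketch, assuming the standard TF Object Detection API layout;
# the category list here is hypothetical and exists only for illustration.
from object_detection import eval_util
from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])
eval_config.include_metrics_per_category = True

# Hypothetical label map entries: {'id': ..., 'name': ...} dicts.
categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]

# Convert the EvalConfig into per-metric keyword arguments, then build
# the evaluator objects from those options.
evaluator_options = eval_util.evaluator_options_from_eval_config(eval_config)
evaluators = eval_util.get_evaluators(eval_config, categories,
                                      evaluator_options)

As the examples below illustrate, evaluator_options is a dict keyed by metrics-set name (for example 'coco_detection_metrics'), whose values are keyword-argument dicts such as {'include_metrics_per_category': True} that get_evaluators() passes on to the corresponding evaluators.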
Example #1
Source File: eval_util_test.py From models with Apache License 2.0

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend([
      'coco_detection_metrics', 'coco_mask_metrics',
      'precision_at_recall_detection_metrics'
  ])
  eval_config.include_metrics_per_category = True
  eval_config.recall_lower_bound = 0.2
  eval_config.recall_upper_bound = 0.6
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics']
                  ['include_metrics_per_category'])
  self.assertTrue(
      evaluator_options['coco_mask_metrics']['include_metrics_per_category'])
  self.assertAlmostEqual(
      evaluator_options['precision_at_recall_detection_metrics']
      ['recall_lower_bound'], eval_config.recall_lower_bound)
  self.assertAlmostEqual(
      evaluator_options['precision_at_recall_detection_metrics']
      ['recall_upper_bound'], eval_config.recall_upper_bound)
Example #2
Source File: eval_util_test.py From models with Apache License 2.0

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
  eval_config.include_metrics_per_category = True
  eval_config.recall_lower_bound = 0.2
  eval_config.recall_upper_bound = 0.6
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(eval_config, categories,
                                       evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)
  self.assertAlmostEqual(evaluator[1]._recall_lower_bound,
                         eval_config.recall_lower_bound)
  self.assertAlmostEqual(evaluator[1]._recall_upper_bound,
                         eval_config.recall_upper_bound)
Example #3
Source File: eval_util_test.py From vehicle_counting_tensorflow with MIT License

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  eval_config.include_metrics_per_category = True
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics'][
      'include_metrics_per_category'])
  self.assertTrue(evaluator_options['coco_mask_metrics'][
      'include_metrics_per_category'])
Example #4
Source File: eval_util_test.py From vehicle_counting_tensorflow with MIT License

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  eval_config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(
      eval_config, categories, evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)
Example #5
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  eval_config.include_metrics_per_category = True
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics'][
      'include_metrics_per_category'])
  self.assertTrue(evaluator_options['coco_mask_metrics'][
      'include_metrics_per_category'])
Example #6
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  eval_config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(
      eval_config, categories, evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)
Example #7
Source File: eval_util_test.py From MAX-Object-Detector with Apache License 2.0

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  eval_config.include_metrics_per_category = True
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics'][
      'include_metrics_per_category'])
  self.assertTrue(evaluator_options['coco_mask_metrics'][
      'include_metrics_per_category'])
Example #8
Source File: eval_util_test.py From MAX-Object-Detector with Apache License 2.0

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  eval_config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(
      eval_config, categories, evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)
Example #9
Source File: eval_util_test.py From g-tensorflow-models with Apache License 2.0

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  eval_config.include_metrics_per_category = True
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics'][
      'include_metrics_per_category'])
  self.assertTrue(evaluator_options['coco_mask_metrics'][
      'include_metrics_per_category'])
Example #10
Source File: eval_util_test.py From g-tensorflow-models with Apache License 2.0

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  eval_config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(
      eval_config, categories, evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)
Example #11
Source File: eval_util_test.py From multilabel-image-classification-tensorflow with MIT License

def test_get_eval_metric_ops_for_evaluators(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  eval_config.include_metrics_per_category = True
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  self.assertTrue(evaluator_options['coco_detection_metrics'][
      'include_metrics_per_category'])
  self.assertTrue(evaluator_options['coco_mask_metrics'][
      'include_metrics_per_category'])
Example #12
Source File: eval_util_test.py From multilabel-image-classification-tensorflow with MIT License

def test_get_evaluator_with_evaluator_options(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  eval_config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluator_options = eval_util.evaluator_options_from_eval_config(
      eval_config)
  evaluator = eval_util.get_evaluators(
      eval_config, categories, evaluator_options)
  self.assertTrue(evaluator[0]._include_metrics_per_category)