Python object_detection.eval_util.get_evaluators() Examples

The following are 15 code examples of object_detection.eval_util.get_evaluators(), drawn from open-source projects that use the TensorFlow Object Detection API. The source file, originating project, and license are noted above each example.
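Before the examples, here is a minimal, self-contained sketch of the typical call pattern. The metric name and category dicts are illustrative assumptions, not taken from any one project; real code usually derives the categories from a label map.

# Minimal usage sketch (assumes the TensorFlow Object Detection API is installed).
from object_detection import eval_util
from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])

# Hypothetical category list; real code builds this from a label map,
# e.g. via label_map_util.create_categories_from_labelmap().
categories = [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'dog'}]

evaluators = eval_util.get_evaluators(eval_config, categories)
print(type(evaluators[0]).__name__)  # e.g. CocoDetectionEvaluator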
Example #1
Source File: eval_util_test.py    From models with Apache License 2.0
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
    eval_config.include_metrics_per_category = True
    eval_config.recall_lower_bound = 0.2
    eval_config.recall_upper_bound = 0.6
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(eval_config, categories,
                                         evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category)
    self.assertAlmostEqual(evaluator[1]._recall_lower_bound,
                           eval_config.recall_lower_bound)
    self.assertAlmostEqual(evaluator[1]._recall_upper_bound,
                           eval_config.recall_upper_bound) 
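A note on the pattern above: evaluator_options_from_eval_config() translates fields of the EvalConfig into a dict of per-metric constructor kwargs, keyed by metric name. Based on what this test exercises, the dict has roughly the shape below; the exact keys are an assumption and may vary across versions.

# Assumed shape of the dict returned by
# evaluator_options_from_eval_config() (illustrative only):
# {
#     'coco_detection_metrics': {'include_metrics_per_category': True},
#     'precision_at_recall_detection_metrics': {
#         'recall_lower_bound': 0.2,
#         'recall_upper_bound': 0.6,
#     },
# }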
Example #2
Source File: eval_util_test.py    From models with Apache License 2.0
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
    eval_config.include_metrics_per_category = True
    eval_config.recall_lower_bound = 0.2
    eval_config.recall_upper_bound = 0.6
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # and bounds on recall, these options are never passed into the
    # DetectionEvaluator constructor (via `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category)
    self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0)
    self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0) 
Example #3
Source File: eval_util_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category) 
Example #4
Source File: eval_util_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Example #5
Source File: eval_util_test.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category) 
Example #6
Source File: eval_util_test.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Example #7
Source File: eval_util_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category) 
Example #8
Source File: eval_util_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Example #9
Source File: eval_util_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category) 
Example #10
Source File: eval_util_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Example #11
Source File: eval_util_test.py    From models with Apache License 2.0
def test_get_evaluator_with_keypoint_metrics(self):
    eval_config = eval_pb2.EvalConfig()
    person_keypoints_metric = eval_config.parameterized_metric.add()
    person_keypoints_metric.coco_keypoint_metrics.class_label = 'person'
    person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'left_eye'] = 0.1
    person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'right_eye'] = 0.2
    dog_keypoints_metric = eval_config.parameterized_metric.add()
    dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog'
    dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'tail_start'] = 0.3
    dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'mouth'] = 0.4
    categories = self._get_categories_list_with_keypoints()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Verify keypoint evaluator class variables.
    self.assertLen(evaluator, 3)
    self.assertFalse(evaluator[0]._include_metrics_per_category)
    self.assertEqual(evaluator[1]._category_name, 'person')
    self.assertEqual(evaluator[2]._category_name, 'dog')
    self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3])
    self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2])
    self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas)
    self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas) 
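In practice, parameterized keypoint metrics like those above are usually written in a pipeline config rather than built programmatically. The following is a sketch of the equivalent text-proto form, assuming the standard EvalConfig fields exercised by this test.

# Sketch: the same 'person' keypoint metric expressed as a text proto.
from google.protobuf import text_format
from object_detection.protos import eval_pb2

eval_config = text_format.Parse("""
  parameterized_metric {
    coco_keypoint_metrics {
      class_label: "person"
      keypoint_label_to_sigmas { key: "left_eye" value: 0.1 }
      keypoint_label_to_sigmas { key: "right_eye" value: 0.2 }
    }
  }
""", eval_pb2.EvalConfig())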
Example #12
Source File: eval_util_test.py    From models with Apache License 2.0
def test_get_evaluator_with_unmatched_label(self):
    eval_config = eval_pb2.EvalConfig()
    person_keypoints_metric = eval_config.parameterized_metric.add()
    person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched'
    person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'kpt'] = 0.1
    categories = self._get_categories_list_with_keypoints()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)
    self.assertLen(evaluator, 1)
    self.assertNotIsInstance(
        evaluator[0], coco_evaluation.CocoKeypointEvaluator) 
Example #13
Source File: eval_util_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_evaluator_with_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category) 
Example #14
Source File: eval_util_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # this option is never passed into the DetectionEvaluator constructor (via
    # `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category) 
Example #15
Source File: offline_eval_map_corloc.py    From models with Apache License 2.0
def read_data_and_evaluate(input_config, eval_config):
  """Reads pre-computed object detections and groundtruth from tf_record.

  Args:
    input_config: input config proto of type
      object_detection.protos.InputReader.
    eval_config: evaluation config proto of type
      object_detection.protos.EvalConfig.

  Returns:
    Evaluated detections metrics.

  Raises:
    ValueError: if input_reader type is not supported or metric type is unknown.
  """
  if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    input_paths = input_config.tf_record_input_reader.input_path

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    object_detection_evaluators = eval_util.get_evaluators(
        eval_config, categories)
    # Only a single evaluator is supported; use the first one.
    object_detection_evaluator = object_detection_evaluators[0]

    skipped_images = 0
    processed_images = 0
    for input_path in _generate_filenames(input_paths):
      tf.logging.info('Processing file: {0}'.format(input_path))

      record_iterator = tf.python_io.tf_record_iterator(path=input_path)
      data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

      for string_record in record_iterator:
        tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                               processed_images)
        processed_images += 1

        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        if decoded_dict:
          object_detection_evaluator.add_single_ground_truth_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
          object_detection_evaluator.add_single_detected_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
        else:
          skipped_images += 1
          tf.logging.info('Skipped images: {0}'.format(skipped_images))

    return object_detection_evaluator.evaluate()

  raise ValueError('Unsupported input_reader_config.')
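To make the function above concrete, here is a hypothetical driver; the record and label-map paths are placeholders, and the proto construction follows the types named in the docstring.

# Hypothetical driver for read_data_and_evaluate (paths are placeholders).
from object_detection.protos import eval_pb2
from object_detection.protos import input_reader_pb2

input_config = input_reader_pb2.InputReader()
input_config.tf_record_input_reader.input_path.append(
    '/path/to/detections_and_groundtruth.record')
input_config.label_map_path = '/path/to/label_map.pbtxt'

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])

metrics = read_data_and_evaluate(input_config, eval_config)
print(metrics)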