Python object_detection.utils.metrics.compute_average_precision() Examples

The following are 30 code examples of object_detection.utils.metrics.compute_average_precision(). Each example is taken from the project and source file named above it. You may also want to check out all available functions/classes of the module object_detection.utils.metrics, or try the search function.
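Before the examples, a minimal usage sketch may help. This is an illustration only, not taken from any of the projects below; it assumes the TensorFlow Object Detection API is installed so that object_detection.utils.metrics is importable, and the precision/recall values are placeholders. As in the tests below, recall is passed in non-decreasing order.

import numpy as np
from object_detection.utils import metrics

# Precision/recall pairs ordered by descending detection score, with recall
# non-decreasing, as produced by metrics.compute_precision_recall().
precision = np.array([1.0, 0.5, 0.67, 0.5], dtype=float)
recall = np.array([0.25, 0.25, 0.5, 0.5], dtype=float)

# Average precision: area under the interpolated precision-recall curve.
average_precision = metrics.compute_average_precision(precision, recall)
print(average_precision)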
Example #1
Source File: metrics_test.py    From object_detection_kitti with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
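For reference, the expected value in this test works out as follows: processed_precision is the running maximum of precision taken from the right (the interpolated precision), recall_interval holds the recall increments, and their dot product gives 0.3*0.9 + 0.1*0.9 + 0.05*0.7 + 0.05*0.55 = 0.4225. A small sketch of that arithmetic (it mirrors the hand-computed expectation above, not the library's internal implementation):

import numpy as np

precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)

# Interpolated precision: running maximum of precision, taken from the right.
interpolated = np.maximum.accumulate(precision[::-1])[::-1]
# Recall increments, with the first interval starting at recall 0.
recall_interval = np.diff(recall, prepend=0.0)
print(np.sum(recall_interval * interpolated))  # ~0.4225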
Example #2
Source File: metrics_test.py    From MBMD with MIT License
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #3
Source File: metrics_test.py    From Elphas with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #4
Source File: object_detection_evaluation.py    From MBMD with MIT License
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
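The per-class pattern inside evaluate() can be reduced to the condensed sketch below, using only the two metrics helpers shown in the other examples. The class data here is a hypothetical placeholder and the evaluator's bookkeeping attributes (CorLoc counts, etc.) are omitted.

import numpy as np
from object_detection.utils import metrics

# Hypothetical per-class detections: confidence scores and true/false-positive flags.
scores_per_class = [np.array([0.9, 0.8, 0.3]), np.array([0.7, 0.2])]
tp_fp_labels_per_class = [np.array([1, 0, 1], dtype=bool), np.array([1, 0], dtype=bool)]
num_gt_instances_per_class = np.array([3, 1])

average_precision_per_class = np.full(len(num_gt_instances_per_class), np.nan)
for class_index, num_gt in enumerate(num_gt_instances_per_class):
  if num_gt == 0:
    continue  # classes without ground truth stay NaN and are skipped by nanmean
  precision, recall = metrics.compute_precision_recall(
      scores_per_class[class_index],
      tp_fp_labels_per_class[class_index],
      num_gt)
  average_precision_per_class[class_index] = metrics.compute_average_precision(
      precision, recall)

mean_ap = np.nanmean(average_precision_per_class)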
Example #5
Source File: metrics_test.py    From MBMD with MIT License
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
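As this test shows, compute_precision_recall() returns (None, None) when there are no ground-truth instances, and compute_average_precision(None, None) returns NaN; that NaN is what np.nanmean skips in the evaluate() snippets elsewhere on this page. A tiny sketch of the same degenerate case (assumes the API is importable; the inputs are placeholders):

import numpy as np
from object_detection.utils import metrics

scores = np.array([0.9, 0.1], dtype=float)
labels = np.array([0, 0], dtype=bool)  # no detection matches any ground truth
precision, recall = metrics.compute_precision_recall(scores, labels, 0)  # num_gt = 0
ap = metrics.compute_average_precision(precision, recall)
print(np.isnan(ap))  # True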
Example #6
Source File: metrics_test.py    From object_detection_with_tensorflow with MIT License
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #7
Source File: metrics_test.py    From AniSeg with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #8
Source File: metrics_test.py    From AniSeg with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #9
Source File: metrics_test.py    From MAX-Object-Detector with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #10
Source File: metrics_test.py    From MAX-Object-Detector with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #11
Source File: metrics_test.py    From open-solution-googleai-object-detection with MIT License
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #12
Source File: metrics_test.py    From open-solution-googleai-object-detection with MIT License
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEqual(precision, expected_precision)
    self.assertEqual(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #13
Source File: metrics_test.py    From g-tensorflow-models with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #14
Source File: metrics_test.py    From object_detection_with_tensorflow with MIT License
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #15
Source File: metrics_test.py    From object_detection_kitti with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #16
Source File: object_detection_evaluation.py    From object_detection_kitti with Apache License 2.0
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
Example #17
Source File: metrics_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #18
Source File: metrics_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #19
Source File: object_detection_evaluation.py    From hands-detection with MIT License
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
Example #20
Source File: metrics_test.py    From hands-detection with MIT License
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #21
Source File: metrics_test.py    From hands-detection with MIT License
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #22
Source File: object_detection_evaluation.py    From moveo_ros with MIT License
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
Example #23
Source File: metrics_test.py    From moveo_ros with MIT License
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #24
Source File: metrics_test.py    From moveo_ros with MIT License
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                   dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #25
Source File: metrics_test.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #26
Source File: metrics_test.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #27
Source File: metrics_test.py    From ros_tensorflow with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #28
Source File: metrics_test.py    From ros_tensorflow with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap) 
Example #29
Source File: metrics_test.py    From Gun-Detector with Apache License 2.0
def test_compute_precision_recall_and_ap_no_groundtruth(self):
    num_gt = 0
    scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
    labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
    expected_precision = None
    expected_recall = None
    precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEquals(precision, expected_precision)
    self.assertEquals(recall, expected_recall)
    ap = metrics.compute_average_precision(precision, recall)
    self.assertTrue(np.isnan(ap)) 
Example #30
Source File: metrics_test.py    From Gun-Detector with Apache License 2.0
def test_compute_average_precision(self):
    precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
    recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
    processed_precision = np.array(
        [0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
    recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
    expected_mean_ap = np.sum(recall_interval * processed_precision)
    mean_ap = metrics.compute_average_precision(precision, recall)
    self.assertAlmostEqual(expected_mean_ap, mean_ap)