Python object_detection.core.standard_fields.DetectionResultFields() Examples
The following are 30 code examples of object_detection.core.standard_fields.DetectionResultFields().
You may also want to check out all available functions and classes of the module object_detection.core.standard_fields.
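DetectionResultFields is a container of constant string names: each attribute (detection_boxes, detection_scores, detection_classes, detection_masks, num_detections, and so on) is simply the string key under which that array or tensor is stored. Below is a minimal sketch of the pattern the examples on this page follow; the numpy values are illustrative placeholders, not the output of any real model:

import numpy as np

from object_detection.core import standard_fields

fields = standard_fields.DetectionResultFields

# Each attribute is a plain string key, e.g. fields.detection_boxes is
# 'detection_boxes'. Detections are passed to evaluators as a dict keyed
# by these field names.
detections_dict = {
    # [num_boxes, 4] boxes as [ymin, xmin, ymax, xmax].
    fields.detection_boxes: np.array([[10., 10., 50., 50.]]),
    # [num_boxes] confidence scores.
    fields.detection_scores: np.array([0.9]),
    # [num_boxes] 1-indexed class labels.
    fields.detection_classes: np.array([1]),
}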
Example #1
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0

def add_common_detected(self):
  image_key = 'img2'
  detected_boxes = np.array(
      [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
      dtype=float)
  detected_class_labels = np.array([1, 1, 3], dtype=int)
  detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
  self.wp_eval.add_single_detected_image_info(
      image_key, {
          standard_fields.DetectionResultFields.detection_boxes:
              detected_boxes,
          standard_fields.DetectionResultFields.detection_scores:
              detected_scores,
          standard_fields.DetectionResultFields.detection_classes:
              detected_class_labels
      })
Example #2
Source File: object_detection_evaluation_test.py From models with Apache License 2.0

def add_common_detected(self):
  image_key = 'img2'
  detected_boxes = np.array(
      [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
      dtype=float)
  detected_class_labels = np.array([1, 1, 3], dtype=int)
  detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
  self.wp_eval.add_single_detected_image_info(
      image_key, {
          standard_fields.DetectionResultFields.detection_boxes:
              detected_boxes,
          standard_fields.DetectionResultFields.detection_scores:
              detected_scores,
          standard_fields.DetectionResultFields.detection_classes:
              detected_class_labels
      })
Example #3
Source File: coco_evaluation_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0

def testExceptionRaisedWithMissingGroundtruth(self):
  """Tests that exception is raised for detection with missing groundtruth."""
  categories = [{'id': 1, 'name': 'cat'},
                {'id': 2, 'name': 'dog'},
                {'id': 3, 'name': 'elephant'}]
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
  with self.assertRaises(ValueError):
    coco_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        })
Example #4
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testExceptionRaisedWithMissingGroundtruth(self):
  """Tests that exception is raised for detection with missing groundtruth."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  with self.assertRaises(ValueError):
    coco_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        })
Example #5
Source File: coco_evaluation_test.py From multilabel-image-classification-tensorflow with MIT License

def testExceptionRaisedWithMissingGroundtruth(self):
  """Tests that exception is raised for detection with missing groundtruth."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  with self.assertRaises(ValueError):
    coco_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        })
Example #6
Source File: coco_evaluation_test.py From multilabel-image-classification-tensorflow with MIT License

def testRejectionOnDuplicateDetections(self):
  """Tests that detections cannot be added more than once for an image."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  # Add groundtruth
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[99., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes: np.array([1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  detections_lists_len = len(coco_evaluator._detection_boxes_list)
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',  # Note that this image id was previously added.
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  self.assertEqual(detections_lists_len,
                   len(coco_evaluator._detection_boxes_list))
Example #7
Source File: coco_evaluation_test.py From multilabel-image-classification-tensorflow with MIT License

def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
  """Tests computing mAP with empty is_crowd array passed in."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #8
Source File: coco_evaluation_test.py From multilabel-image-classification-tensorflow with MIT License

def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
  """Tests computing mAP with is_crowd GT boxes skipped."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1, 2]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([0, 1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #9
Source File: object_detection_evaluation.py From models with Apache License 2.0

def add_single_detected_image_info(self, image_id, detections_dict):
  """Adds detections for a single image to be used for evaluation.

  Args:
    image_id: A unique string/integer identifier for the image.
    detections_dict: A dictionary containing -
      standard_fields.DetectionResultFields.detection_boxes: float32 numpy
        array of shape [num_boxes, 4] containing `num_boxes` detection boxes
        of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
      standard_fields.DetectionResultFields.detection_scores: float32 numpy
        array of shape [num_boxes] containing detection scores for the boxes.
      standard_fields.DetectionResultFields.detection_classes: integer numpy
        array of shape [num_boxes] containing 1-indexed detection classes for
        the boxes.
      standard_fields.DetectionResultFields.detection_masks: uint8 numpy
        array of shape [num_boxes, height, width] containing `num_boxes`
        masks of values ranging between 0 and 1.

  Raises:
    ValueError: If detection masks are not in detections dictionary.
  """
  detection_classes = (
      detections_dict[standard_fields.DetectionResultFields.detection_classes]
      - self._label_id_offset)
  detection_masks = None
  if self._evaluate_masks:
    if (standard_fields.DetectionResultFields.detection_masks not in
        detections_dict):
      raise ValueError('Detection masks not in detections dictionary.')
    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
  self._evaluation.add_single_detected_image_info(
      image_key=image_id,
      detected_boxes=detections_dict[
          standard_fields.DetectionResultFields.detection_boxes],
      detected_scores=detections_dict[
          standard_fields.DetectionResultFields.detection_scores],
      detected_class_labels=detection_classes,
      detected_masks=detection_masks)
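A hedged usage sketch for the method above, assuming `evaluator` is an instance of the evaluator this method belongs to, constructed with mask evaluation enabled; the arrays are illustrative placeholders that follow the shapes in the docstring:

num_boxes, height, width = 2, 100, 100
evaluator.add_single_detected_image_info(
    image_id='image1',
    detections_dict={
        # [num_boxes, 4] absolute-coordinate boxes.
        standard_fields.DetectionResultFields.detection_boxes:
            np.array([[10., 10., 50., 50.], [20., 20., 80., 80.]]),
        # [num_boxes] detection scores.
        standard_fields.DetectionResultFields.detection_scores:
            np.array([0.9, 0.6]),
        # [num_boxes] 1-indexed classes.
        standard_fields.DetectionResultFields.detection_classes:
            np.array([1, 2]),
        # [num_boxes, height, width] binary masks; required when the
        # evaluator was built to evaluate masks.
        standard_fields.DetectionResultFields.detection_masks:
            np.zeros((num_boxes, height, width), dtype=np.uint8),
    })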
Example #10
Source File: object_detection_evaluation_test.py From models with Apache License 2.0

def add_common_detected(self):
  image_key = 'img2'
  detected_boxes = np.array(
      [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
      dtype=float)
  detected_class_labels = np.array([1, 1, 3], dtype=int)
  detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
  self.wp_eval.add_single_detected_image_info(
      image_key,
      {standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
       standard_fields.DetectionResultFields.detection_scores: detected_scores,
       standard_fields.DetectionResultFields.detection_classes:
           detected_class_labels})
Example #11
Source File: coco_evaluation.py From models with Apache License 2.0

def add_single_detected_image_info(self, image_id, detections_dict):
  """Adds detections for a single image to be used for evaluation.

  If a detection has already been added for this image id, a warning is
  logged, and the detection is skipped.

  Args:
    image_id: A unique string/integer identifier for the image.
    detections_dict: A dictionary containing -
      DetectionResultFields.detection_classes: integer numpy array of shape
        [num_masks] containing 1-indexed detection classes for the masks.
      DetectionResultFields.detection_masks: optional uint8 numpy array of
        shape [num_masks, image_height, image_width] containing instance
        masks. The elements of the array must be in {0, 1}.

  Raises:
    ValueError: If results and groundtruth shape don't match.
  """
  if image_id not in self._groundtruth_masks:
    raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
  detection_masks = detections_dict[
      standard_fields.DetectionResultFields.detection_masks]
  self._predicted_masks[image_id] = detection_masks
  self._predicted_class_labels[image_id] = detections_dict[
      standard_fields.DetectionResultFields.detection_classes]
  groundtruth_mask_shape = self._groundtruth_masks[image_id].shape
  if groundtruth_mask_shape[1:] != detection_masks.shape[1:]:
    raise ValueError("The shape of results doesn't match groundtruth.")
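For this mask-based evaluator, a minimal sketch of a valid call, assuming `mask_evaluator` is an instance of the evaluator above and that groundtruth masks for 'image1' were added first (otherwise the ValueError above is raised); shapes are illustrative:

num_masks, image_height, image_width = 1, 64, 64
mask_evaluator.add_single_detected_image_info(
    image_id='image1',
    detections_dict={
        # [num_masks, image_height, image_width] binary instance masks.
        standard_fields.DetectionResultFields.detection_masks:
            np.ones((num_masks, image_height, image_width), dtype=np.uint8),
        # [num_masks] 1-indexed classes.
        standard_fields.DetectionResultFields.detection_classes:
            np.array([1]),
    })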
Example #12
Source File: calibration_evaluation_tf1_test.py From models with Apache License 2.0

def testGetECEWithBatchedDetections(self):
  """Tests that ECE is correct with multiple detections per image."""
  calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
      _get_categories_list(), iou_threshold=0.5)
  input_data_fields = standard_fields.InputDataFields
  detection_fields = standard_fields.DetectionResultFields
  # Note that image_2 has mismatched classes and detection scores but should
  # still produce ECE of 0 because detection scores are also 0.
  eval_dict = {
      input_data_fields.key:
          tf.constant(['image_1', 'image_2', 'image_3']),
      input_data_fields.groundtruth_boxes:
          tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]],
                       [[50., 50., 100., 100.], [100., 100., 200., 200.]],
                       [[25., 25., 50., 50.], [100., 100., 200., 200.]]],
                      dtype=tf.float32),
      detection_fields.detection_boxes:
          tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]],
                       [[50., 50., 100., 100.], [25., 25., 50., 50.]],
                       [[25., 25., 50., 50.], [100., 100., 200., 200.]]],
                      dtype=tf.float32),
      input_data_fields.groundtruth_classes:
          tf.constant([[1, 2], [2, 3], [3, 1]], dtype=tf.int64),
      detection_fields.detection_classes:
          tf.constant([[1, 2], [1, 1], [3, 1]], dtype=tf.int64),
      detection_fields.detection_scores:
          tf.constant([[1.0, 1.0], [0.0, 0.0], [1.0, 1.0]], dtype=tf.float32)
  }
  ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
      eval_dict)['CalibrationError/ExpectedCalibrationError']
  ece = self._get_ece(ece_op, update_op)
  self.assertAlmostEqual(ece, 0.0)
Example #13
Source File: object_detection_evaluation_test.py From MAX-Object-Detector with Apache License 2.0

def add_common_detected(self):
  image_key = 'img2'
  detected_boxes = np.array(
      [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
      dtype=float)
  detected_class_labels = np.array([1, 1, 3], dtype=int)
  detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
  self.wp_eval.add_single_detected_image_info(
      image_key,
      {standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
       standard_fields.DetectionResultFields.detection_scores: detected_scores,
       standard_fields.DetectionResultFields.detection_classes:
           detected_class_labels})
Example #14
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
  """Tests computing mAP with empty is_crowd array passed in."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #15
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testRejectionOnDuplicateDetections(self):
  """Tests that detections cannot be added more than once for an image."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  # Add groundtruth
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[99., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes: np.array([1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  detections_lists_len = len(coco_evaluator._detection_boxes_list)
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',  # Note that this image id was previously added.
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  self.assertEqual(detections_lists_len,
                   len(coco_evaluator._detection_boxes_list))
Example #16
Source File: calibration_evaluation_tf1_test.py From models with Apache License 2.0

def testGetECEWhenImagesFilteredByIsAnnotated(self):
  """Tests that ECE is correct when detections filtered by is_annotated."""
  calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
      _get_categories_list(), iou_threshold=0.5)
  input_data_fields = standard_fields.InputDataFields
  detection_fields = standard_fields.DetectionResultFields
  # ECE will be 0 only if the third image is filtered by is_annotated.
  eval_dict = {
      input_data_fields.key:
          tf.constant(['image_1', 'image_2', 'image_3']),
      input_data_fields.groundtruth_boxes:
          tf.constant([[[100., 100., 200., 200.]],
                       [[50., 50., 100., 100.]],
                       [[25., 25., 50., 50.]]],
                      dtype=tf.float32),
      detection_fields.detection_boxes:
          tf.constant([[[100., 100., 200., 200.]],
                       [[50., 50., 100., 100.]],
                       [[25., 25., 50., 50.]]],
                      dtype=tf.float32),
      input_data_fields.groundtruth_classes:
          tf.constant([[1], [2], [1]], dtype=tf.int64),
      detection_fields.detection_classes:
          tf.constant([[1], [1], [3]], dtype=tf.int64),
      detection_fields.detection_scores:
          tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32),
      'is_annotated':
          tf.constant([True, True, False], dtype=tf.bool)
  }
  ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
      eval_dict)['CalibrationError/ExpectedCalibrationError']
  ece = self._get_ece(ece_op, update_op)
  self.assertAlmostEqual(ece, 0.0)
Example #17
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testFiltersDetectionsFromOtherCategories(self):
  """Tests that the evaluator ignores detections from other categories."""
  category_keypoint_dict = _get_category_keypoints_dict()
  coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
      category_id=2, category_keypoints=category_keypoint_dict['person'],
      class_text='dog')
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_keypoints:
              np.array([[[150., 160.], [170., 180.], [110., 120.],
                         [130., 140.]]]),
          standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
              np.array([[2, 2, 2, 2]])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.9]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1]),
          standard_fields.DetectionResultFields.detection_keypoints:
              np.array([[[150., 160.], [170., 180.], [110., 120.],
                         [130., 140.]]])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/dog'],
                         -1.0)
Example #18
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testIgnoresCrowdAnnotations(self):
  """Tests that the evaluator ignores GT marked as crowd."""
  category_keypoint_dict = _get_category_keypoints_dict()
  coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
      category_id=1, category_keypoints=category_keypoint_dict['person'],
      class_text='person')
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_keypoints:
              np.array([[[150., 160.], [float('nan'), float('nan')],
                         [float('nan'), float('nan')], [170., 180.]]]),
          standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
              np.array([[2, 0, 0, 2]])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1]),
          standard_fields.DetectionResultFields.detection_keypoints:
              np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
                         -1.0)
Example #19
Source File: coco_evaluation_test.py From models with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
  """Tests computing mAP with is_crowd GT boxes skipped."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1, 2]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([0, 1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #20
Source File: calibration_evaluation_tf1_test.py From models with Apache License 2.0

def testGetECEWithUnmatchedGroundtruthAndDetections(self):
  """Tests that ECE is correctly calculated when boxes are unmatched."""
  calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
      _get_categories_list(), iou_threshold=0.5)
  input_data_fields = standard_fields.InputDataFields
  detection_fields = standard_fields.DetectionResultFields
  # No gt and detection boxes match.
  eval_dict = {
      input_data_fields.key:
          tf.constant(['image_1', 'image_2', 'image_3']),
      input_data_fields.groundtruth_boxes:
          tf.constant([[[100., 100., 200., 200.]],
                       [[50., 50., 100., 100.]],
                       [[25., 25., 50., 50.]]],
                      dtype=tf.float32),
      detection_fields.detection_boxes:
          tf.constant([[[50., 50., 100., 100.]],
                       [[25., 25., 50., 50.]],
                       [[100., 100., 200., 200.]]],
                      dtype=tf.float32),
      input_data_fields.groundtruth_classes:
          tf.constant([[1], [2], [3]], dtype=tf.int64),
      detection_fields.detection_classes:
          tf.constant([[1], [1], [3]], dtype=tf.int64),
      # Detection scores of zero when boxes are unmatched = ECE of zero.
      detection_fields.detection_scores:
          tf.constant([[0.0], [0.0], [0.0]], dtype=tf.float32)
  }
  ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
      eval_dict)['CalibrationError/ExpectedCalibrationError']
  ece = self._get_ece(ece_op, update_op)
  self.assertAlmostEqual(ece, 0.0)
Example #21
Source File: exporter_lib_tf2_test.py From models with Apache License 2.0

def test_export_saved_model_and_run_inference(
    self, input_type='image_tensor'):
  tmp_dir = self.get_temp_dir()
  self._save_checkpoint_from_mock_model(tmp_dir)
  with mock.patch.object(
      model_builder, 'build', autospec=True) as mock_builder:
    mock_builder.return_value = FakeModel()
    output_directory = os.path.join(tmp_dir, 'output')
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    exporter_lib_v2.export_inference_graph(
        input_type=input_type,
        pipeline_config=pipeline_config,
        trained_checkpoint_dir=tmp_dir,
        output_directory=output_directory)

    saved_model_path = os.path.join(output_directory, 'saved_model')
    detect_fn = tf.saved_model.load(saved_model_path)
    image = self.get_dummy_input(input_type)
    detections = detect_fn(image)

    detection_fields = fields.DetectionResultFields
    self.assertAllClose(detections[detection_fields.detection_boxes],
                        [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                         [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]])
    self.assertAllClose(detections[detection_fields.detection_scores],
                        [[0.7, 0.6], [0.9, 0.0]])
    self.assertAllClose(detections[detection_fields.detection_classes],
                        [[1, 2], [2, 1]])
    self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
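The same field names index the outputs of an exported detection SavedModel, as the test above shows; a short sketch of the read side, with a placeholder path and an assumed input tensor:

import tensorflow as tf

from object_detection.core import standard_fields as fields

# Placeholder path; point this at a directory produced by
# exporter_lib_v2.export_inference_graph.
detect_fn = tf.saved_model.load('/path/to/exported/saved_model')
detections = detect_fn(image)  # `image` assumed to match the export input_type.

boxes = detections[fields.DetectionResultFields.detection_boxes]
scores = detections[fields.DetectionResultFields.detection_scores]
classes = detections[fields.DetectionResultFields.detection_classes]
num_detections = detections[fields.DetectionResultFields.num_detections]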
Example #22
Source File: object_detection_evaluation_test.py From g-tensorflow-models with Apache License 2.0

def add_common_detected(self):
  image_key = 'img2'
  detected_boxes = np.array(
      [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
      dtype=float)
  detected_class_labels = np.array([1, 1, 3], dtype=int)
  detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
  self.wp_eval.add_single_detected_image_info(
      image_key,
      {standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
       standard_fields.DetectionResultFields.detection_scores: detected_scores,
       standard_fields.DetectionResultFields.detection_classes:
           detected_class_labels})
Example #23
Source File: coco_evaluation_test.py From g-tensorflow-models with Apache License 2.0

def testExceptionRaisedWithMissingGroundtruth(self):
  """Tests that exception is raised for detection with missing groundtruth."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  with self.assertRaises(ValueError):
    coco_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        })
Example #24
Source File: coco_evaluation_test.py From g-tensorflow-models with Apache License 2.0

def testRejectionOnDuplicateDetections(self):
  """Tests that detections cannot be added more than once for an image."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  # Add groundtruth
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[99., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes: np.array([1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  detections_lists_len = len(coco_evaluator._detection_boxes_list)
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',  # Note that this image id was previously added.
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  self.assertEqual(detections_lists_len,
                   len(coco_evaluator._detection_boxes_list))
Example #25
Source File: coco_evaluation_test.py From g-tensorflow-models with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
  """Tests computing mAP with empty is_crowd array passed in."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #26
Source File: coco_evaluation_test.py From g-tensorflow-models with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
  """Tests computing mAP with is_crowd GT boxes skipped."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1, 2]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([0, 1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #27
Source File: coco_evaluation_test.py From MAX-Object-Detector with Apache License 2.0

def testExceptionRaisedWithMissingGroundtruth(self):
  """Tests that exception is raised for detection with missing groundtruth."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  with self.assertRaises(ValueError):
    coco_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        })
Example #28
Source File: coco_evaluation_test.py From MAX-Object-Detector with Apache License 2.0

def testRejectionOnDuplicateDetections(self):
  """Tests that detections cannot be added more than once for an image."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  # Add groundtruth
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[99., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes: np.array([1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  detections_lists_len = len(coco_evaluator._detection_boxes_list)
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',  # Note that this image id was previously added.
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  self.assertEqual(detections_lists_len,
                   len(coco_evaluator._detection_boxes_list))
Example #29
Source File: coco_evaluation_test.py From MAX-Object-Detector with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
  """Tests computing mAP with empty is_crowd array passed in."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #30
Source File: coco_evaluation_test.py From MAX-Object-Detector with Apache License 2.0

def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
  """Tests computing mAP with is_crowd GT boxes skipped."""
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
      _get_categories_list())
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1, 2]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([0, 1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)