Python object_detection.core.standard_fields.InputDataFields() Examples
The following are 30 code examples of object_detection.core.standard_fields.InputDataFields(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module object_detection.core.standard_fields, or try the search function.
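InputDataFields is essentially a container of canonical string names for input tensors (image, source_id, groundtruth_boxes, groundtruth_classes, and so on); the examples below use these names as dictionary keys. As a minimal sketch of that pattern, assuming only NumPy and the object_detection package are available (the tensor_dict contents here are made-up values, not from any example below):

from object_detection.core import standard_fields as fields
import numpy as np

# Hypothetical input dictionary keyed by the canonical field names.
tensor_dict = {
    fields.InputDataFields.image: np.zeros((20, 20, 3), dtype=np.uint8),
    fields.InputDataFields.groundtruth_boxes:
        np.array([[0., 0., 1., 1.]], dtype=float),
    fields.InputDataFields.groundtruth_classes: np.array([1], dtype=int),
}

# The attributes are plain strings, so the keys can be inspected directly.
print(fields.InputDataFields.groundtruth_boxes)  # 'groundtruth_boxes'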
Example #1
Source File: inputs.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def _get_features_dict(input_dict): """Extracts features dict from input dict.""" source_id = _replace_empty_string_with_random_number( input_dict[fields.InputDataFields.source_id]) hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape], fields.InputDataFields.original_image_spatial_shape: input_dict[fields.InputDataFields.original_image_spatial_shape] } if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] return features
Example #2
Source File: ops.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.cast(
      tf.is_nan(groundtruth_boxes), dtype=tf.int32), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices)
Example #3
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def test_value_error_on_duplicate_images(self):
  categories = [{'id': 1, 'name': 'cat'},
                {'id': 2, 'name': 'dog'},
                {'id': 3, 'name': 'elephant'}]
  # Add groundtruth
  pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
      categories)
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  pascal_evaluator.add_single_ground_truth_image_info(
      image_key1,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels1})
  with self.assertRaises(ValueError):
    pascal_evaluator.add_single_ground_truth_image_info(
        image_key1,
        {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
         standard_fields.InputDataFields.groundtruth_classes:
         groundtruth_class_labels1})
Example #4
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def test_value_error_on_duplicate_images(self):
  # Add groundtruth
  self.wp_eval = (
      object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(
          self.categories, recall_lower_bound=0.0, recall_upper_bound=0.5))
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key1, {
          standard_fields.InputDataFields.groundtruth_boxes:
              groundtruth_boxes1,
          standard_fields.InputDataFields.groundtruth_classes:
              groundtruth_class_labels1
      })
  with self.assertRaises(ValueError):
    self.wp_eval.add_single_ground_truth_image_info(
        image_key1, {
            standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
            standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
        })
Example #5
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def create_and_add_common_ground_truth(self):
  # Add groundtruth
  self.wp_eval = (
      object_detection_evaluation.WeightedPascalDetectionEvaluator(
          self.categories))
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key1,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels1})
  # 'img2' is added separately by the individual tests; add 'img3' here.
  image_key3 = 'img3'
  groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
  groundtruth_class_labels3 = np.array([2], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key3,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels3})
Example #6
Source File: object_detection_evaluation_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def create_and_add_common_ground_truth(self):
  # Add groundtruth
  self.wp_eval = (
      object_detection_evaluation.WeightedPascalDetectionEvaluator(
          self.categories))
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key1,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels1})
  # 'img2' is added separately by the individual tests; add 'img3' here.
  image_key3 = 'img3'
  groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
  groundtruth_class_labels3 = np.array([2], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key3,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels3})
Example #7
Source File: object_detection_evaluation_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_value_error_on_duplicate_images(self):
  categories = [{'id': 1, 'name': 'cat'},
                {'id': 2, 'name': 'dog'},
                {'id': 3, 'name': 'elephant'}]
  # Add groundtruth
  pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
      categories)
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  pascal_evaluator.add_single_ground_truth_image_info(
      image_key1,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels1})
  with self.assertRaises(ValueError):
    pascal_evaluator.add_single_ground_truth_image_info(
        image_key1,
        {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
         standard_fields.InputDataFields.groundtruth_classes:
         groundtruth_class_labels1})
Example #8
Source File: inputs.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def _get_features_dict(input_dict): """Extracts features dict from input dict.""" source_id = _replace_empty_string_with_random_number( input_dict[fields.InputDataFields.source_id]) hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape], fields.InputDataFields.original_image_spatial_shape: input_dict[fields.InputDataFields.original_image_spatial_shape] } if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] return features
Example #9
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def test_value_error_on_duplicate_images(self):
  # Add groundtruth
  self.wp_eval = (
      object_detection_evaluation.WeightedPascalDetectionEvaluator(
          self.categories))
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key1,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels1})
  with self.assertRaises(ValueError):
    self.wp_eval.add_single_ground_truth_image_info(
        image_key1,
        {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
         standard_fields.InputDataFields.groundtruth_classes:
         groundtruth_class_labels1})
Example #10
Source File: inputs.py From Elphas with Apache License 2.0 | 5 votes |
def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of core/preprocessor.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tf.to_float(tensor_dict[fields.InputDataFields.image]), 0)

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_instance_masks=include_instance_masks,
          include_keypoints=include_keypoints))
  tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      tensor_dict[fields.InputDataFields.image], axis=0)
  return tensor_dict
Example #11
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def test_returns_correct_metric_values_with_difficult_list(self):
  self.create_and_add_common_ground_truth()
  image_key2 = 'img2'
  groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
                                 [10, 10, 12, 12]], dtype=float)
  groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
  groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key2,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels2,
       standard_fields.InputDataFields.groundtruth_difficult:
       groundtruth_is_difficult_list2
      })
  self.add_common_detected()

  metrics = self.wp_eval.evaluate()
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 3)
  self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
                                 'Precision/mAP@0.5IOU'],
                         1. / (3 + 1 + 2) / 3)
  self.wp_eval.clear()
  self.assertFalse(self.wp_eval._image_ids)
Example #12
Source File: ops.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def filter_unrecognized_classes(tensor_dict):
  """Filters out class labels that are not recognized by the labelmap.

  The decoder parses unrecognized classes (those not included in the labelmap)
  to a label of value -1. Such targets are unnecessary for training and cause
  issues for evaluation due to the label mapping logic. This function filters
  those labels out for both training and evaluation.

  Args:
    tensor_dict: dictionary containing input tensors keyed by
      fields.InputDataFields.

  Returns:
    A dictionary keyed by fields.InputDataFields containing the tensors
    obtained after applying the filtering.

  Raises:
    ValueError: If groundtruth_classes tensor is not in tensor_dict.
  """
  if fields.InputDataFields.groundtruth_classes not in tensor_dict:
    raise ValueError('`groundtruth classes` not in tensor_dict.')
  # Refer to tf_example_decoder for how unrecognized labels are handled.
  unrecognized_label = -1
  recognized_indices = tf.where(
      tf.greater(tensor_dict[fields.InputDataFields.groundtruth_classes],
                 unrecognized_label))

  return retain_groundtruth(tensor_dict, recognized_indices)
Example #13
Source File: ops.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def retain_groundtruth_with_positive_classes(tensor_dict):
  """Retains only groundtruth with positive class ids.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types
      fields.InputDataFields.groundtruth_difficult

  Returns:
    a dictionary of tensors containing only the groundtruth with positive
    classes.

  Raises:
    ValueError: If groundtruth_classes tensor is not in tensor_dict.
  """
  if fields.InputDataFields.groundtruth_classes not in tensor_dict:
    raise ValueError('`groundtruth classes` not in tensor_dict.')
  keep_indices = tf.where(tf.greater(
      tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
  return retain_groundtruth(tensor_dict, keep_indices)
Example #14
Source File: model.py From Elphas with Apache License 2.0 | 5 votes |
def _get_groundtruth_data(detection_model, class_agnostic):
  """Extracts groundtruth data from detection_model.

  Args:
    detection_model: A `DetectionModel` object.
    class_agnostic: Whether the detections are class_agnostic.

  Returns:
    A tuple of:
    groundtruth: Dictionary with the following fields:
      'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
        normalized coordinates.
      'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      'groundtruth_masks': 3D float32 tensor of instance masks (if provided in
        groundtruth)
    class_agnostic: Boolean indicating whether detections are class agnostic.
  """
  input_data_fields = fields.InputDataFields()
  groundtruth_boxes = detection_model.groundtruth_lists(
      fields.BoxListFields.boxes)[0]
  # For class-agnostic models, groundtruth one-hot encodings collapse to all
  # ones.
  if class_agnostic:
    groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
    groundtruth_classes_one_hot = tf.ones([groundtruth_boxes_shape[0], 1])
  else:
    groundtruth_classes_one_hot = detection_model.groundtruth_lists(
        fields.BoxListFields.classes)[0]
  label_id_offset = 1  # Applying label id offset (b/63711816)
  groundtruth_classes = (
      tf.argmax(groundtruth_classes_one_hot, axis=1) + label_id_offset)
  groundtruth = {
      input_data_fields.groundtruth_boxes: groundtruth_boxes,
      input_data_fields.groundtruth_classes: groundtruth_classes
  }
  if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
    groundtruth[input_data_fields.groundtruth_instance_masks] = (
        detection_model.groundtruth_lists(fields.BoxListFields.masks)[0])
  return groundtruth
Example #15
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def test_returns_correct_metric_values_with_difficult_list(self):
  self.create_and_add_common_ground_truth()
  image_key2 = 'img2'
  groundtruth_boxes2 = np.array(
      [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
  groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
  groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key2, {
          standard_fields.InputDataFields.groundtruth_boxes:
              groundtruth_boxes2,
          standard_fields.InputDataFields.groundtruth_classes:
              groundtruth_class_labels2,
          standard_fields.InputDataFields.groundtruth_difficult:
              groundtruth_is_difficult_list2
      })
  self.add_common_detected()

  metrics = self.wp_eval.evaluate()
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 3)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'Precision/mAP@0.5IOU@[0.0,0.5]Recall'],
      1. / (3 + 1 + 2) / 3)
  self.wp_eval.clear()
  self.assertFalse(self.wp_eval._image_ids)
Example #16
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def test_returns_correct_metric_values(self):
  self.create_and_add_common_ground_truth()
  image_key2 = 'img2'
  groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
                                 [10, 10, 12, 12]], dtype=float)
  groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key2,
      {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
       standard_fields.InputDataFields.groundtruth_classes:
       groundtruth_class_labels2
      })
  self.add_common_detected()

  metrics = self.wp_eval.evaluate()
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 4)
  self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
                                 'Precision/mAP@0.5IOU'],
                         1. / (4 + 1 + 2) / 3)
  self.wp_eval.clear()
  self.assertFalse(self.wp_eval._image_ids)
Example #17
Source File: coco_evaluation_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def testRejectionOnDuplicateDetections(self):
  """Tests that detections cannot be added more than once for an image."""
  categories = [{'id': 1, 'name': 'cat'},
                {'id': 2, 'name': 'dog'},
                {'id': 3, 'name': 'elephant'}]
  # Add groundtruth
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[99., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes: np.array([1])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  detections_lists_len = len(coco_evaluator._detection_boxes_list)
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',  # Note that this image id was previously added.
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  self.assertEqual(detections_lists_len,
                   len(coco_evaluator._detection_boxes_list))
Example #18
Source File: inputs.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of core/preprocessor.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tf.to_float(tensor_dict[fields.InputDataFields.image]), 0)

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  include_label_weights = (fields.InputDataFields.groundtruth_weights
                           in tensor_dict)
  include_label_confidences = (fields.InputDataFields.groundtruth_confidences
                               in tensor_dict)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_label_weights=include_label_weights,
          include_label_confidences=include_label_confidences,
          include_instance_masks=include_instance_masks,
          include_keypoints=include_keypoints))
  tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      tensor_dict[fields.InputDataFields.image], axis=0)
  return tensor_dict
Example #19
Source File: coco_evaluation_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def testRejectionOnDuplicateGroundtruth(self):
  """Tests that groundtruth cannot be added more than once for an image."""
  categories = [{'id': 1, 'name': 'cat'},
                {'id': 2, 'name': 'dog'},
                {'id': 3, 'name': 'elephant'}]
  # Add groundtruth
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
  image_key1 = 'img1'
  groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                dtype=float)
  groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
  coco_evaluator.add_single_ground_truth_image_info(image_key1, {
      standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
      standard_fields.InputDataFields.groundtruth_classes:
          groundtruth_class_labels1
  })
  groundtruth_lists_len = len(coco_evaluator._groundtruth_list)

  # Add groundtruth with the same image id.
  coco_evaluator.add_single_ground_truth_image_info(image_key1, {
      standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
      standard_fields.InputDataFields.groundtruth_classes:
          groundtruth_class_labels1
  })
  self.assertEqual(groundtruth_lists_len,
                   len(coco_evaluator._groundtruth_list))
Example #20
Source File: inputs.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of core/preprocessor.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tf.to_float(tensor_dict[fields.InputDataFields.image]), 0)

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_instance_masks=include_instance_masks,
          include_keypoints=include_keypoints))
  tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      tensor_dict[fields.InputDataFields.image], axis=0)
  return tensor_dict
Example #21
Source File: object_detection_evaluation_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def test_returns_correct_metric_values(self):
  self.create_and_add_common_ground_truth()
  image_key2 = 'img2'
  groundtruth_boxes2 = np.array(
      [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
  groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
  self.wp_eval.add_single_ground_truth_image_info(
      image_key2, {
          standard_fields.InputDataFields.groundtruth_boxes:
              groundtruth_boxes2,
          standard_fields.InputDataFields.groundtruth_classes:
              groundtruth_class_labels2
      })
  self.add_common_detected()

  metrics = self.wp_eval.evaluate()
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 4)
  self.assertAlmostEqual(
      metrics[self.wp_eval._metric_prefix +
              'Precision/mAP@0.5IOU@[0.0,0.5]Recall'],
      1. / (3 + 1 + 2) / 4)
  self.wp_eval.clear()
  self.assertFalse(self.wp_eval._image_ids)
Example #22
Source File: coco_evaluation_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
  """Tests computing mAP with empty is_crowd array passed in."""
  category_list = [{'id': 0, 'name': 'person'},
                   {'id': 1, 'name': 'cat'},
                   {'id': 2, 'name': 'dog'}]
  coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
  coco_evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1]),
          standard_fields.InputDataFields.groundtruth_is_crowd:
              np.array([])
      })
  coco_evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]]),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8]),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1])
      })
  metrics = coco_evaluator.evaluate()
  self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #23
Source File: inputs.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def _get_labels_dict(input_dict): """Extracts labels dict from input dict.""" required_label_keys = [ fields.InputDataFields.num_groundtruth_boxes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_weights, ] labels_dict = {} for key in required_label_keys: labels_dict[key] = input_dict[key] optional_label_keys = [ fields.InputDataFields.groundtruth_confidences, fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_difficult ] for key in optional_label_keys: if key in input_dict: labels_dict[key] = input_dict[key] if fields.InputDataFields.groundtruth_difficult in labels_dict: labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) return labels_dict
Example #24
Source File: inputs.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def _get_features_dict(input_dict): """Extracts features dict from input dict.""" hash_from_source_id = tf.string_to_hash_bucket_fast( input_dict[fields.InputDataFields.source_id], HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape] } if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] return features
Example #25
Source File: inputs.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def _get_labels_dict(input_dict): """Extracts labels dict from input dict.""" required_label_keys = [ fields.InputDataFields.num_groundtruth_boxes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_weights ] labels_dict = {} for key in required_label_keys: labels_dict[key] = input_dict[key] optional_label_keys = [ fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_difficult ] for key in optional_label_keys: if key in input_dict: labels_dict[key] = input_dict[key] if fields.InputDataFields.groundtruth_difficult in labels_dict: labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) return labels_dict
Example #26
Source File: inputs.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of core/preprocessor.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tf.to_float(tensor_dict[fields.InputDataFields.image]), 0)

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_instance_masks=include_instance_masks,
          include_keypoints=include_keypoints))
  tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      tensor_dict[fields.InputDataFields.image], axis=0)
  return tensor_dict
Example #27
Source File: eval_util_test.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def _make_evaluation_dict(self):
  input_data_fields = fields.InputDataFields
  detection_fields = fields.DetectionResultFields

  image = tf.zeros(shape=[1, 20, 20, 3], dtype=tf.uint8)
  key = tf.constant('image1')
  detection_boxes = tf.constant([[[0., 0., 1., 1.]]])
  detection_scores = tf.constant([[0.8]])
  detection_classes = tf.constant([[0]])
  detection_masks = tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32)
  num_detections = tf.constant([1])
  groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
  groundtruth_classes = tf.constant([1])
  groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
  detections = {
      detection_fields.detection_boxes: detection_boxes,
      detection_fields.detection_scores: detection_scores,
      detection_fields.detection_classes: detection_classes,
      detection_fields.detection_masks: detection_masks,
      detection_fields.num_detections: num_detections
  }
  groundtruth = {
      input_data_fields.groundtruth_boxes: groundtruth_boxes,
      input_data_fields.groundtruth_classes: groundtruth_classes,
      input_data_fields.groundtruth_instance_masks:
          groundtruth_instance_masks
  }
  return eval_util.result_dict_for_single_example(image, key, detections,
                                                  groundtruth)
Example #28
Source File: inputs.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def _get_features_dict(input_dict): """Extracts features dict from input dict.""" hash_from_source_id = tf.string_to_hash_bucket_fast( input_dict[fields.InputDataFields.source_id], HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape] } if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] return features
Example #29
Source File: inputs.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def _get_labels_dict(input_dict): """Extracts labels dict from input dict.""" required_label_keys = [ fields.InputDataFields.num_groundtruth_boxes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_weights ] labels_dict = {} for key in required_label_keys: labels_dict[key] = input_dict[key] optional_label_keys = [ fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_difficult ] for key in optional_label_keys: if key in input_dict: labels_dict[key] = input_dict[key] if fields.InputDataFields.groundtruth_difficult in labels_dict: labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) return labels_dict
Example #30
Source File: eval_util_test.py From Gun-Detector with Apache License 2.0 | 5 votes |
def _make_evaluation_dict(self):
  input_data_fields = fields.InputDataFields
  detection_fields = fields.DetectionResultFields

  image = tf.zeros(shape=[1, 20, 20, 3], dtype=tf.uint8)
  key = tf.constant('image1')
  detection_boxes = tf.constant([[[0., 0., 1., 1.]]])
  detection_scores = tf.constant([[0.8]])
  detection_classes = tf.constant([[0]])
  detection_masks = tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32)
  num_detections = tf.constant([1])
  groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
  groundtruth_classes = tf.constant([1])
  groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
  detections = {
      detection_fields.detection_boxes: detection_boxes,
      detection_fields.detection_scores: detection_scores,
      detection_fields.detection_classes: detection_classes,
      detection_fields.detection_masks: detection_masks,
      detection_fields.num_detections: num_detections
  }
  groundtruth = {
      input_data_fields.groundtruth_boxes: groundtruth_boxes,
      input_data_fields.groundtruth_classes: groundtruth_classes,
      input_data_fields.groundtruth_instance_masks:
          groundtruth_instance_masks
  }
  return eval_util.result_dict_for_single_example(image, key, detections,
                                                  groundtruth)