Python object_detection.utils.ops.retain_groundtruth_with_positive_classes() Examples
The following are 5 code examples of object_detection.utils.ops.retain_groundtruth_with_positive_classes(). The original project and source file for each example are noted in the header above it.
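As the examples below show, retain_groundtruth_with_positive_classes() takes a dictionary keyed by fields.InputDataFields and returns a copy in which every groundtruth entry whose class label is not positive (e.g. a 0 background/padding label) has been dropped from all groundtruth fields. The following minimal sketch is not taken from the examples; it assumes TensorFlow 2.x eager execution and that the TensorFlow Object Detection API is importable as object_detection, and the box and class values are invented for illustration.

import tensorflow as tf

from object_detection.core import standard_fields as fields
from object_detection.utils import ops

# Two groundtruth boxes: the first is labelled 0 (non-positive), the second 3.
tensor_dict = {
    fields.InputDataFields.groundtruth_boxes: tf.constant(
        [[0.1, 0.1, 0.5, 0.5],
         [0.2, 0.2, 0.9, 0.9]], dtype=tf.float32),
    fields.InputDataFields.groundtruth_classes: tf.constant(
        [0, 3], dtype=tf.int32),
}

filtered = ops.retain_groundtruth_with_positive_classes(tensor_dict)

# Only the entry with the positive class label (3) survives the filter.
print(filtered[fields.InputDataFields.groundtruth_classes].numpy())  # expected: [3]
print(filtered[fields.InputDataFields.groundtruth_boxes].numpy())    # expected: [[0.2 0.2 0.9 0.9]]

Example #2 below applies the same call to a dictionary containing only groundtruth_boxes and groundtruth_classes, while Examples #1 and #3 exercise the full set of groundtruth fields.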
Example #1
Source File: ops_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_filter_groundtruth_with_positive_classes(self):
    input_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    input_boxes = tf.placeholder(tf.float32, shape=(None, 4))
    input_classes = tf.placeholder(tf.int32, shape=(None,))
    input_is_crowd = tf.placeholder(tf.bool, shape=(None,))
    input_area = tf.placeholder(tf.float32, shape=(None,))
    input_difficult = tf.placeholder(tf.float32, shape=(None,))
    input_label_types = tf.placeholder(tf.string, shape=(None,))
    input_confidences = tf.placeholder(tf.float32, shape=(None,))
    valid_indices = tf.placeholder(tf.int32, shape=(None,))
    input_tensors = {
        fields.InputDataFields.image: input_image,
        fields.InputDataFields.groundtruth_boxes: input_boxes,
        fields.InputDataFields.groundtruth_classes: input_classes,
        fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
        fields.InputDataFields.groundtruth_area: input_area,
        fields.InputDataFields.groundtruth_difficult: input_difficult,
        fields.InputDataFields.groundtruth_label_types: input_label_types,
        fields.InputDataFields.groundtruth_confidences: input_confidences,
    }
    output_tensors = ops.retain_groundtruth_with_positive_classes(input_tensors)
    image_tensor = np.random.rand(224, 224, 3)
    feed_dict = {
        input_image: image_tensor,
        input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
                              dtype=np.float),
        input_classes: np.array([1, 0], dtype=np.int32),
        input_is_crowd: np.array([False, True], dtype=np.bool),
        input_area: np.array([32, 48], dtype=np.float32),
        input_difficult: np.array([True, False], dtype=np.bool),
        input_label_types: np.array(['APPROPRIATE', 'INCORRECT'],
                                    dtype=np.string_),
        input_confidences: np.array([0.99, 0.5], dtype=np.float32),
        valid_indices: np.array([0], dtype=np.int32),
    }
    expected_tensors = {
        fields.InputDataFields.image: image_tensor,
        fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
        fields.InputDataFields.groundtruth_classes: [1],
        fields.InputDataFields.groundtruth_is_crowd: [False],
        fields.InputDataFields.groundtruth_area: [32],
        fields.InputDataFields.groundtruth_difficult: [True],
        fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
        fields.InputDataFields.groundtruth_confidences: [0.99],
    }
    with self.test_session() as sess:
      output_tensors = sess.run(output_tensors, feed_dict=feed_dict)
      for key in [fields.InputDataFields.image,
                  fields.InputDataFields.groundtruth_boxes,
                  fields.InputDataFields.groundtruth_area,
                  fields.InputDataFields.groundtruth_confidences]:
        self.assertAllClose(expected_tensors[key], output_tensors[key])
      for key in [fields.InputDataFields.groundtruth_classes,
                  fields.InputDataFields.groundtruth_is_crowd,
                  fields.InputDataFields.groundtruth_label_types]:
        self.assertAllEqual(expected_tensors[key], output_tensors[key])
Example #2
Source File: seq_dataset_builder.py From g-tensorflow-models with Apache License 2.0
def _build_training_batch_dict(batch_sequences_with_states, unroll_length,
                               batch_size):
  """Builds training batch samples.

  Args:
    batch_sequences_with_states: A batch_sequences_with_states object.
    unroll_length: Unrolled length for LSTM training.
    batch_size: Batch size for queue outputs.

  Returns:
    A dictionary of tensors based on items in input_reader_config.
  """
  seq_tensors_dict = {
      fields.InputDataFields.image: [],
      fields.InputDataFields.groundtruth_boxes: [],
      fields.InputDataFields.groundtruth_classes: [],
      'batch': batch_sequences_with_states,
  }
  for i in range(unroll_length):
    for j in range(batch_size):
      filtered_dict = util_ops.filter_groundtruth_with_nan_box_coordinates({
          fields.InputDataFields.groundtruth_boxes: (
              batch_sequences_with_states.sequences['groundtruth_boxes'][j][i]),
          fields.InputDataFields.groundtruth_classes: (
              batch_sequences_with_states.sequences['groundtruth_classes'][j][i]
          ),
      })
      filtered_dict = util_ops.retain_groundtruth_with_positive_classes(
          filtered_dict)
      seq_tensors_dict[fields.InputDataFields.image].append(
          batch_sequences_with_states.sequences['image'][j][i])
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes].append(
          filtered_dict[fields.InputDataFields.groundtruth_boxes])
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes].append(
          filtered_dict[fields.InputDataFields.groundtruth_classes])
  seq_tensors_dict[fields.InputDataFields.image] = tuple(
      seq_tensors_dict[fields.InputDataFields.image])
  seq_tensors_dict[fields.InputDataFields.groundtruth_boxes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes])
  seq_tensors_dict[fields.InputDataFields.groundtruth_classes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes])
  return seq_tensors_dict
Example #3
Source File: ops_test.py From models with Apache License 2.0
def test_filter_groundtruth_with_positive_classes(self):

    def graph_fn(input_image, input_boxes, input_classes, input_is_crowd,
                 input_area, input_difficult, input_label_types,
                 input_confidences):
      input_tensors = {
          fields.InputDataFields.image: input_image,
          fields.InputDataFields.groundtruth_boxes: input_boxes,
          fields.InputDataFields.groundtruth_classes: input_classes,
          fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
          fields.InputDataFields.groundtruth_area: input_area,
          fields.InputDataFields.groundtruth_difficult: input_difficult,
          fields.InputDataFields.groundtruth_label_types: input_label_types,
          fields.InputDataFields.groundtruth_confidences: input_confidences,
      }
      output_tensors = ops.retain_groundtruth_with_positive_classes(
          input_tensors)
      return output_tensors

    input_image = np.random.rand(224, 224, 3)
    input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
                           dtype=np.float)
    input_classes = np.array([1, 0], dtype=np.int32)
    input_is_crowd = np.array([False, True], dtype=np.bool)
    input_area = np.array([32, 48], dtype=np.float32)
    input_difficult = np.array([True, False], dtype=np.bool)
    input_label_types = np.array(['APPROPRIATE', 'INCORRECT'],
                                 dtype=np.string_)
    input_confidences = np.array([0.99, 0.5], dtype=np.float32)
    expected_tensors = {
        fields.InputDataFields.image: input_image,
        fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
        fields.InputDataFields.groundtruth_classes: [1],
        fields.InputDataFields.groundtruth_is_crowd: [False],
        fields.InputDataFields.groundtruth_area: [32],
        fields.InputDataFields.groundtruth_difficult: [True],
        fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
        fields.InputDataFields.groundtruth_confidences: [0.99],
    }
    # Executing on CPU because string types are not supported on TPU.
    output_tensors = self.execute_cpu(graph_fn,
                                      [input_image, input_boxes, input_classes,
                                       input_is_crowd, input_area,
                                       input_difficult, input_label_types,
                                       input_confidences])
    for key in [fields.InputDataFields.image,
                fields.InputDataFields.groundtruth_boxes,
                fields.InputDataFields.groundtruth_area,
                fields.InputDataFields.groundtruth_confidences]:
      self.assertAllClose(expected_tensors[key], output_tensors[key])
    for key in [fields.InputDataFields.groundtruth_classes,
                fields.InputDataFields.groundtruth_is_crowd,
                fields.InputDataFields.groundtruth_label_types]:
      self.assertAllEqual(expected_tensors[key], output_tensors[key])
Example #4
Source File: seq_dataset_builder.py From models with Apache License 2.0
def _build_training_batch_dict(batch_sequences_with_states, unroll_length,
                               batch_size):
  """Builds training batch samples.

  Args:
    batch_sequences_with_states: A batch_sequences_with_states object.
    unroll_length: Unrolled length for LSTM training.
    batch_size: Batch size for queue outputs.

  Returns:
    A dictionary of tensors based on items in input_reader_config.
  """
  seq_tensors_dict = {
      fields.InputDataFields.image: [],
      fields.InputDataFields.groundtruth_boxes: [],
      fields.InputDataFields.groundtruth_classes: [],
      'batch': batch_sequences_with_states,
  }
  for i in range(unroll_length):
    for j in range(batch_size):
      filtered_dict = util_ops.filter_groundtruth_with_nan_box_coordinates({
          fields.InputDataFields.groundtruth_boxes: (
              batch_sequences_with_states.sequences['groundtruth_boxes'][j][i]),
          fields.InputDataFields.groundtruth_classes: (
              batch_sequences_with_states.sequences['groundtruth_classes'][j][i]
          ),
      })
      filtered_dict = util_ops.retain_groundtruth_with_positive_classes(
          filtered_dict)
      seq_tensors_dict[fields.InputDataFields.image].append(
          batch_sequences_with_states.sequences['image'][j][i])
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes].append(
          filtered_dict[fields.InputDataFields.groundtruth_boxes])
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes].append(
          filtered_dict[fields.InputDataFields.groundtruth_classes])
  seq_tensors_dict[fields.InputDataFields.image] = tuple(
      seq_tensors_dict[fields.InputDataFields.image])
  seq_tensors_dict[fields.InputDataFields.groundtruth_boxes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes])
  seq_tensors_dict[fields.InputDataFields.groundtruth_classes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes])
  return seq_tensors_dict
Example #5
Source File: seq_dataset_builder.py From multilabel-image-classification-tensorflow with MIT License
def _build_training_batch_dict(batch_sequences_with_states, unroll_length,
                               batch_size):
  """Builds training batch samples.

  Args:
    batch_sequences_with_states: A batch_sequences_with_states object.
    unroll_length: Unrolled length for LSTM training.
    batch_size: Batch size for queue outputs.

  Returns:
    A dictionary of tensors based on items in input_reader_config.
  """
  seq_tensors_dict = {
      fields.InputDataFields.image: [],
      fields.InputDataFields.groundtruth_boxes: [],
      fields.InputDataFields.groundtruth_classes: [],
      'batch': batch_sequences_with_states,
  }
  for i in range(unroll_length):
    for j in range(batch_size):
      filtered_dict = util_ops.filter_groundtruth_with_nan_box_coordinates({
          fields.InputDataFields.groundtruth_boxes: (
              batch_sequences_with_states.sequences['groundtruth_boxes'][j][i]),
          fields.InputDataFields.groundtruth_classes: (
              batch_sequences_with_states.sequences['groundtruth_classes'][j][i]
          ),
      })
      filtered_dict = util_ops.retain_groundtruth_with_positive_classes(
          filtered_dict)
      seq_tensors_dict[fields.InputDataFields.image].append(
          batch_sequences_with_states.sequences['image'][j][i])
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes].append(
          filtered_dict[fields.InputDataFields.groundtruth_boxes])
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes].append(
          filtered_dict[fields.InputDataFields.groundtruth_classes])
  seq_tensors_dict[fields.InputDataFields.image] = tuple(
      seq_tensors_dict[fields.InputDataFields.image])
  seq_tensors_dict[fields.InputDataFields.groundtruth_boxes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_boxes])
  seq_tensors_dict[fields.InputDataFields.groundtruth_classes] = tuple(
      seq_tensors_dict[fields.InputDataFields.groundtruth_classes])
  return seq_tensors_dict