Python preprocessing.decode_image() Examples
The following are 18 code examples of preprocessing.decode_image().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module `preprocessing`, or try the search function.
Example #1
Source File: data_providers.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #2
Source File: data_providers.py From yolo_v2 with Apache License 2.0 | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #3
Source File: data_providers.py From models with Apache License 2.0 | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #4
Source File: data_providers.py From Gun-Detector with Apache License 2.0 | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #5
Source File: data_providers.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #6
Source File: data_providers.py From object_detection_with_tensorflow with MIT License | 6 votes |
def parse_sequence_to_svtcn_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size):
  """Builds a single-view TCN (SVTCN) batch from one serialized sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, number of images in the output batch.

  Returns:
    preprocessed: The preprocessed image batch.
    images: The raw decoded images.
    time_indices: The timestep indices sampled for the batch.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample (time, view) indices for the SVTCN batch.
  time_indices, view_indices = get_svtcn_indices(
      seq_len, batch_size, num_views)
  # Stack view and time indices column-wise for gather_nd.
  gather_indices = tf.concat(
      [tf.expand_dims(view_indices, 1),
       tf.expand_dims(time_indices, 1)], 1)
  # Pull out the encoded image strings and decode them to float images.
  image_strings = tf.gather_nd(views, gather_indices)
  images = tf.map_fn(
      preprocessing.decode_image, image_strings, dtype=tf.float32)
  # Preprocess the whole batch at once.
  preprocessed = preprocess_fn(images, is_training)
  return preprocessed, images, time_indices
Example #7
Source File: data_providers.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #8
Source File: data_providers.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #9
Source File: data_providers.py From models with Apache License 2.0 | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #10
Source File: data_providers.py From yolo_v2 with Apache License 2.0 | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #11
Source File: data_providers.py From object_detection_with_tensorflow with MIT License | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #12
Source File: data_providers.py From Gun-Detector with Apache License 2.0 | 5 votes |
def parse_labeled_example(
    example_proto, view_index, preprocess_fn, image_attr_keys,
    label_attr_keys):
  """Parses a labeled test example from a specified view.

  Args:
    example_proto: A scalar string Tensor holding a serialized Example.
    view_index: Int, index of the view whose image to parse.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    image_attr_keys: List of Strings, names for image keys.
    label_attr_keys: List of Strings, names for label attributes.

  Returns:
    A tuple of (preprocessed_image, *label_attributes, task) `Tensors`.
  """
  # Image features are serialized strings; label attributes are int64 scalars.
  features = {k: tf.FixedLenFeature((), tf.string) for k in image_attr_keys}
  features.update(
      {k: tf.FixedLenFeature((), tf.int64) for k in label_attr_keys})
  parsed_features = tf.parse_single_example(example_proto, features)
  # Pick the image key for the requested view.
  image_only_keys = [k for k in image_attr_keys if 'image' in k]
  view_image_key = image_only_keys[view_index]
  # Decode, then preprocess in eval mode (this is a test example).
  image = preprocessing.decode_image(parsed_features[view_image_key])
  preprocessed = preprocess_fn(image, is_training=False)
  attributes = [parsed_features[k] for k in label_attr_keys]
  task = parsed_features['task']
  return tuple([preprocessed] + attributes + [task])
Example #13
Source File: data_providers.py From g-tensorflow-models with Apache License 2.0 | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Example #14
Source File: data_providers.py From object_detection_with_tensorflow with MIT License | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Example #15
Source File: data_providers.py From models with Apache License 2.0 | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Example #16
Source File: data_providers.py From Gun-Detector with Apache License 2.0 | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Example #17
Source File: data_providers.py From multilabel-image-classification-tensorflow with MIT License | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Example #18
Source File: data_providers.py From yolo_v2 with Apache License 2.0 | 4 votes |
def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Builds a batch of (anchor, positive) image pairs from one sequence.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: Callable (raw_images, is_training) -> preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get; batch_size // 2 pairs are drawn.
    window: Int, only take pairs from a maximum window of this size.

  Returns:
    anchor_prepro: 4-D float32 `Tensor` of preprocessed anchor images.
    positive_prepro: 4-D float32 `Tensor` of preprocessed positive images.
    anchor_images: 4-D float32 `Tensor` of raw anchor images.
    pos_images: 4-D float32 `Tensor` of raw positive images.
    anchor_labels: 1-D `Tensor`; pair i gets label i + 1.
    positive_labels: Same labels as anchor_labels.
    seq_len: The sequence length.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
  # Sample random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)
  # Build [view, time] index pairs for anchors and positives.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  # Gather the encoded image strings, then decode them to float images.
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)
  # Preprocess anchors and positives as one concatenated batch, then split.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(
      preprocessed, num_or_size_splits=2, axis=0)
  # Give every image tensor a static batch dimension of num_pairs.
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  anchor_prepro, positive_prepro, anchor_images, pos_images = [
      set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  # Each anchor shares its (1-based) label with its positive.
  anchor_labels = tf.range(1, num_pairs + 1)
  positive_labels = tf.range(1, num_pairs + 1)
  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)