Python tensorflow.TFRecordReader() Examples
The following are 30 code examples of tensorflow.TFRecordReader().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorflow, or try the search function.
Example #1
Source File: read_tfrecord_data.py From tensorflow_input_image_by_tfrecord with Apache License 2.0 | 6 votes |
def read_and_decode(filename_queue):
    """Build graph ops that read one TFRecord from `filename_queue` and decode it.

    Returns an `image_object` whose fields are graph tensors: the cropped
    JPEG image, the raw height/width, the filename, and the int32 label.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Parse the serialized Example proto into a dict of feature tensors.
    features = tf.parse_single_example(serialized_example, features = {
        "image/encoded": tf.FixedLenFeature([], tf.string),
        "image/height": tf.FixedLenFeature([], tf.int64),
        "image/width": tf.FixedLenFeature([], tf.int64),
        "image/filename": tf.FixedLenFeature([], tf.string),
        "image/class/label": tf.FixedLenFeature([], tf.int64),})
    image_encoded = features["image/encoded"]
    image_raw = tf.image.decode_jpeg(image_encoded, channels=3)
    # NOTE(review): image_object and FLAGS are module-level names defined
    # elsewhere in this file; their definitions are not visible here.
    current_image_object = image_object()
    current_image_object.image = tf.image.resize_image_with_crop_or_pad(image_raw, FLAGS.image_height, FLAGS.image_width)  # cropped image with size 299x299
    # current_image_object.image = tf.cast(image_crop, tf.float32) * (1./255) - 0.5
    current_image_object.height = features["image/height"]  # height of the raw image
    current_image_object.width = features["image/width"]  # width of the raw image
    current_image_object.filename = features["image/filename"]  # filename of the raw image
    current_image_object.label = tf.cast(features["image/class/label"], tf.int32)  # label of the raw image
    return current_image_object
Example #2
Source File: _ds_examples.py From tensorfx with Apache License 2.0 | 6 votes |
def read_instances(self, count, shuffle, epochs): """Reads the data represented by this DataSource using a TensorFlow reader. Arguments: epochs: The number of epochs or passes over the data to perform. Returns: A tensor containing instances that are read. """ # None implies unlimited; switch the value to None when epochs is 0. epochs = epochs or None options = None if self._compressed: options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP) files = tf.train.match_filenames_once(self._path, name='files') queue = tf.train.string_input_producer(files, num_epochs=epochs, shuffle=shuffle, name='queue') reader = tf.TFRecordReader(options=options, name='reader') _, instances = reader.read_up_to(queue, count, name='read') return instances
Example #3
Source File: vfn_train.py From view-finding-network with GNU General Public License v3.0 | 6 votes |
def read_and_decode_aug(filename_queue):
    """Read one serialized example, decode and randomly augment it.

    Returns the two 227x227x3 halves of the stored 227x227x6 record,
    split along the channel axis, after random flip/brightness/contrast.
    """
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    # Defaults are not specified since the key is required.
    feature_spec = {'image_raw': tf.FixedLenFeature([], tf.string)}
    parsed = tf.parse_single_example(serialized, features=feature_spec)
    raw = tf.decode_raw(parsed['image_raw'], tf.uint8)
    flipped = tf.image.random_flip_left_right(tf.reshape(raw, [227, 227, 6]))
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    scaled = tf.cast(flipped, tf.float32) * (1. / 255) - 0.5
    brightened = tf.image.random_brightness(scaled, 0.01)
    augmented = tf.image.random_contrast(brightened, 0.95, 1.05)
    return tf.split(augmented, 2, 2)  # 3rd dimension two parts
Example #4
Source File: vfn_train.py From view-finding-network with GNU General Public License v3.0 | 6 votes |
def read_and_decode(filename_queue):
    """Read one serialized example and decode it into two image halves.

    Returns the two 227x227x3 halves of the stored 227x227x6 record,
    split along the channel axis, scaled to [-0.5, 0.5] floats.
    """
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    # Defaults are not specified since the key is required.
    parsed = tf.parse_single_example(
        serialized,
        features={'image_raw': tf.FixedLenFeature([], tf.string)})
    raw = tf.decode_raw(parsed['image_raw'], tf.uint8)
    stacked = tf.reshape(raw, [227, 227, 6])
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    normalized = tf.cast(stacked, tf.float32) * (1. / 255) - 0.5
    return tf.split(normalized, 2, 2)  # 3rd dimension two parts
Example #5
Source File: loader.py From SketchCNN with MIT License | 6 votes |
def _read_raw(self):
    """Read raw data from TFRecord.

    Returns:
        :return: data list [input_raw, label_raw]; when self._with_key is
            set, the record's 'name' feature is appended as a third element.
    """
    self._reader = tf.TFRecordReader()
    _, serialized_example = self._reader.read(self._queue)
    features = tf.parse_single_example(serialized_example, features={
        'name': tf.FixedLenFeature([], tf.string),
        'block': tf.FixedLenFeature([], tf.string)
    })
    # decode_block is a module-level helper (not visible here) that splits
    # the packed 'block' bytes into input and label tensors of self._raw_size.
    input_raw, label_raw = decode_block(features['block'], tensor_size=self._raw_size)
    if self._with_key:
        return input_raw, label_raw, features['name']
    return input_raw, label_raw
Example #6
Source File: model.py From cloudml-dist-mnist-example with Apache License 2.0 | 6 votes |
def read_and_decode(filename_queue):
    """Decode one MNIST example into a [0, 1]-scaled image vector and label."""
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    feature_spec = {
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    parsed = tf.parse_single_example(serialized, features=feature_spec)
    # Raw bytes -> flat uint8 vector of 784 pixels (28x28).
    pixels = tf.decode_raw(parsed['image_raw'], tf.uint8)
    pixels.set_shape([784])
    # Scale from [0, 255] to [0, 1] floats.
    image = tf.cast(pixels, tf.float32) * (1. / 255)
    label = tf.cast(parsed['label'], tf.int32)
    return image, label
Example #7
Source File: readers.py From youtube-8m with Apache License 2.0 | 6 votes |
def prepare_reader(self, filename_queue, batch_size=1024):
    """Reads up to batch_size aggregated Examples and parses them.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: Maximum number of serialized Examples per read_up_to call.

    Returns:
      A tuple of (video ids, concatenated float features, multi-hot label
      matrix, per-example weights of 1.0).
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format( \
        len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    # Expand the sparse label indices into a dense multi-hot indicator.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example #8
Source File: reader.py From CycleGAN-TensorFlow with MIT License | 6 votes |
def __init__(self, tfrecords_file, image_size=256,
             min_queue_examples=1000, batch_size=1, num_threads=8, name=''):
    """Store pipeline settings and create the shared TFRecord reader.

    Args:
      tfrecords_file: string, tfrecords file path
      image_size: integer, target side length for decoded images
      min_queue_examples: integer, minimum number of samples to retain
        in the queue that provides of batches of examples
      batch_size: integer, number of images per batch
      num_threads: integer, number of preprocess threads
      name: string, optional scope name for this reader
    """
    # Source file and the reader op that will consume it.
    self.name = name
    self.tfrecords_file = tfrecords_file
    self.reader = tf.TFRecordReader()
    # Batching / queueing configuration.
    self.image_size = image_size
    self.batch_size = batch_size
    self.min_queue_examples = min_queue_examples
    self.num_threads = num_threads
Example #9
Source File: inputs.py From ffn with Apache License 2.0 | 6 votes |
def load_patch_coordinates_from_filename_queue(filename_queue):
    """Loads coordinates and volume names from filename queue.

    Args:
        filename_queue: Tensorflow queue created from create_filename_queue()

    Returns:
        Tuple of coordinates (shape `[1, 3]`) and volume name (shape `[1]`)
        tensors.
    """
    # The coordinate records are stored GZIP-compressed.
    record_options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)
    keys, protos = tf.TFRecordReader(options=record_options).read(filename_queue)
    examples = tf.parse_single_example(protos, features=dict(
        center=tf.FixedLenFeature(shape=[1, 3], dtype=tf.int64),
        label_volume_name=tf.FixedLenFeature(shape=[1], dtype=tf.string),
    ))
    coord = examples['center']
    volname = examples['label_volume_name']
    return coord, volname
Example #10
Source File: inputs.py From DOTA_models with Apache License 2.0 | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
    """Reads and parses SequenceExamples from TFRecord-encoded file_list.

    Args:
      file_list: List of TFRecord file paths holding SequenceExamples.
      tokens_shape: Optional shape for the token-id sequence feature;
        defaults to scalar ([]) entries.

    Returns:
      Tuple of (example key, context dict, sequence-feature dict).
    """
    tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
    file_queue = tf.train.string_input_producer(file_list)
    reader = tf.TFRecordReader()
    seq_key, serialized_record = reader.read(file_queue)
    # Feature keys come from the shared data_utils.SequenceWrapper constants
    # (defined elsewhere in this project).
    ctx, sequence = tf.parse_single_sequence_example(
        serialized_record,
        sequence_features={
            data_utils.SequenceWrapper.F_TOKEN_ID:
                tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
            data_utils.SequenceWrapper.F_LABEL:
                tf.FixedLenSequenceFeature([], dtype=tf.int64),
            data_utils.SequenceWrapper.F_WEIGHT:
                tf.FixedLenSequenceFeature([], dtype=tf.float32),
        })
    return seq_key, ctx, sequence
Example #11
Source File: readers.py From Youtube-8M-WILLOW with Apache License 2.0 | 6 votes |
def prepare_reader(self, filename_queue, max_quantized_value=2, min_quantized_value=-2):
    """Creates a single reader thread for YouTube8M SequenceExamples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      A tuple of video indexes, video features, labels, and padding data.
    """
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    # Delegate parsing/dequantization to the shared helper on this class.
    return self.prepare_serialized_examples(
        serialized, max_quantized_value, min_quantized_value)
Example #12
Source File: test.py From video2tfrecord with MIT License | 5 votes |
def read_and_decode(filename_queue, n_frames):
    """Creates one image sequence.

    Reads a single serialized example and extracts `n_frames` images stored
    under keys 'blob/0', 'blob/1', ..., concatenating them along axis 0.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    image_seq = []

    if n_frames == 'all':
        n_frames = 354  # travis kills due to too large tfrecord

    for image_count in range(n_frames):
        path = 'blob' + '/' + str(image_count)
        feature_dict = {path: tf.FixedLenFeature([], tf.string),
                        'height': tf.FixedLenFeature([], tf.int64),
                        'width': tf.FixedLenFeature([], tf.int64),
                        'depth': tf.FixedLenFeature([], tf.int64)}
        features = tf.parse_single_example(serialized_example, features=feature_dict)
        image_buffer = tf.reshape(features[path], shape=[])
        image = tf.decode_raw(image_buffer, tf.uint8)
        # NOTE(review): height, width and num_depth are not defined in this
        # function -- presumably module-level constants; the parsed
        # 'height'/'width'/'depth' features above are never used. Verify
        # against the rest of the module.
        image = tf.reshape(image, tf.stack([height, width, num_depth]))
        image = tf.reshape(image, [1, height, width, num_depth])
        image_seq.append(image)

    image_seq = tf.concat(image_seq, 0)
    return image_seq
Example #13
Source File: task.py From cloudml-samples with Apache License 2.0 | 5 votes |
def gzip_reader_fn():
    """Return a TFRecordReader configured to decompress GZIP record files."""
    gzip_options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    return tf.TFRecordReader(options=gzip_options)
Example #14
Source File: 18_basic_tfrecord.py From deep-learning-note with MIT License | 5 votes |
def read_from_tfrecord(filenames):
    """Read one record from `filenames` and decode its label, shape and image."""
    queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, serialized = reader.read(queue)
    feature_spec = {
        'label': tf.FixedLenFeature([], tf.int64),
        'shape': tf.FixedLenFeature([], tf.string),
        'image': tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(serialized, features=feature_spec, name='features')
    # Image bytes are raw uint8; the stored shape is a serialized int32 vector.
    flat_image = tf.decode_raw(parsed['image'], tf.uint8)
    shape = tf.decode_raw(parsed['shape'], tf.int32)
    image = tf.reshape(flat_image, shape)
    label = parsed['label']
    return label, shape, image
Example #15
Source File: readtf.py From udacity-driving-reader with Apache License 2.0 | 5 votes |
def create_read_graph(data_dir, name, num_readers=4, estimated_examples_per_shard=64, coder=None):
    """Build the graph that reads, decodes and batches sharded TFRecord examples.

    Returns the batched (image, image_timestamp, steering_angles,
    steering_timestamps) tensors produced by tf.train.batch_join.
    NOTE(review): the `coder` argument is accepted but never used here.
    """
    # Get sharded tf example files for the dataset
    data_files = datafiles(data_dir, name)

    # Create queue for sharded tf example files
    # FIXME the num_epochs argument seems to have no impact? Queue keeps looping forever if not stopped.
    filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1, num_epochs=1)

    # Create queue for examples
    examples_queue = tf.FIFOQueue(capacity=estimated_examples_per_shard + 4, dtypes=[tf.string])

    enqueue_ops = []
    processed = []
    if num_readers > 1:
        # Multiple readers feed a shared FIFO queue via a QueueRunner.
        for _ in range(num_readers):
            reader = tf.TFRecordReader()
            _, example = reader.read(filename_queue)
            enqueue_ops.append(examples_queue.enqueue([example]))
        example_serialized = examples_queue.dequeue()
        tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
    else:
        # Single-reader path: read straight from the filename queue.
        reader = tf.TFRecordReader()
        _, example_serialized = reader.read(filename_queue)

    # NOTE(review): all 10 parse pipelines share the same example_serialized
    # op; batch_join starts one thread per tuple -- confirm each thread
    # re-evaluates the dequeue/read independently at run time.
    for x in range(10):
        image_buffer, image_timestamp, steering_angles, steering_timestamps = example_parser(example_serialized)
        decoded_image = tf.image.decode_jpeg(image_buffer)
        print(decoded_image.get_shape(), image_timestamp.get_shape(), steering_angles.get_shape(), steering_timestamps.get_shape())
        decoded_image = tf.reshape(decoded_image, shape=[480, 640, 3])
        processed.append((decoded_image, image_timestamp, steering_angles, steering_timestamps))

    batch_size = 10
    batch_queue_capacity = 2 * batch_size
    batch_data = tf.train.batch_join(
        processed,
        batch_size=batch_size,
        capacity=batch_queue_capacity)
    return batch_data
Example #16
Source File: readers.py From youtube-8m with Apache License 2.0 | 5 votes |
def prepare_reader(self, filename_queue, batch_size=1024):
    """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: Maximum number of serialized Examples per read_up_to call.

    Returns:
      A tuple of video indexes, features, labels, padding data, and the
      stored per-class "predictions" feature of each Example.
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format( \
        len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "predictions": tf.FixedLenFeature([self.num_classes], tf.float32),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    # Expand the sparse label indices into a dense multi-hot indicator.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]), features["predictions"]
Example #17
Source File: readers.py From youtube-8m with Apache License 2.0 | 5 votes |
def prepare_reader(self, filename_queue, batch_size=1024):
    """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: Maximum number of serialized Examples per read_up_to call.

    Returns:
      A tuple of video indexes, features, labels, and padding data.
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format( \
        len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    # Expand the sparse label indices into a dense multi-hot indicator.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example #18
Source File: writers.py From youtube-8m with Apache License 2.0 | 5 votes |
def prepare_writer(self, filename_queue, batch_size=1024):
    """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

    NOTE(review): despite being named prepare_writer, this method only
    *reads* and parses Examples -- nothing is written here.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: Maximum number of serialized Examples per read_up_to call.

    Returns:
      A tuple of video indexes, features, labels, and padding data.
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format( \
        len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    # Expand the sparse label indices into a dense multi-hot indicator.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example #19
Source File: readers.py From youtube-8m with Apache License 2.0 | 5 votes |
def prepare_reader(self, filename_queue):
    """Read one SequenceExample and expand it into batch-of-1 tensors.

    Returns:
      (video ids, rgb features, audio features, labels, num_frames), each
      with a leading batch dimension of 1.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            "rgb": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio": tf.FixedLenSequenceFeature([], dtype=tf.string),
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
                           validate_indices=False),
        tf.bool))

    # get_video_matrix is a class helper (not visible here); it returns the
    # per-frame feature matrix and the number of frames for each stream.
    rgbs, num_frames = self.get_video_matrix(features["rgb"], 1024, self.max_frames)
    audios, num_frames = self.get_video_matrix(features["audio"], 1024, self.max_frames)

    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_rgbs = tf.expand_dims(rgbs, 0)
    batch_audios = tf.expand_dims(audios, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_rgbs, batch_audios, batch_labels, batch_frames
Example #20
Source File: readers.py From youtube-8m with Apache License 2.0 | 5 votes |
def prepare_reader(self, filename_queue, batch_size=1024):
    """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: Maximum number of serialized Examples per read_up_to call.

    Returns:
      A tuple of video indexes, features, labels, and padding data.
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format( \
        len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    # Expand the sparse label indices into a dense multi-hot indicator.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example #21
Source File: OxFlowers_BCNN_85.py From OxFlowers_BCNN with MIT License | 5 votes |
def read_and_decode(filename): """ read and decode a TFRecords file. returns numpy array objects. pipeline: TFRecords --> queue --> serialized_example --> dict. """ # Output strings (e.g. filenames) to a queue for an input pipeline. filename_queue = tf.train.string_input_producer([filename]) # print(filename_queue) # A Reader that outputs the records from a TFRecords file. reader = tf.TFRecordReader() # reader.read(queue) # Args queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. # Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor. _, serialized_example = reader.read(filename_queue) # print(serialized_example) # Parses a single Example proto. # Returns a dict mapping feature keys to Tensor and SparseTensor values. features = tf.parse_single_example(serialized_example,features={ 'label': tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),}) # Reinterpret the bytes of a string as a vector of numbers. imgs = tf.decode_raw(features['img_raw'], tf.uint8) # print(img.dtype) # print(img.shape) # Reshapes a tensor. imgs = tf.reshape(imgs, [-1, ImageWidth, ImageHeight, ImageChannels]) # cast the data from (0, 255) to (-0.5, 0.5) # (-0.5, 0.5) may be better than (0, 1). imgs = tf.cast(imgs, tf.float32) * (1. / 255) - 0.5 labels = tf.cast(features['label'], tf.int64) # print(type(imgs)) # print(imgs.shape) # print(type(labels)) # print(labels.shape) return imgs, labels
Example #22
Source File: detection_inference.py From Person-Detection-and-Tracking with MIT License | 5 votes |
def build_input(tfrecord_paths):
    """Builds the graph's input.

    Args:
      tfrecord_paths: List of paths to the input TFRecords

    Returns:
      serialized_example_tensor: The next serialized example. String scalar
        Tensor
      image_tensor: The decoded image of the example. Uint8 tensor,
        shape=[1, None, None, 3]
    """
    filename_queue = tf.train.string_input_producer(
        tfrecord_paths, shuffle=False, num_epochs=1)

    tf_record_reader = tf.TFRecordReader()
    _, serialized_example_tensor = tf_record_reader.read(filename_queue)
    # standard_fields.TfExampleFields supplies the canonical feature key names.
    features = tf.parse_single_example(
        serialized_example_tensor,
        features={
            standard_fields.TfExampleFields.image_encoded:
                tf.FixedLenFeature([], tf.string),
        })
    encoded_image = features[standard_fields.TfExampleFields.image_encoded]
    image_tensor = tf.image.decode_image(encoded_image, channels=3)
    image_tensor.set_shape([None, None, 3])
    # Add a leading batch dimension of 1.
    image_tensor = tf.expand_dims(image_tensor, 0)

    return serialized_example_tensor, image_tensor
Example #23
Source File: tfrecord.py From graph-based-image-classification with MIT License | 5 votes |
def write_tfrecord(writer, data, label):
    """Writes the data and label as a TFRecord example.

    Args:
        writer: A TFRecordWriter used to persist the example.
        data: A dictionary holding numpy arrays of data.
        label: An int64 label index.
    """
    # Serialize each data array as a bytes feature; the label as int64.
    features = {key: _bytes_feature(data[key]) for key in data}
    features['label'] = _int64_feature(label)

    example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(example.SerializeToString())
Example #24
Source File: tfrecord.py From graph-based-image-classification with MIT License | 5 votes |
def read_tfrecord(filename_queue, shapes={}):
    """Reads and parses TFRecord examples from data files.

    Args:
        filename_queue: A queue of strings with the filenames to read from.
        shapes: A dictionary mapping each feature name to the shape it is
            reshaped to in a single example.

    Returns:
        A (data dict, label) tuple of tensors.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    # Every entry named in `shapes` is stored as a raw byte string; the
    # label is a single int64.
    feature_spec = {}
    for key in shapes:
        feature_spec[key] = tf.FixedLenFeature([], tf.string)
    feature_spec['label'] = tf.FixedLenFeature([], tf.int64)

    example = tf.parse_single_example(serialized_example, features=feature_spec)

    # Decode each raw float32 buffer and restore its declared shape.
    data = {}
    for key in shapes:
        decoded = tf.decode_raw(example[key], tf.float32)
        data[key] = tf.reshape(decoded, shapes[key])
    label = tf.reshape(example['label'], [1])

    return data, label
Example #25
Source File: detection_inference.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def build_input(tfrecord_paths):
    """Builds the graph's input.

    Args:
      tfrecord_paths: List of paths to the input TFRecords

    Returns:
      serialized_example_tensor: The next serialized example. String scalar
        Tensor
      image_tensor: The decoded image of the example. Uint8 tensor,
        shape=[1, None, None, 3]
    """
    filename_queue = tf.train.string_input_producer(
        tfrecord_paths, shuffle=False, num_epochs=1)

    tf_record_reader = tf.TFRecordReader()
    _, serialized_example_tensor = tf_record_reader.read(filename_queue)
    # standard_fields.TfExampleFields supplies the canonical feature key names.
    features = tf.parse_single_example(
        serialized_example_tensor,
        features={
            standard_fields.TfExampleFields.image_encoded:
                tf.FixedLenFeature([], tf.string),
        })
    encoded_image = features[standard_fields.TfExampleFields.image_encoded]
    image_tensor = tf.image.decode_image(encoded_image, channels=3)
    image_tensor.set_shape([None, None, 3])
    # Add a leading batch dimension of 1.
    image_tensor = tf.expand_dims(image_tensor, 0)

    return serialized_example_tensor, image_tensor
Example #26
Source File: fully_connected_reader.py From tensorflow_input_image_by_tfrecord with Apache License 2.0 | 5 votes |
def read_and_decode(filename_queue):
    """Read one serialized Example and decode it to a flat image and its label."""
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })

    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image/encoded'], tf.uint8)
    image.set_shape([128*128])

    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['image/class/label'], tf.int32)

    return image, label
Example #27
Source File: inputs.py From tensorflow_fasttext with MIT License | 5 votes |
def InputFn(mode,
            use_ngrams,
            input_file,
            vocab_file,
            vocab_size,
            embedding_dimension,
            num_oov_vocab_buckets,
            label_file,
            label_size,
            ngram_embedding_dimension,
            num_ngram_hash_buckets,
            batch_size,
            num_epochs=None,
            num_threads=1):
    """Build an Estimator input_fn that reads batched features from TFRecords.

    Args:
      mode: A tf.estimator.ModeKeys value; in PREDICT mode no label is
        popped from the parsed features.
      use_ngrams: Passed through to ParseSpec to select the parse spec.
      input_file: Path of the TFRecord file to read.
      batch_size: Number of examples per batch.
      num_epochs: Number of passes over the data; None or <= 0 means
        repeat indefinitely.
      num_threads: Number of reader threads.
      (Remaining arguments are part of the public signature but unused here.)

    Returns:
      A zero-argument input_fn returning (features, label); label is None
      in PREDICT mode.
    """
    # Normalize non-positive epoch counts to None (= repeat indefinitely).
    # BUG FIX: the original compared `num_epochs <= 0` unconditionally,
    # which raises TypeError on Python 3 when num_epochs is None (the
    # default value).
    if num_epochs is not None and num_epochs <= 0:
        num_epochs = None

    def input_fn():
        """Read a batch of (features, label) via contrib read_batch_features."""
        include_target = mode != tf.estimator.ModeKeys.PREDICT
        parse_spec = ParseSpec(use_ngrams, include_target)
        print("ParseSpec", parse_spec)
        print("Input file:", input_file)
        features = tf.contrib.learn.read_batch_features(
            input_file, batch_size, parse_spec, tf.TFRecordReader,
            num_epochs=num_epochs, reader_num_threads=num_threads)
        label = None
        if include_target:
            # The label was parsed as a feature; split it out for Estimator.
            label = features.pop("label")
        return features, label
    return input_fn
Example #28
Source File: mnist.py From glas with Apache License 2.0 | 5 votes |
def dataset(directory, subset, num_folds, fold, holdout):
    """ Return the mnist dataset as a slim.dataset.Dataset.

    Args:
        directory: Directory containing the encoded TFRecord files.
        subset: Which subset (e.g. train/test) to load.
        num_folds, fold, holdout: Cross-validation fold selection, applied
            by the module-level get_folds helper (not visible here).
    """
    # Decode single-channel images of IMAGE_SHAPE from encoded bytes.
    decoder = slim.tfexample_decoder.TFExampleDecoder(
        {'image/encoded': tf.FixedLenFeature([], tf.string),
         'image/format': tf.FixedLenFeature([], tf.string)},
        {'image': slim.tfexample_decoder.Image(shape=IMAGE_SHAPE, channels=1)}
    )

    filenames = encode_utils.get_filenames(directory, subset)
    filenames = get_folds(filenames, num_folds, fold, holdout)

    return slim.dataset.Dataset(
        filenames, tf.TFRecordReader, decoder,
        encode_utils.num_examples(filenames),
        _ITEMS_TO_DESCRIPTIONS, data_shape=IMAGE_SHAPE)
Example #29
Source File: coco.py From FastMaskRCNN with Apache License 2.0 | 5 votes |
def read(tfrecords_filename):
    """Build ops that read one COCO example from ZLIB-compressed TFRecords.

    Args:
        tfrecords_filename: A TFRecord path or list of paths.

    Returns:
        (image, height, width, gt_boxes, gt_masks, num_instances, img_id)
        tensors for a single example.
    """
    if not isinstance(tfrecords_filename, list):
        tfrecords_filename = [tfrecords_filename]
    filename_queue = tf.train.string_input_producer(
        tfrecords_filename, num_epochs=100)

    # Records were written ZLIB-compressed; the reader must match.
    options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
    reader = tf.TFRecordReader(options=options)
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/img_id': tf.FixedLenFeature([], tf.int64),
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'label/num_instances': tf.FixedLenFeature([], tf.int64),
            'label/gt_masks': tf.FixedLenFeature([], tf.string),
            'label/gt_boxes': tf.FixedLenFeature([], tf.string),
            'label/encoded': tf.FixedLenFeature([], tf.string),
        })
    # image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    img_id = tf.cast(features['image/img_id'], tf.int32)
    ih = tf.cast(features['image/height'], tf.int32)
    iw = tf.cast(features['image/width'], tf.int32)
    num_instances = tf.cast(features['label/num_instances'], tf.int32)
    image = tf.decode_raw(features['image/encoded'], tf.uint8)
    imsize = tf.size(image)
    # If the buffer holds exactly ih*iw bytes the stored image is grayscale;
    # expand it to 3 channels, otherwise reshape directly to (ih, iw, 3).
    image = tf.cond(tf.equal(imsize, ih * iw), \
        lambda: tf.image.grayscale_to_rgb(tf.reshape(image, (ih, iw, 1))), \
        lambda: tf.reshape(image, (ih, iw, 3)))

    # 5 float32 values per instance -- presumably box coordinates plus a
    # class id; confirm against the dataset writer.
    gt_boxes = tf.decode_raw(features['label/gt_boxes'], tf.float32)
    gt_boxes = tf.reshape(gt_boxes, [num_instances, 5])
    # One full-resolution binary mask per instance.
    gt_masks = tf.decode_raw(features['label/gt_masks'], tf.uint8)
    gt_masks = tf.cast(gt_masks, tf.int32)
    gt_masks = tf.reshape(gt_masks, [num_instances, ih, iw])

    return image, ih, iw, gt_boxes, gt_masks, num_instances, img_id
Example #30
Source File: task.py From cloudml-samples with Apache License 2.0 | 5 votes |
def gzip_reader_fn():
    """Build a TFRecordReader that reads GZIP-compressed record files."""
    record_options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    return tf.TFRecordReader(options=record_options)