Python tensorflow.FixedLenSequenceFeature() Examples
The following are 30 code examples of tensorflow.FixedLenSequenceFeature(), taken from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
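Before the project examples, here is a minimal, self-contained sketch of the typical pattern. tf.FixedLenSequenceFeature describes one step of a feature_list in a tf.train.SequenceExample, so every step must have the same shape and dtype. The feature names and values below are illustrative only and are not taken from any of the projects that follow (TF 1.x API).

import tensorflow as tf

# Build a tiny SequenceExample by hand (names and values are illustrative only).
example = tf.train.SequenceExample(
    context=tf.train.Features(feature={
        'length': tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
    }),
    feature_lists=tf.train.FeatureLists(feature_list={
        'tokens': tf.train.FeatureList(feature=[
            tf.train.Feature(int64_list=tf.train.Int64List(value=[t]))
            for t in [7, 8, 9]
        ]),
    }))

# FixedLenSequenceFeature describes one step of the 'tokens' feature_list:
# a single int64 per step, so the parsed tensor has shape [sequence_length].
context, sequence = tf.parse_single_sequence_example(
    example.SerializeToString(),
    context_features={'length': tf.FixedLenFeature([], dtype=tf.int64)},
    sequence_features={'tokens': tf.FixedLenSequenceFeature([], dtype=tf.int64)})

with tf.Session() as sess:
    print(sess.run(sequence['tokens']))  # -> [7 8 9]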
Example #1
Source File: inputs.py From hands-detection with MIT License | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
  """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
  tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
  file_queue = tf.train.string_input_producer(file_list)
  reader = tf.TFRecordReader()
  seq_key, serialized_record = reader.read(file_queue)
  ctx, sequence = tf.parse_single_sequence_example(
      serialized_record,
      sequence_features={
          data_utils.SequenceWrapper.F_TOKEN_ID:
              tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
          data_utils.SequenceWrapper.F_LABEL:
              tf.FixedLenSequenceFeature([], dtype=tf.int64),
          data_utils.SequenceWrapper.F_WEIGHT:
              tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  return seq_key, ctx, sequence
Example #2
Source File: model.py From thai-word-segmentation with MIT License | 6 votes |
def _parse_record(example_proto):
    context_features = {
        "length": tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        "labels": tf.FixedLenSequenceFeature([], dtype=tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=example_proto,
        context_features=context_features,
        sequence_features=sequence_features)
    return context_parsed['length'], sequence_parsed['tokens'], sequence_parsed['labels']

# Read training data from TFRecord file, shuffle, loop over data infinitely and
# pad to the longest sentence
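The trailing comment above refers to the next stage of that project's input pipeline. A possible continuation using tf.data is sketched below; the file name, buffer size, batch size, and padded shapes are assumptions for illustration, not taken from the project.

dataset = tf.data.TFRecordDataset('train.tfrecord')  # hypothetical file name
dataset = dataset.map(_parse_record)
dataset = dataset.shuffle(buffer_size=10000)   # shuffle training records
dataset = dataset.repeat()                     # loop over the data indefinitely
dataset = dataset.padded_batch(
    batch_size=32,
    padded_shapes=([], [None], [None]))        # pad tokens/labels to the longest sentence in the batch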
Example #3
Source File: inputs.py From DOTA_models with Apache License 2.0 | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
  """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
  tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
  file_queue = tf.train.string_input_producer(file_list)
  reader = tf.TFRecordReader()
  seq_key, serialized_record = reader.read(file_queue)
  ctx, sequence = tf.parse_single_sequence_example(
      serialized_record,
      sequence_features={
          data_utils.SequenceWrapper.F_TOKEN_ID:
              tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
          data_utils.SequenceWrapper.F_LABEL:
              tf.FixedLenSequenceFeature([], dtype=tf.int64),
          data_utils.SequenceWrapper.F_WEIGHT:
              tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  return seq_key, ctx, sequence
Example #4
Source File: vqa.py From BERT with Apache License 2.0 | 6 votes |
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
  data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  data_fields["image/answer"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)

  data_items_to_decoders[
      "question"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/question")
  data_items_to_decoders[
      "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/answer")
  return data_fields, data_items_to_decoders
Example #5
Source File: data_providers.py From yolo_v2 with Apache License 2.0 | 6 votes |
def parse_sequence_example(serialized_example, num_views):
  """Parses a serialized sequence example into views, sequence length data."""
  context_features = {
      'task': tf.FixedLenFeature(shape=[], dtype=tf.string),
      'len': tf.FixedLenFeature(shape=[], dtype=tf.int64)
  }
  view_names = ['view%d' % i for i in range(num_views)]
  fixed_features = [
      tf.FixedLenSequenceFeature(
          shape=[], dtype=tf.string) for _ in range(len(view_names))]
  sequence_features = dict(zip(view_names, fixed_features))
  context_parse, sequence_parse = tf.parse_single_sequence_example(
      serialized=serialized_example,
      context_features=context_features,
      sequence_features=sequence_features)
  views = tf.stack([sequence_parse[v] for v in view_names])
  lens = [sequence_parse[v].get_shape().as_list()[0] for v in view_names]
  assert len(set(lens)) == 1
  # All views have the same length, so any one of them gives the dynamic
  # sequence length (the comprehension variable is not available here in Python 3).
  seq_len = tf.shape(sequence_parse[view_names[-1]])[0]
  return context_parse, views, seq_len
Example #6
Source File: input_pipeline.py From unsupervised_captioning with MIT License | 6 votes |
def parse_sentence(serialized):
  """Parses a tensorflow.SequenceExample into a caption.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.

  Returns:
    key: The keywords in a sentence.
    num_key: The number of keywords.
    sentence: A description.
    sentence_length: The length of the description.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={},
      sequence_features={
          'sentence': tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })
  sentence = tf.to_int32(sequence['sentence'])
  key = controlled_shuffle(sentence[1:-1])
  key = random_drop(key)
  key = tf.concat([key, [FLAGS.end_id]], axis=0)
  return key, tf.shape(key)[0], sentence, tf.shape(sentence)[0]
Example #7
Source File: inputs.py From yolo_v2 with Apache License 2.0 | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
  """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
  tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
  file_queue = tf.train.string_input_producer(file_list)
  reader = tf.TFRecordReader()
  seq_key, serialized_record = reader.read(file_queue)
  ctx, sequence = tf.parse_single_sequence_example(
      serialized_record,
      sequence_features={
          data_utils.SequenceWrapper.F_TOKEN_ID:
              tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
          data_utils.SequenceWrapper.F_LABEL:
              tf.FixedLenSequenceFeature([], dtype=tf.int64),
          data_utils.SequenceWrapper.F_WEIGHT:
              tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  return seq_key, ctx, sequence
Example #8
Source File: vqa.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
  data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  data_fields["image/answer"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)

  data_items_to_decoders[
      "question"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/question")
  data_items_to_decoders[
      "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/answer")
  return data_fields, data_items_to_decoders
Example #9
Source File: input_pipeline.py From unsupervised_captioning with MIT License | 6 votes |
def parse_image(serialized):
  """Parses a tensorflow.SequenceExample into an image and detected objects.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.

  Returns:
    encoded_image: A scalar string Tensor containing a JPEG encoded image.
    classes: A 1-D int64 Tensor containing the detected objects.
    scores: A 1-D float32 Tensor containing the detection scores.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          'image/data': tf.FixedLenFeature([], dtype=tf.string)
      },
      sequence_features={
          'classes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
          'scores': tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  encoded_image = context['image/data']
  classes = tf.to_int32(sequence['classes'])
  scores = sequence['scores']
  return encoded_image, classes, scores
Example #10
Source File: serialize_fasta.py From tape-neurips2019 with MIT License | 6 votes |
def deserialize_fasta_sequence(example):
    context = {
        'protein_length': tf.FixedLenFeature([1], tf.int64),
        'id': tf.FixedLenFeature([], tf.string)
    }

    features = {
        'primary': tf.FixedLenSequenceFeature([1], tf.int64),
    }

    context, features = tf.parse_single_sequence_example(
        example,
        context_features=context,
        sequence_features=features
    )

    return {'id': context['id'],
            'primary': tf.to_int32(features['primary'][:, 0]),
            'protein_length': tf.to_int32(context['protein_length'][0])}
Example #11
Source File: eval_obj2sen.py From unsupervised_captioning with MIT License | 6 votes |
def parse_image(serialized):
  """Parses a tensorflow.SequenceExample into an image and detected objects.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.

  Returns:
    name: A scalar string Tensor containing the image name.
    classes: A 1-D int64 Tensor containing the detected objects.
    scores: A 1-D float32 Tensor containing the detection scores.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          'image/name': tf.FixedLenFeature([], dtype=tf.string)
      },
      sequence_features={
          'classes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
          'scores': tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  name = context['image/name']
  classes = tf.to_int32(sequence['classes'])
  scores = sequence['scores']
  return name, classes, scores
Example #12
Source File: data_providers.py From Gun-Detector with Apache License 2.0 | 6 votes |
def parse_sequence_example(serialized_example, num_views):
  """Parses a serialized sequence example into views, sequence length data."""
  context_features = {
      'task': tf.FixedLenFeature(shape=[], dtype=tf.string),
      'len': tf.FixedLenFeature(shape=[], dtype=tf.int64)
  }
  view_names = ['view%d' % i for i in range(num_views)]
  fixed_features = [
      tf.FixedLenSequenceFeature(
          shape=[], dtype=tf.string) for _ in range(len(view_names))]
  sequence_features = dict(zip(view_names, fixed_features))
  context_parse, sequence_parse = tf.parse_single_sequence_example(
      serialized=serialized_example,
      context_features=context_features,
      sequence_features=sequence_features)
  views = tf.stack([sequence_parse[v] for v in view_names])
  lens = [sequence_parse[v].get_shape().as_list()[0] for v in view_names]
  assert len(set(lens)) == 1
  seq_len = tf.shape(sequence_parse[view_names[-1]])[0]
  return context_parse, views, seq_len
Example #13
Source File: inputs.py From Gun-Detector with Apache License 2.0 | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
  """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
  tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
  file_queue = tf.train.string_input_producer(file_list)
  reader = tf.TFRecordReader()
  seq_key, serialized_record = reader.read(file_queue)
  ctx, sequence = tf.parse_single_sequence_example(
      serialized_record,
      sequence_features={
          data_utils.SequenceWrapper.F_TOKEN_ID:
              tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
          data_utils.SequenceWrapper.F_LABEL:
              tf.FixedLenSequenceFeature([], dtype=tf.int64),
          data_utils.SequenceWrapper.F_WEIGHT:
              tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  return seq_key, ctx, sequence
Example #14
Source File: gen_obj2sen_caption.py From unsupervised_captioning with MIT License | 6 votes |
def parse_image(serialized, tf):
  """Parses a tensorflow.SequenceExample into detected objects.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.
    tf: The TensorFlow module, passed in explicitly.

  Returns:
    classes: A 1-D int64 Tensor containing the detected objects.
    scores: A 1-D float32 Tensor containing the detection scores.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      sequence_features={
          'classes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
          'scores': tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  classes = tf.to_int32(sequence['classes'])
  scores = sequence['scores']
  return classes, scores
Example #15
Source File: obj2sen.py From unsupervised_captioning with MIT License | 6 votes |
def parse_sentence(serialized):
  """Parses a tensorflow.SequenceExample into a caption.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.

  Returns:
    key: The keywords in a sentence.
    num_key: The number of keywords.
    sentence: A description.
    sentence_length: The length of the description.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={},
      sequence_features={
          'key': tf.FixedLenSequenceFeature([], dtype=tf.int64),
          'sentence': tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })
  key = tf.to_int32(sequence['key'])
  key = tf.random_shuffle(key)
  sentence = tf.to_int32(sequence['sentence'])
  return key, tf.shape(key)[0], sentence, tf.shape(sentence)[0]
Example #16
Source File: parsing_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testSequenceExampleListWithInconsistentDataFails(self):
  original = sequence_example(feature_lists=feature_lists({
      "a": feature_list([
          int64_feature([-1, 0]), float_feature([2, 3])
      ])
  }))

  serialized = original.SerializeToString()

  self._test(
      {
          "example_name": "in1",
          "serialized": tf.convert_to_tensor(serialized),
          "sequence_features": {"a": tf.FixedLenSequenceFeature(
              (2,), tf.int64)}
      },
      expected_err=(tf.OpError, "Feature list: a, Index: 1."
                    " Data types don't match. Expected type: int64"))
Example #17
Source File: parsing_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testSequenceExampleListWithWrongDataTypeFails(self):
  original = sequence_example(feature_lists=feature_lists({
      "a": feature_list([
          float_feature([2, 3])
      ])
  }))

  serialized = original.SerializeToString()

  self._test(
      {
          "example_name": "in1",
          "serialized": tf.convert_to_tensor(serialized),
          "sequence_features": {"a": tf.FixedLenSequenceFeature(
              (2,), tf.int64)}
      },
      expected_err=(tf.OpError, "Feature list: a, Index: 0. Data types don't match."
                    " Expected type: int64"))
Example #18
Source File: parsing_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testSequenceExampleListWithWrongShapeFails(self):
  original = sequence_example(feature_lists=feature_lists({
      "a": feature_list([
          int64_feature([2, 3]), int64_feature([2, 3, 4])
      ]),
  }))

  serialized = original.SerializeToString()

  self._test(
      {
          "example_name": "in1",
          "serialized": tf.convert_to_tensor(serialized),
          "sequence_features": {"a": tf.FixedLenSequenceFeature(
              (2,), tf.int64)}
      },
      expected_err=(tf.OpError,
                    r"Name: in1, Key: a, Index: 1."
                    r" Number of int64 values != expected."
                    r" values size: 3 but output shape: \[2\]"))
Example #19
Source File: parsing_ops_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testSequenceExampleWithMissingFeatureListFails(self):
  original = sequence_example(feature_lists=feature_lists({}))

  # Test fails because we didn't add:
  # feature_list_dense_defaults = {"a": None}
  self._test(
      {
          "example_name": "in1",
          "serialized": tf.convert_to_tensor(original.SerializeToString()),
          "sequence_features": {"a": tf.FixedLenSequenceFeature(
              (2,), tf.int64)}
      },
      expected_err=(
          tf.OpError,
          "Name: in1, Feature list 'a' is required but could not be found."
          " Did you mean to include it in"
          " feature_list_dense_missing_assumed_empty or"
          " feature_list_dense_defaults?"))
Example #20
Source File: input_utils.py From tensorrec with Apache License 2.0 | 6 votes |
def create_tensorrec_dataset_from_tfrecord(tfrecord_path):
    """
    Loads a TFRecord file and creates a Dataset with the contents.
    :param tfrecord_path: str
    :return: tf.data.Dataset
    """

    def parse_tensorrec_tfrecord(example_proto):
        features = {
            'row_index': tf.FixedLenSequenceFeature((), tf.int64, allow_missing=True),
            'col_index': tf.FixedLenSequenceFeature((), tf.int64, allow_missing=True),
            'values': tf.FixedLenSequenceFeature((), tf.float32, allow_missing=True),
            'd0': tf.FixedLenFeature((), tf.int64),
            'd1': tf.FixedLenFeature((), tf.int64),
        }
        parsed_features = tf.parse_single_example(example_proto, features)
        return (parsed_features['row_index'], parsed_features['col_index'],
                parsed_features['values'], parsed_features['d0'], parsed_features['d1'])

    dataset = tf.data.TFRecordDataset(tfrecord_path).map(parse_tensorrec_tfrecord)

    return dataset
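A brief usage sketch for the function above; the file name is hypothetical and TF 1.x graph mode is assumed.

dataset = create_tensorrec_dataset_from_tfrecord('interactions.tfrecord')
row_index, col_index, values, d0, d1 = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    # Each record yields the sparse interaction indices/values plus the dense shape (d0, d1).
    print(sess.run([d0, d1]))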
Example #21
Source File: inputs.py From object_detection_kitti with Apache License 2.0 | 6 votes |
def _read_single_sequence_example(file_list, tokens_shape=None):
  """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
  tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
  file_queue = tf.train.string_input_producer(file_list)
  reader = tf.TFRecordReader()
  seq_key, serialized_record = reader.read(file_queue)
  ctx, sequence = tf.parse_single_sequence_example(
      serialized_record,
      sequence_features={
          data_utils.SequenceWrapper.F_TOKEN_ID:
              tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
          data_utils.SequenceWrapper.F_LABEL:
              tf.FixedLenSequenceFeature([], dtype=tf.int64),
          data_utils.SequenceWrapper.F_WEIGHT:
              tf.FixedLenSequenceFeature([], dtype=tf.float32),
      })
  return seq_key, ctx, sequence
Example #22
Source File: batch_inputs.py From sample-cnn with MIT License | 6 votes |
def _read_sequence_example(filename_queue,
                           n_labels=50, n_samples=59049, n_segments=10):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)

  context, sequence = tf.parse_single_sequence_example(
      serialized_example,
      context_features={
          'raw_labels': tf.FixedLenFeature([], dtype=tf.string)
      },
      sequence_features={
          'raw_segments': tf.FixedLenSequenceFeature([], dtype=tf.string)
      })

  segments = tf.decode_raw(sequence['raw_segments'], tf.float32)
  segments.set_shape([n_segments, n_samples])

  labels = tf.decode_raw(context['raw_labels'], tf.uint8)
  labels.set_shape([n_labels])
  labels = tf.cast(labels, tf.float32)

  return segments, labels
Example #23
Source File: input_pipeline.py From realtime-embeddings-matching with Apache License 2.0 | 6 votes |
def parse_fn(serialized_example):
  """Parse a serialized example."""
  # user_id is not currently used.
  context_features = {
      'user_id': tf.FixedLenFeature([], dtype=tf.int64)
  }
  sequence_features = {
      'movie_ids': tf.FixedLenSequenceFeature([], dtype=tf.int64)
  }
  parsed_feature, parsed_sequence_feature = tf.parse_single_sequence_example(
      serialized=serialized_example,
      context_features=context_features,
      sequence_features=sequence_features
  )
  movie_ids = parsed_sequence_feature['movie_ids']
  return movie_ids
Example #24
Source File: data_utils.py From bran with Apache License 2.0 | 6 votes |
def ner_example_parser(filename_queue):
  reader = tf.TFRecordReader()
  key, record_string = reader.read(filename_queue)

  # Define how to parse the example
  context_features = {
      'seq_len': tf.FixedLenFeature([], tf.int64),
  }
  sequence_features = {
      "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
      "ner_labels": tf.FixedLenSequenceFeature([], dtype=tf.int64),
      "entities": tf.FixedLenSequenceFeature([], dtype=tf.int64),
  }
  context_parsed, sequence_parsed = tf.parse_single_sequence_example(
      serialized=record_string,
      context_features=context_features,
      sequence_features=sequence_features)

  tokens = sequence_parsed['tokens']
  ner_labels = sequence_parsed['ner_labels']
  entities = sequence_parsed['entities']
  seq_len = context_parsed['seq_len']

  return [tokens, ner_labels, entities, seq_len]
Example #25
Source File: dataset_utils.py From listen-attend-and-spell with Apache License 2.0 | 6 votes |
def read_dataset(filename, num_channels=39):
    """Read data from tfrecord file."""

    def parse_fn(example_proto):
        """Parse function for reading single sequence example."""
        sequence_features = {
            'inputs': tf.FixedLenSequenceFeature(shape=[num_channels],
                                                 dtype=tf.float32),
            'labels': tf.FixedLenSequenceFeature(shape=[], dtype=tf.string)
        }

        context, sequence = tf.parse_single_sequence_example(
            serialized=example_proto,
            sequence_features=sequence_features
        )

        return sequence['inputs'], sequence['labels']

    dataset = tf.data.TFRecordDataset(filename)
    dataset = dataset.map(parse_fn)

    return dataset
Example #26
Source File: data_utils.py From ID-CNN-CWS with GNU General Public License v3.0 | 6 votes |
def example_parser(self, filename_queue):
    reader = tf.TFRecordReader()
    key, record_string = reader.read(filename_queue)
    features = {
        'labels': tf.FixedLenSequenceFeature([], tf.int64),
        'tokens': tf.FixedLenSequenceFeature([], tf.int64),
        'shapes': tf.FixedLenSequenceFeature([], tf.int64),
        'chars': tf.FixedLenSequenceFeature([], tf.int64),
        'seq_len': tf.FixedLenSequenceFeature([], tf.int64),
        'tok_len': tf.FixedLenSequenceFeature([], tf.int64),
    }

    _, example = tf.parse_single_sequence_example(serialized=record_string,
                                                  sequence_features=features)
    labels = example['labels']
    tokens = example['tokens']
    shapes = example['shapes']
    chars = example['chars']
    seq_len = example['seq_len']
    tok_len = example['tok_len']
    # context = c['context']
    return labels, tokens, shapes, chars, seq_len, tok_len
    # return labels, tokens, labels, labels, labels
Example #27
Source File: Datasets.py From Wave-U-Net with MIT License | 6 votes |
def parse_record(example_proto, source_names, shape):
    # Parse record from TFRecord file
    all_names = source_names + ["mix"]

    features = {key: tf.FixedLenSequenceFeature([], allow_missing=True, dtype=tf.float32)
                for key in all_names}
    features["length"] = tf.FixedLenFeature([], tf.int64)
    features["channels"] = tf.FixedLenFeature([], tf.int64)

    parsed_features = tf.parse_single_example(example_proto, features)

    # Reshape
    length = tf.cast(parsed_features["length"], tf.int64)
    channels = tf.constant(shape[-1], tf.int64)  # tf.cast(parsed_features["channels"], tf.int64)
    sample = dict()
    for key in all_names:
        sample[key] = tf.reshape(parsed_features[key], tf.stack([length, channels]))
    sample["length"] = length
    sample["channels"] = channels

    return sample
Example #28
Source File: dump_tfrecord.py From cwavegan with MIT License | 6 votes |
def _mapper(example_proto):
  features = {
      'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
      'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
  }
  example = tf.parse_single_example(example_proto, features)

  wav = example['samples'][:, 0]
  wav = wav[:16384]

  wav_len = tf.shape(wav)[0]
  wav = tf.pad(wav, [[0, 16384 - wav_len]])

  label = tf.reduce_join(example['label'], 0)

  return wav, label
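Note that this example uses FixedLenSequenceFeature with allow_missing=True inside tf.parse_single_example, which parses a variable-length flat feature of an ordinary tf.train.Example rather than a SequenceExample. A minimal sketch of writing a record that the _mapper above could read follows; the file name and values are hypothetical.

import numpy as np
import tensorflow as tf

# Hypothetical writer for records that _mapper above can parse.
samples = np.random.uniform(-1.0, 1.0, size=16384).astype(np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
    'samples': tf.train.Feature(float_list=tf.train.FloatList(value=samples)),
    'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'kick_drum'])),
}))
with tf.python_io.TFRecordWriter('audio.tfrecord') as writer:
    writer.write(example.SerializeToString())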
Example #29
Source File: inputs.py From uai-sdk with Apache License 2.0 | 5 votes |
def parse_sequence_example(serialized, image_feature, caption_feature):
  """Parses a tensorflow.SequenceExample into an image and caption.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.
    image_feature: Name of SequenceExample context feature containing image
      data.
    caption_feature: Name of SequenceExample feature list containing integer
      captions.

  Returns:
    encoded_image: A scalar string Tensor containing a JPEG encoded image.
    caption: A 1-D int64 Tensor with dynamically specified length.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          image_feature: tf.FixedLenFeature([], dtype=tf.string)
      },
      sequence_features={
          caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })

  encoded_image = context[image_feature]
  caption = sequence[caption_feature]
  return encoded_image, caption
Example #30
Source File: vqa.py From BERT with Apache License 2.0 | 5 votes |
def example_reading_spec(self):
  data_fields, data_items_to_decoders = {}, {}
  data_fields["image/feature"] = tf.FixedLenSequenceFeature(
      (), tf.float32, allow_missing=True)
  data_fields["image/spatial_feature"] = tf.FixedLenSequenceFeature(
      (), tf.float32, allow_missing=True)
  data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  data_fields["image/answer"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)

  data_items_to_decoders[
      "inputs"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/feature")
  data_items_to_decoders[
      "question_id"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/question_id")
  data_items_to_decoders[
      "image_id"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/image_id")
  data_items_to_decoders[
      "spatial_feature"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/spatial_feature")
  data_items_to_decoders[
      "question"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/question")
  data_items_to_decoders[
      "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(
          "image/answer")
  return data_fields, data_items_to_decoders