Python tensorflow.parse_example() Examples

The following are code examples of tensorflow.parse_example(), drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out all the other available functions and classes of the tensorflow module.
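As a primer, here is a minimal, self-contained sketch (TF 1.x) of the pattern shared by the examples below: hand-build serialized tf.train.Example protos, then parse a batch of them with tf.parse_example. The feature names 'age' and 'tag' are invented for illustration.

import tensorflow as tf

def make_example(age, tag):
    # Hand-build one serialized tf.train.Example proto.
    return tf.train.Example(features=tf.train.Features(feature={
        'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[age])),
        'tag': tf.train.Feature(bytes_list=tf.train.BytesList(value=[tag])),
    })).SerializeToString()

serialized = tf.constant([make_example(30, b'a'), make_example(40, b'b')])
feature_spec = {
    'age': tf.FixedLenFeature([], tf.int64),
    'tag': tf.FixedLenFeature([], tf.string),
}
parsed = tf.parse_example(serialized, feature_spec)  # dict of batched tensors

with tf.Session() as sess:
    print(sess.run(parsed['age']))  # [30 40]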
Example #1
Source File: model.py    From cloudml-samples with Apache License 2.0
def example_serving_input_fn(default_batch_size=None):
    """Build the serving inputs.

    Args:
      default_batch_size (int): Batch size for the tf.placeholder shape.

    Returns:
      A tuple of (parsed feature, receiver tensor) dictionaries.
    """
    feature_spec = {}
    for feat in CONTINUOUS_COLS:
        feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.int64)

    for feat, _ in CATEGORICAL_COLS:
        feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.string)

    example_bytestring = tf.placeholder(
        shape=[default_batch_size],
        dtype=tf.string,
    )
    features = tf.parse_example(example_bytestring, feature_spec)
    return features, {'example': example_bytestring} 
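A hedged usage sketch: the (features, receiver tensors) tuple above can be adapted into a ServingInputReceiver for export with tf.estimator; the estimator referenced in the comment is hypothetical.

def serving_input_receiver_fn():
    features, receiver_tensors = example_serving_input_fn()
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

# Assuming `estimator` is an already-trained tf.estimator.Estimator:
# estimator.export_savedmodel('export_dir', serving_input_receiver_fn)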
Example #2
Source File: inputs.py    From cloudml-samples with Apache License 2.0
def example_serving_input_receiver_fn():
  """Creating an ServingInputReceiver object for TFRecords data.

  Returns:
    ServingInputReceiver
  """

  # Note that the inputs are raw features, not transformed features.
  receiver_tensors = tf.placeholder(shape=[None], dtype=tf.string)

  features = tf.parse_example(
    receiver_tensors,
    features=get_feature_spec(is_serving=True)
  )

  for key in features:
    features[key] = tf.expand_dims(features[key], -1)

  return tf.estimator.export.ServingInputReceiver(
    features=process_features(features),
    receiver_tensors={'example_proto': receiver_tensors}
  ) 
Example #3
Source File: inputs.py    From cloudml-samples with Apache License 2.0
def example_evaluating_input_receiver_fn():
  """Creating an EvalInputReceiver object for TFRecords data.

  Returns:
      EvalInputReceiver
  """

  tf_example = tf.placeholder(shape=[None], dtype=tf.string)
  features = tf.parse_example(
    tf_example,
    features=get_feature_spec(is_serving=False))

  for key in features:
    features[key] = tf.expand_dims(features[key], -1)

  return tfma.export.EvalInputReceiver(
    features=process_features(features),
    receiver_tensors={'examples': tf_example},
    labels=features[metadata.TARGET_NAME]) 
Example #4
Source File: readers.py    From youtube8mchallenge with Apache License 2.0
def prepare_serialized_examples(self, serialized_examples):
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), (
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes)))

    feature_map = {"id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]) 
Example #5
Source File: input_ops.py    From S2V with Apache License 2.0
def parse_example_batch(serialized):
  """Parses a batch of tf.Example protos.

  Args:
    serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.
  Returns:
    A SentenceBatch containing the padded sentence ids and their 0/1 mask.
  """
  features = tf.parse_example(
    serialized,
    features={"features": tf.VarLenFeature(dtype=tf.int64)}
  )
  features = features["features"]

  def _sparse_to_batch(sparse):
    ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
    mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                              tf.ones_like(sparse.values, dtype=tf.int32))
    return SentenceBatch(ids=ids, mask=mask)

  return _sparse_to_batch(features) 
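A hedged illustration of what _sparse_to_batch produces for a toy batch of the variable-length id sequences [1, 2, 3] and [4]:

import tensorflow as tf

sparse = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 0]],
                         values=[1, 2, 3, 4], dense_shape=[2, 3])
ids = tf.sparse_tensor_to_dense(sparse)  # zero-padded ids
mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                          tf.ones_like(sparse.values, dtype=tf.int32))
with tf.Session() as sess:
    print(sess.run(ids))   # [[1 2 3] [4 0 0]]
    print(sess.run(mask))  # [[1 1 1] [1 0 0]]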
Example #6
Source File: model.py    From code-snippets with Apache License 2.0
def example_serving_input_fn():
  """Build the serving inputs."""
  example_bytestring = tf.placeholder(
      shape=[None],
      dtype=tf.string,
  )
  features = tf.parse_example(
      example_bytestring,
      tf.feature_column.make_parse_example_spec(INPUT_COLUMNS)
  )
  return tf.estimator.export.ServingInputReceiver(
      features,
      {'example_proto': example_bytestring}
  )

Example #7
Source File: iterate_data.py    From BERT with Apache License 2.0
def _decode_record(record, name_to_features):
	"""Decodes a record to a TensorFlow example.

	name_to_features = {
	            "input_ids":
	                    tf.FixedLenFeature([max_seq_length], tf.int64),
	            "input_mask":
	                    tf.FixedLenFeature([max_seq_length], tf.int64),
	            "segment_ids":
	                    tf.FixedLenFeature([max_seq_length], tf.int64),
	            "masked_lm_positions":
	                    tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
	            "masked_lm_ids":
	                    tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
	            "masked_lm_weights":
	                    tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
	            "next_sentence_labels":
	                    tf.FixedLenFeature([1], tf.int64),
	    }

	"""
	example = tf.parse_example(record, name_to_features)
	return example 
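A hedged sketch of how a batched _decode_record is typically wired into a tf.data input pipeline; the file name, sequence length, and batch size below are hypothetical.

name_to_features = {
    "input_ids": tf.FixedLenFeature([128], tf.int64),
    "input_mask": tf.FixedLenFeature([128], tf.int64),
}
dataset = tf.data.TFRecordDataset("train.tfrecord")
dataset = dataset.batch(32)  # batch first: tf.parse_example expects a vector of protos
dataset = dataset.map(lambda record: _decode_record(record, name_to_features))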
Example #8
Source File: readers.py    From Youtube-8M-WILLOW with Apache License 2.0
def prepare_serialized_examples(self, serialized_examples):
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), (
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes)))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)

    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]) 
Example #9
Source File: glyph_patches.py    From moonlight with Apache License 2.0
def serving_fn():
  """Returns the ServingInputReceiver for the exported model.

  Returns:
    A ServingInputReceiver object which may be passed to
    `Estimator.export_savedmodel`. A model saved using this receiver may be used
    for running OMR.
  """
  examples = tf.placeholder(tf.string, shape=[None])
  patch_height, patch_width = read_patch_dimensions()
  parsed = tf.parse_example(examples, {
      'patch': tf.FixedLenFeature((patch_height, patch_width), tf.float32),
  })
  return tf.estimator.export.ServingInputReceiver(
      features={'patch': parsed['patch']},
      receiver_tensors=parsed['patch'],
      receiver_tensors_alternatives={
          'example': examples,
          'patch': parsed['patch']
      }) 
Example #10
Source File: tf_io_pipline_fast_tools.py    From CRNN_Tensorflow with MIT License
def _extract_features_batch(serialized_batch):
        """

        :param serialized_batch:
        :return:
        """
        features = tf.parse_example(
            serialized_batch,
            features={'images': tf.FixedLenFeature([], tf.string),
                      'imagepaths': tf.FixedLenFeature([], tf.string),
                      'labels': tf.VarLenFeature(tf.int64),
                      }
        )
        bs = features['images'].shape[0]
        images = tf.decode_raw(features['images'], tf.uint8)
        w, h = tuple(CFG.ARCH.INPUT_SIZE)
        images = tf.cast(x=images, dtype=tf.float32)
        images = tf.reshape(images, [bs, h, w, CFG.ARCH.INPUT_CHANNELS])

        labels = features['labels']
        labels = tf.cast(labels, tf.int32)

        imagepaths = features['imagepaths']

        return images, labels, imagepaths 
Example #11
Source File: readers.py    From youtube-8m with Apache License 2.0
def prepare_reader(self, filename_queue, batch_size=1024):

    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), (
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes)))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]]) 
Example #12
Source File: read_tfrecord.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def _extract_features_batch(self, serialized_batch):
        features = tf.parse_example(
            serialized_batch,
            features={'images': tf.FixedLenFeature([], tf.string),
                'imagepaths': tf.FixedLenFeature([], tf.string),
                'labels': tf.VarLenFeature(tf.int64),
                 })

        bs = features['images'].shape[0]
        images = tf.decode_raw(features['images'], tf.uint8)
        w, h = tuple(CFG.ARCH.INPUT_SIZE)
        images = tf.cast(x=images, dtype=tf.float32)
        #images = tf.subtract(tf.divide(images, 128.0), 1.0)
        images = tf.reshape(images, [bs, h, -1, CFG.ARCH.INPUT_CHANNELS])

        labels = features['labels']
        labels = tf.cast(labels, tf.int32)

        imagepaths = features['imagepaths']

        return images, labels, imagepaths 
Example #13
Source File: input_ops.py    From Gun-Detector with Apache License 2.0
def parse_example_batch(serialized):
  """Parses a batch of tf.Example protos.

  Args:
    serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.
  Returns:
    encode: A SentenceBatch of encode sentences.
    decode_pre: A SentenceBatch of "previous" sentences to decode.
    decode_post: A SentenceBatch of "post" sentences to decode.
  """
  features = tf.parse_example(
      serialized,
      features={
          "encode": tf.VarLenFeature(dtype=tf.int64),
          "decode_pre": tf.VarLenFeature(dtype=tf.int64),
          "decode_post": tf.VarLenFeature(dtype=tf.int64),
      })

  def _sparse_to_batch(sparse):
    ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
    mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                              tf.ones_like(sparse.values, dtype=tf.int32))
    return SentenceBatch(ids=ids, mask=mask)

  output_names = ("encode", "decode_pre", "decode_post")
  return tuple(_sparse_to_batch(features[x]) for x in output_names) 
Example #14
Source File: input_reader.py    From tpu_models with Apache License 2.0
def create_parser_fn(self, params):
    """Create parse fn to extract tensors from tf.Example."""

    def _parser(serialized_example):
      """Parses a single tf.Example into image and label tensors."""
      features = tf.parse_example(
          [serialized_example],
          features={
              'image/encoded': tf.VarLenFeature(dtype=tf.float32),
              'image/segmentation/mask': tf.VarLenFeature(dtype=tf.float32),
          })
      image = features['image/encoded']
      if isinstance(image, tf.SparseTensor):
        image = tf.sparse_tensor_to_dense(image)
      gt_mask = features['image/segmentation/mask']
      if isinstance(gt_mask, tf.SparseTensor):
        gt_mask = tf.sparse_tensor_to_dense(gt_mask)

      image_size, label_size = self.get_input_shapes(params)
      image = tf.reshape(image, image_size)
      gt_mask = tf.reshape(gt_mask, label_size)

      if params.use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
        gt_mask = tf.cast(gt_mask, dtype=tf.bfloat16)
      tf.logging.info('debug input %s %s', image, gt_mask)
      return image, gt_mask

    return _parser 
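A hedged note on the wrapping above: passing [serialized_example] gives tf.parse_example a batch of one, so every parsed tensor carries a leading dimension of 1; tf.parse_single_example would return the same features without that batch dimension. A toy sketch:

serialized_example = tf.placeholder(tf.string, shape=[])  # one scalar proto
spec = {'image/encoded': tf.VarLenFeature(dtype=tf.float32)}
batched = tf.parse_example([serialized_example], spec)      # leading dim of 1
single = tf.parse_single_example(serialized_example, spec)  # no batch dim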
Example #15
Source File: lexnet_model.py    From Gun-Detector with Apache License 2.0
def parse_tensorflow_examples(record, batch_size, path_to_index):
  """Reads TensorFlow examples from a RecordReader.

  Args:
    record: a record with TensorFlow examples.
    batch_size: the number of instances in a minibatch
    path_to_index: mapping from string path to index in the embeddings matrix.

  Returns:
    The word embeddings IDs, paths and counts
  """
  features = tf.parse_example(
      record, {
          'x_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
          'y_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
          'nc_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
          'reprs': tf.FixedLenSequenceFeature(
              shape=(), dtype=tf.string, allow_missing=True),
          'counts': tf.FixedLenSequenceFeature(
              shape=(), dtype=tf.int64, allow_missing=True),
          'rel_id': tf.FixedLenFeature([1], dtype=tf.int64)
      })

  x_embedding_id = tf.squeeze(features['x_embedding_id'], [-1])
  y_embedding_id = tf.squeeze(features['y_embedding_id'], [-1])
  nc_embedding_id = tf.squeeze(features['nc_embedding_id'], [-1])
  labels = tf.squeeze(features['rel_id'], [-1])
  path_counts = tf.to_float(tf.reshape(features['counts'], [batch_size, -1]))

  path_embedding_id = None
  if path_to_index:
    path_embedding_id = path_to_index.lookup(features['reprs'])

  return (
      x_embedding_id, y_embedding_id, nc_embedding_id,
      path_embedding_id, path_counts, labels) 
Example #16
Source File: readers.py    From youtube-8m with Apache License 2.0
def prepare_serialized_examples(self, serialized_examples):
    """Parse a single video-level TF Example."""
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), (
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes)))

    feature_map = {
        "id": tf.io.FixedLenFeature([], tf.string),
        "labels": tf.io.VarLenFeature(tf.int64)
    }
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat(
        [features[feature_name] for feature_name in self.feature_names], 1)

    output_dict = {
        "video_ids": features["id"],
        "video_matrix": concatenated_features,
        "labels": labels,
        "num_frames": tf.ones([tf.shape(serialized_examples)[0]])
    }

    return output_dict 
Example #17
Source File: __init__.py    From tf_estimator_barebone with MIT License
def predict_input_fn():
  serialized_tf_example = tf.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  feature_spec = {
      'feature': tf.FixedLenFeature(shape=[1], dtype=tf.float32),
  }
  features = tf.parse_example(serialized_tf_example, feature_spec)
  return tf.estimator.export.ServingInputReceiver(
      features=features,
      receiver_tensors={
          tf.saved_model.signature_constants.REGRESS_INPUTS:
              serialized_tf_example
      }) 
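A hedged usage note: a receiver function like this one is passed to Estimator.export_savedmodel at export time; `estimator` below is assumed to be an already-trained tf.estimator.Estimator.

export_dir = estimator.export_savedmodel(
    export_dir_base='export_dir',
    serving_input_receiver_fn=predict_input_fn)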
Example #18
Source File: readers.py    From youtube-8m with Apache License 2.0
def prepare_reader(self, filename_queue, batch_size=1024):
        """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

        Args:
          filename_queue: A tensorflow queue of filename locations.

        Returns:
          The batch of parsed 'predictions' tensors.
        """
        reader = tf.TFRecordReader()
        _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

        # set the mapping from the fields to data types in the proto
        num_features = len(self.feature_names)
        assert num_features > 0, "self.feature_names is empty!"
        assert len(self.feature_names) == len(self.feature_sizes), (
            "length of feature_names (={}) != length of feature_sizes (={})".format(
                len(self.feature_names), len(self.feature_sizes)))

        feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                       "predictions": tf.FixedLenFeature([self.num_classes], tf.float32),
                       "labels": tf.VarLenFeature(tf.int64)}

        features = tf.parse_example(serialized_examples, features=feature_map)

        return features["predictions"] 
Example #19
Source File: lexnet_common.py    From Gun-Detector with Apache License 2.0
def load_all_labels(records):
  """Reads TensorFlow examples from a RecordReader and returns only the labels.

  Args:
    records: a record list with TensorFlow examples.

  Returns:
    The labels
  """
  curr_features = tf.parse_example(records, {
      'rel_id': tf.FixedLenFeature([1], dtype=tf.int64),
  })

  labels = tf.squeeze(curr_features['rel_id'], [-1])
  return labels 