Python tensorflow.serialize_sparse() Examples

The following are 19 code examples of tensorflow.serialize_sparse(), drawn from open-source projects. Each example is preceded by its source file, originating project, and license. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
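tf.serialize_sparse() converts a single SparseTensor into a 1-D string tensor of length 3 (the serialized indices, values, and dense shape); its usual counterpart is tf.deserialize_many_sparse(), which takes a stack of such vectors and rebuilds one SparseTensor with a leading minibatch dimension. A minimal round-trip sketch in TensorFlow 1.x graph mode (the tensor values below are illustrative, not taken from any of the projects listed):

import tensorflow as tf

# A small 3x4 SparseTensor with two int32 values.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[10, 20], dense_shape=[3, 4])

# serialize_sparse returns a string tensor of shape [3].
serialized = tf.serialize_sparse(sp)

# Stack two serialized vectors into shape [2, 3] and recover a single
# rank-3 SparseTensor whose first dimension is the minibatch index.
batch = tf.stack([serialized, serialized])
sp_batch = tf.deserialize_many_sparse(batch, dtype=tf.int32)

with tf.Session() as sess:
    indices, values, dense_shape = sess.run(sp_batch)
    print(dense_shape)  # [2 3 4]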
Example #1
Source File: sparse_serialization_ops_test.py    From deep_image_model with Apache License 2.0
def testSerializeDeserializeMany(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
      sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
      serialized0 = tf.serialize_sparse(sp_input0)
      serialized1 = tf.serialize_sparse(sp_input1)
      serialized_concat = tf.stack([serialized0, serialized1])

      sp_deserialized = tf.deserialize_many_sparse(
          serialized_concat, dtype=tf.int32)

      combined_indices, combined_values, combined_shape = sess.run(
          sp_deserialized)

      self.assertAllEqual(combined_indices[:6, 0], [0] * 6)  # minibatch 0
      self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
      self.assertAllEqual(combined_indices[6:, 0], [1] * 6)  # minibatch 1
      self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
      self.assertAllEqual(combined_values[:6], sp_input0[1])
      self.assertAllEqual(combined_values[6:], sp_input1[1])
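      # The combined dense_shape is [batch_size, max(5, 3), max(6, 4)] = [2, 5, 6].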
      self.assertAllEqual(combined_shape, [2, 5, 6]) 
Example #2
Source File: sparse_serialization_ops_test.py    From deep_image_model with Apache License 2.0
def testFeedSerializeDeserializeMany(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input0 = self._SparseTensorPlaceholder()
      sp_input1 = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      input1_val = self._SparseTensorValue_3x4(np.arange(6))
      serialized0 = tf.serialize_sparse(sp_input0)
      serialized1 = tf.serialize_sparse(sp_input1)
      serialized_concat = tf.stack([serialized0, serialized1])

      sp_deserialized = tf.deserialize_many_sparse(
          serialized_concat, dtype=tf.int32)

      combined_indices, combined_values, combined_shape = sess.run(
          sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val})

      self.assertAllEqual(combined_indices[:6, 0], [0] * 6)  # minibatch 0
      self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
      self.assertAllEqual(combined_indices[6:, 0], [1] * 6)  # minibatch 1
      self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
      self.assertAllEqual(combined_values[:6], input0_val[1])
      self.assertAllEqual(combined_values[6:], input1_val[1])
      self.assertAllEqual(combined_shape, [2, 5, 6]) 
Example #3
Source File: sparse_serialization_ops_test.py    From deep_image_model with Apache License 2.0
def testDeserializeFailsWrongType(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input0 = self._SparseTensorPlaceholder()
      sp_input1 = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      input1_val = self._SparseTensorValue_3x4(np.arange(6))
      serialized0 = tf.serialize_sparse(sp_input0)
      serialized1 = tf.serialize_sparse(sp_input1)
      serialized_concat = tf.stack([serialized0, serialized1])

      sp_deserialized = tf.deserialize_many_sparse(
          serialized_concat, dtype=tf.int64)

      with self.assertRaisesOpError(
          r"Requested SparseTensor of type int64 but "
          r"SparseTensor\[0\].values.dtype\(\) == int32"):
        sess.run(
            sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val}) 
Example #4
Source File: sparse_serialization_ops_test.py    From deep_image_model with Apache License 2.0
def testDeserializeFailsInconsistentRank(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input0 = self._SparseTensorPlaceholder()
      sp_input1 = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      input1_val = self._SparseTensorValue_1x1x1()
      serialized0 = tf.serialize_sparse(sp_input0)
      serialized1 = tf.serialize_sparse(sp_input1)
      serialized_concat = tf.stack([serialized0, serialized1])

      sp_deserialized = tf.deserialize_many_sparse(
          serialized_concat, dtype=tf.int32)

      with self.assertRaisesOpError(
          r"Inconsistent rank across SparseTensors: rank prior to "
          r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
        sess.run(
            sp_deserialized, {sp_input0: input0_val, sp_input1: input1_val}) 
Example #5
Source File: sparse_serialization_ops_test.py    From deep_image_model with Apache License 2.0
def testDeserializeFailsInvalidProto(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input0 = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      serialized0 = tf.serialize_sparse(sp_input0)
      serialized1 = ["a", "b", "c"]
      serialized_concat = tf.stack([serialized0, serialized1])

      sp_deserialized = tf.deserialize_many_sparse(
          serialized_concat, dtype=tf.int32)

      with self.assertRaisesOpError(
          r"Could not parse serialized_sparse\[1, 0\]"):
        sess.run(sp_deserialized, {sp_input0: input0_val}) 
Example #6
Source File: mjsynth.py    From cnn_lstm_ctc_ocr_for_ICPR with GNU General Public License v3.0
def _read_word_record(data_queue):

    reader = tf.TFRecordReader() # Construct a general reader
    key, example_serialized = reader.read(data_queue) 

    feature_map = {
        'image/encoded':  tf.FixedLenFeature( [], dtype=tf.string, 
                                              default_value='' ),
        'image/labels':   tf.VarLenFeature( dtype=tf.int64 ), 
        'image/width':    tf.FixedLenFeature( [1], dtype=tf.int64,
                                              default_value=1 ),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value='' ),
        'text/string':     tf.FixedLenFeature([], dtype=tf.string,
                                             default_value='' ),
        'text/length':    tf.FixedLenFeature( [1], dtype=tf.int64,
                                              default_value=1 )
    }
    features = tf.parse_single_example( example_serialized, feature_map )

    image = tf.image.decode_jpeg( features['image/encoded'], channels=1 ) #gray
    width = tf.cast( features['image/width'], tf.int32) # for ctc_loss
    label = tf.serialize_sparse( features['image/labels'] ) # for batching
    length = features['text/length']
    text = features['text/string']
    filename = features['image/filename']
    return image,width,label,length,text,filename 
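The label is serialized here so that the variable-length sparse labels can flow through the standard queue-based batching ops as fixed-shape [3] string vectors. A hedged sketch of the downstream step (the batching parameters and variable names are illustrative, not taken from the repository):

import tensorflow as tf

# data_queue is assumed to be a queue of TFRecord filenames, e.g. from
# tf.train.string_input_producer.
image, width, label, length, text, filename = _read_word_record(data_queue)

# dynamic_pad lets the variable-size images be padded per batch; the label is
# already a fixed-shape [3] string vector, so it batches directly.
image_batch, width_batch, label_batch, length_batch = tf.train.batch(
    [image, width, label, length],
    batch_size=32,
    capacity=1000,
    dynamic_pad=True)

# label_batch has shape [batch_size, 3]; deserialize_many_sparse rebuilds a
# single batched SparseTensor of int64 label ids for the loss computation.
labels_sparse = tf.deserialize_many_sparse(label_batch, dtype=tf.int64)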
Example #7
Source File: mjsynth.py    From cnn_lstm_ctc_ocr with GNU General Public License v3.0
def preprocess_fn( data ):
    """Parse the elements of the dataset"""

    feature_map = {
        'image/encoded'  :   tf.FixedLenFeature( [], dtype=tf.string, 
                                                 default_value='' ),
        'image/labels'   :   tf.VarLenFeature( dtype=tf.int64 ), 
        'image/width'    :   tf.FixedLenFeature( [1], dtype=tf.int64,
                                                 default_value=1 ),
        'image/filename' :   tf.FixedLenFeature( [], dtype=tf.string,
                                                 default_value='' ),
        'text/string'    :   tf.FixedLenFeature( [], dtype=tf.string,
                                                 default_value='' ),
        'text/length'    :   tf.FixedLenFeature( [1], dtype=tf.int64,
                                                 default_value=1 )
    }
    
    features = tf.parse_single_example( data, feature_map )
    
    # Initialize fields according to feature map

    # Convert to grayscale
    image = tf.image.decode_jpeg( features['image/encoded'], channels=1 ) 

    width = tf.cast( features['image/width'], tf.int32 ) # for ctc_loss
    label = tf.serialize_sparse( features['image/labels'] ) # for batching
    length = features['text/length']
    text = features['text/string']

    image = preprocess_image( image )

    return image, width, label, length, text 
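Here the serialized [3] string label plays the same role inside a tf.data pipeline: it lets the sparse labels pass through padded_batch as a dense string tensor and be re-expanded afterwards. A hedged sketch of such a pipeline (the record file name, batch size, and padded shapes are illustrative):

import tensorflow as tf

def postbatch_fn(image, width, label, length, text):
    # After batching, label has shape [batch_size, 3]; rebuild one SparseTensor.
    label = tf.deserialize_many_sparse(label, dtype=tf.int64)
    return image, width, label, length, text

dataset = (tf.data.TFRecordDataset(['train.tfrecord'])  # assumed record file
           .map(preprocess_fn)
           .padded_batch(32, padded_shapes=([None, None, 1], [1], [3], [1], []))
           .map(postbatch_fn))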
Example #8
Source File: vgsl_input.py    From Action_Recognition_Zoo with MIT License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #9
Source File: vgsl_input.py    From multilabel-image-classification-tensorflow with MIT License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #10
Source File: vgsl_input.py    From models with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #11
Source File: vgsl_input.py    From g-tensorflow-models with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #12
Source File: vgsl_input.py    From HumanRecognition with MIT License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #13
Source File: vgsl_input.py    From object_detection_with_tensorflow with MIT License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #14
Source File: vgsl_input.py    From object_detection_kitti with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #15
Source File: vgsl_input.py    From hands-detection with MIT License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #16
Source File: vgsl_input.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #17
Source File: vgsl_input.py    From DOTA_models with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #18
Source File: vgsl_input.py    From Gun-Detector with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text 
Example #19
Source File: vgsl_input.py    From yolo_v2 with Apache License 2.0
def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
  """Builds network input tensor ops for TF Example.

  Args:
    filename_queue: Queue of filenames, from tf.train.string_input_producer
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    image:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    height:  Tensor int64 containing the height of the image.
    width:   Tensor int64 containing the width of the image.
    labels:  Serialized SparseTensor containing the int64 labels.
    text:    Tensor string of the utf8 truth text.
  """
  if reader:
    reader = reader()
  else:
    reader = tf.TFRecordReader()
  _, example_serialized = reader.read(filename_queue)
  example_serialized = tf.reshape(example_serialized, shape=[])
  features = tf.parse_single_example(
      example_serialized,
      {'image/encoded': parsing_ops.FixedLenFeature(
          [1], dtype=tf.string, default_value=''),
       'image/text': parsing_ops.FixedLenFeature(
           [1], dtype=tf.string, default_value=''),
       'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),
       'image/height': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1),
       'image/width': parsing_ops.FixedLenFeature(
           [1], dtype=tf.int64, default_value=1)})
  if using_ctc:
    labels = features['image/unpadded_class']
  else:
    labels = features['image/class']
  labels = tf.serialize_sparse(labels)
  image = tf.reshape(features['image/encoded'], shape=[], name='encoded')
  image = _ImageProcessing(image, shape)
  height = tf.reshape(features['image/height'], [-1])
  width = tf.reshape(features['image/width'], [-1])
  text = tf.reshape(features['image/text'], shape=[])

  return image, height, width, labels, text