Python tensorflow.python.ops.data_flow_ops.FIFOQueue() Examples

The following are 30 code examples of tensorflow.python.ops.data_flow_ops.FIFOQueue(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions/classes of the module tensorflow.python.ops.data_flow_ops.
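As a warm-up, here is a minimal, self-contained sketch of the basic workflow (TF 1.x graph mode, assumed throughout this page): construct the queue, enqueue a few elements, then dequeue inside a session.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=3, dtypes=[tf.float32], shapes=[()])
enqueue_op = q.enqueue_many(([1.0, 2.0, 3.0],))
dequeued = q.dequeue()

with tf.Session() as sess:
    sess.run(enqueue_op)
    print(sess.run(dequeued))  # 1.0: elements leave in FIFO order
    print(sess.run(q.size()))  # 2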
Example #1
Source File: data_from_fnames.py    From neuralgym with MIT License
def create_queue(self, shared_name=None, name=None):
        from tensorflow.python.ops import data_flow_ops, logging_ops, math_ops
        from tensorflow.python.framework import dtypes
        assert self.dtypes is not None and self.shapes is not None
        assert len(self.dtypes) == len(self.shapes)
        capacity = self.queue_size
        self._queue = data_flow_ops.FIFOQueue(
            capacity=capacity,
            dtypes=self.dtypes,
            shapes=self.shapes,
            shared_name=shared_name,
            name=name)

        enq = self._queue.enqueue_many(self.batch_phs)
        # create a queue runner
        queue_runner.add_queue_runner(queue_runner.QueueRunner(
            self._queue, [enq]*self.nthreads,
            feed_dict_op=[lambda: self.next_batch()],
            feed_dict_key=self.batch_phs))
        # summary_name = 'fraction_of_%d_full' % capacity
        # logging_ops.scalar_summary(
        #     "queue/%s/%s" % (self._queue.name, summary_name),
        #     math_ops.cast(self._queue.size(), dtypes.float32) * (1. / capacity))
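Note that neuralgym's QueueRunner above is a project-specific variant: stock TensorFlow's tf.train.QueueRunner takes no feed_dict_op/feed_dict_key arguments. For reference, a hedged sketch of the stock TF 1.x pattern, where the enqueue op is defined entirely in the graph:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=32, dtypes=[tf.float32], shapes=[()])
enqueue_op = q.enqueue_many((tf.random_uniform([8]),))
# Two background threads, each repeatedly running the same enqueue op.
tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op] * 2))

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(q.dequeue_many(4)))  # four random values
    coord.request_stop()
    coord.join(threads)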
Example #2
Source File: sequence_queueing_state_saver.py    From deep_image_model with Apache License 2.0
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(
          cancel_pending_enqueues, "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(
          cancel_pending_enqueues, "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name) 
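The docstring above concerns the SQSS barrier/queue pair, but the underlying close() semantics are those of a plain FIFOQueue. A standalone sketch of those semantics: after a non-cancelling close, buffered elements can still be dequeued, while new enqueues are rejected.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[()])
enq = q.enqueue_many(([1.0, 2.0],))
close_op = q.close(cancel_pending_enqueues=False)

with tf.Session() as sess:
    sess.run(enq)
    sess.run(close_op)
    print(sess.run(q.dequeue()))  # 1.0: draining a closed queue is allowed
    try:
        sess.run(q.enqueue(3.0))
    except tf.errors.OpError:  # typically CancelledError: queue is closed
        print('enqueue after close is rejected')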
Example #3
Source File: sequence_queueing_state_saver.py    From auto-alt-text-lambda-api with MIT License
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(cancel_pending_enqueues,
                                         "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
                                                    "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name) 
Example #4
Source File: feeder.py    From auto-alt-text-lambda-api with MIT License
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
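The shared_name argument is what makes this Feeder pattern work: queue handles created with the same shared_name (and matching dtypes/shapes) resolve to the same underlying queue state, which is what lets remote workers enqueue into it. A hedged sketch of that aliasing within a single process:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

# Two distinct ops, one underlying queue (same shared_name, same signature).
q_a = data_flow_ops.FIFOQueue(10, [tf.float32], shapes=[()],
                              shared_name='feeding_queue', name='handle_a')
q_b = data_flow_ops.FIFOQueue(10, [tf.float32], shapes=[()],
                              shared_name='feeding_queue', name='handle_b')

with tf.Session() as sess:
    sess.run(q_a.enqueue(1.0))
    print(sess.run(q_b.dequeue()))  # 1.0: both handles alias one queue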
Example #5
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(cancel_pending_enqueues,
                                         "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
                                                    "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name) 
Example #6
Source File: feeder.py    From lambda-packs with MIT License
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
Example #7
Source File: session_debug_testlib.py    From lambda-packs with MIT License
def testDebugQueueOpsDoesNotoErrorOut(self):
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_urls=self._debug_urls())

      sess.run(q_init, options=run_options, run_metadata=run_metadata)

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())

      fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
      self.assertIsInstance(fifo_queue_tensor,
                            debug_data.InconvertibleTensorProto)
      self.assertTrue(fifo_queue_tensor.initialized)
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0]) 
Example #8
Source File: feeder.py    From keras-lambda with MIT License
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
Example #9
Source File: sequence_queueing_state_saver.py    From keras-lambda with MIT License
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(cancel_pending_enqueues,
                                         "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
                                                    "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name) 
Example #10
Source File: validate_on_lfw.py    From facenet-demo with MIT License
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
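The evaluate() helper is defined elsewhere in facenet; what follows is a hedged sketch of the feeding step it is expected to perform, reusing the names defined above. The (None, 1) placeholder shapes mean each path/label/control value is enqueued as its own single-element row, all pushed up front in one enqueue_many call:

import numpy as np

nrof_images = len(paths)
labels_array = np.expand_dims(np.arange(nrof_images), 1)
image_paths_array = np.expand_dims(np.array(paths), 1)
control_array = np.zeros_like(labels_array, np.int32)  # 0 = default preprocessing
sess.run(eval_enqueue_op,
         feed_dict={image_paths_placeholder: image_paths_array,
                    labels_placeholder: labels_array,
                    control_placeholder: control_array})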
Example #11
Source File: input.py    From deep_image_model with Apache License 2.0
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)


# Batching functions ---------------------------------------------------------- 
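What dynamic_pad buys you, sketched briefly: PaddingFIFOQueue accepts partially-defined shapes and zero-pads each dequeue_many batch to the largest element in it, which a plain FIFOQueue (fully-defined shapes only) cannot do.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32],
                                   shapes=[[None]])  # variable-length rows
with tf.Session() as sess:
    sess.run(q.enqueue(([1, 2],)))
    sess.run(q.enqueue(([3, 4, 5],)))
    print(sess.run(q.dequeue_many(2)))  # [[1 2 0], [3 4 5]]: padded batch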
Example #12
Source File: validate_on_lfw.py    From facenet with MIT License
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
Example #13
Source File: session_test.py    From deep_image_model with Apache License 2.0
def testTimeoutWithShortOperations(self):
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(
        capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))

    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2) 
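A companion sketch: the same operation_timeout_in_ms setting converts a blocking op into an error rather than a hang, so dequeuing from an empty FIFOQueue should raise DeadlineExceededError once the timeout elapses.

import tensorflow as tf

q = tf.FIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[()])
config = tf.ConfigProto(operation_timeout_in_ms=500)
with tf.Session(config=config) as sess:
    try:
        sess.run(q.dequeue())  # blocks: the queue is empty
    except tf.errors.DeadlineExceededError:
        print('dequeue timed out as expected')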
Example #14
Source File: prefetch_queue.py    From tf-slim with Apache License 2.0
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue
          if dynamic_pad else data_flow_ops.FIFOQueue) 
Example #15
Source File: session_debug_testlib.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def testDebugQueueOpsDoesNotoErrorOut(self):
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

      _, dump = self._debug_run_and_get_dump(sess, q_init)
      self.assertTrue(dump.loaded_partition_graphs())

      fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
      self.assertIsInstance(fifo_queue_tensor,
                            debug_data.InconvertibleTensorProto)
      self.assertTrue(fifo_queue_tensor.initialized)
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0]) 
Example #16
Source File: parallel_reader_test.py    From tf-slim with Apache License 2.0
def testReadUpToFromFIFOQueue(self):
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=99,
        dtypes=[dtypes_lib.string, dtypes_lib.string],
        shapes=[[], []])
    self._verify_read_up_to_out(shared_queue) 
Example #17
Source File: parallel_reader_test.py    From tf-slim with Apache License 2.0
def testFIFOSharedQueue(self):
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
    self._verify_all_data_sources_read(shared_queue) 
Example #18
Source File: validate_on_lfw.py    From tindetheus with MIT License
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
Example #19
Source File: input.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #20
Source File: validate_on_lfw.py    From uai-sdk with Apache License 2.0
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
Example #21
Source File: input.py    From keras-lambda with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #22
Source File: parallel_reader_test.py    From auto-alt-text-lambda-api with MIT License
def testFIFOSharedQueue(self):
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
    self._verify_all_data_sources_read(shared_queue) 
Example #23
Source File: input.py    From auto-alt-text-lambda-api with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #24
Source File: parallel_reader_test.py    From keras-lambda with MIT License
def testFIFOSharedQueue(self):
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
    self._verify_all_data_sources_read(shared_queue) 
Example #25
Source File: validate_on_lfw.py    From TNT with GNU General Public License v3.0
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
Example #26
Source File: validate_on_lfw.py    From facenet_mtcnn_to_mobile with MIT License
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
            
            image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
            labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
            batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
            control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
            phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
 
            nrof_preprocess_threads = 4
            image_size = (args.image_size, args.image_size)
            eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                        dtypes=[tf.string, tf.int32, tf.int32],
                                        shapes=[(1,), (1,), (1,)],
                                        shared_name=None, name=None)
            eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op')
            image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
     
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(args.model, input_map=input_map)

            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=sess)

            evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
                args.use_flipped_images, args.use_fixed_image_standardization) 
Example #27
Source File: input.py    From lambda-packs with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #28
Source File: tracklet_utils_3d_online.py    From TNT with GNU General Public License v3.0
def init_triplet_model():
    global track_struct
    global triplet_graph
    global triplet_sess
    
    global eval_enqueue_op
    global image_paths_placeholder
    global labels_placeholder
    global phase_train_placeholder
    global batch_size_placeholder
    global control_placeholder
    global embeddings
    global label_batch
    global distance_metric
    f_image_size = 160 
    distance_metric = 0 

    triplet_graph = tf.Graph()
    with triplet_graph.as_default():
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        nrof_preprocess_threads = 4
        image_size = (f_image_size, f_image_size)
        eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                    dtypes=[tf.string, tf.int32, tf.int32],
                                    shapes=[(1,), (1,), (1,)],
                                    shared_name=None, name=None)
        eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, 
                                                         labels_placeholder, control_placeholder], 
                                                        name='eval_enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, 
                                                                 nrof_preprocess_threads, batch_size_placeholder)
    triplet_sess = tf.Session(graph=triplet_graph)   
    with triplet_sess.as_default():
        with triplet_graph.as_default():
            # Load the model
            input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder}
            facenet.load_model(track_struct['file_path']['triplet_model'], input_map=input_map)
            
            # Get output tensor
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord, sess=triplet_sess)
    return 
Example #29
Source File: sequence_queueing_state_saver.py    From keras-lambda with MIT License
def _prepare_barrier_reads(self):
    """Creates ops for reading the barrier, as used by properties like `length`.
    """
    # Ops for reading from the barrier.  These ops must be run in a
    # different thread than the prefetcher op to avoid blocking.
    received = self._barrier.take_many(
        self._batch_size, self._allow_small_batch, name="BarrierTakeMany")

    self._received_indices = received[0]
    self._received_keys = received[1]
    received_values = received[2]

    self._received_sequence = received_values[
        self._get_barrier_sequence_index()]
    self._received_sequence_count = received_values[
        self._get_barrier_sequence_count_index()]
    self._received_next_key = received_values[
        self._get_barrier_next_key_index()]
    self._received_length = received_values[self._get_barrier_length_index()]
    self._received_total_length = received_values[
        self._get_barrier_total_length_index()]
    self._received_context = collections.OrderedDict(
        (name, received_values[self._get_barrier_index("context", name)])
        for name in self._sorted_context.keys())
    self._received_sequences = collections.OrderedDict(
        (name, received_values[self._get_barrier_index("sequence", name)])
        for name in self._sorted_sequences.keys())

    self._received_batch_size = array_ops.squeeze(
        array_ops.shape(self._received_length))

    # Which examples are we done with?
    self._sequence_is_done = (
        self._received_sequence + 1 >= self._received_sequence_count)

    # Compute the number of finished sequences and dequeue as many tokens from
    # the capacity queue.
    finished_sequences = (math_ops.reduce_sum(
        math_ops.cast(self._sequence_is_done, dtypes.int32)))
    # TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
    dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)

    # Tie the dequeue_op to the received_state, such that it is definitely
    # carried out.
    with ops.control_dependencies([dequeue_op]):
      self._received_states = collections.OrderedDict(
          (name, array_ops.identity(received_values[self._get_barrier_index(
              "state", name)])) for name in self._sorted_states.keys())
    self._next_batch = NextQueuedSequenceBatch(self) 
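The "tie the dequeue_op to the received_state" trick above is the standard control_dependencies idiom; a minimal standalone sketch of it:

import tensorflow as tf

q = tf.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[()])
enq = q.enqueue_many(([1.0, 2.0],))
dequeue_op = q.dequeue()
x = tf.constant(42.0)
with tf.control_dependencies([dequeue_op]):
    # identity() inside the context makes fetching x also run dequeue_op.
    x_with_side_effect = tf.identity(x)

with tf.Session() as sess:
    sess.run(enq)
    sess.run(x_with_side_effect)  # also consumes one queue element
    print(sess.run(q.size()))     # 1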
Example #30
Source File: parallel_reader.py    From keras-lambda with MIT License
def __init__(self,
               reader_class,
               common_queue,
               num_readers=4,
               reader_kwargs=None):
    """ParallelReader creates num_readers instances of the reader_class.

    Each instance is created by calling the `reader_class` function passing
    the arguments specified in `reader_kwargs` as in:
      reader_class(**reader_kwargs)

    When you read from a ParallelReader with its `read()` method, you simply
    dequeue examples from the `common_queue`.

    The readers will read different files in parallel, asynchronously enqueueing
    their output into `common_queue`. The `common_queue.dtypes` must be
    [tf.string, tf.string].

    Because each reader can read from a different file, the examples in the
    `common_queue` could be from different files. Due to the asynchronous
    reading there is no guarantee that all the readers will read the same
    number of examples.

    If the `common_queue` is a shuffling queue, then the examples are shuffled.

    Usage:
      common_queue = tf.RandomShuffleQueue(
          capacity=256,
          min_after_dequeue=128,
          dtypes=[tf.string, tf.string])
      p_reader = ParallelReader(tf.TFRecordReader, common_queue)

      common_queue = tf.FIFOQueue(
          capacity=256,
          dtypes=[tf.string, tf.string])
      p_reader = ParallelReader(tf.TFRecordReader, common_queue, num_readers=2)


    Args:
      reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
      common_queue: a Queue to hold (key, value) pairs with `dtypes` equal to
        [tf.string, tf.string]. Must be one of the data_flow_ops.Queues
        instances, ex. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
      num_readers: an integer, number of instances of reader_class to create.
      reader_kwargs: an optional dict of kwargs to create the readers.

    Raises:
      TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
    """
    if len(common_queue.dtypes) != 2:
      raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
    for dtype in common_queue.dtypes:
      if not dtype.is_compatible_with(tf_dtypes.string):
        raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')

    reader_kwargs = reader_kwargs or {}
    self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
    self._common_queue = common_queue
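Putting the docstring's usage notes together, a hedged sketch of driving this ParallelReader end to end (the filename is a placeholder; read() dequeues one (key, value) pair from the common queue):

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

common_queue = data_flow_ops.FIFOQueue(capacity=256,
                                       dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue, num_readers=4)

# Placeholder input file; any queue of filenames works here.
filename_queue = tf.train.string_input_producer(['data-00000.tfrecord'])
key, value = p_reader.read(filename_queue)  # `value` is a serialized record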