Python object_detection.utils.dataset_util.make_initializable_iterator() Examples

The following are 30 code examples of object_detection.utils.dataset_util.make_initializable_iterator(), collected from open-source projects. The source file, project, and license are noted above each example.
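
All of these snippets target the TensorFlow 1.x Object Detection API. For orientation, here is a sketch of the imports the test snippets rely on, together with the helper itself as it is typically implemented in dataset_util.py: it creates an initializable iterator and registers the initializer in the TABLE_INITIALIZERS collection, so tf.tables_initializer() (and therefore tf.train.Supervisor session preparation) runs it automatically. Treat the exact module paths as assumptions; individual forks may differ slightly.

import tensorflow as tf
from google.protobuf import text_format

# Module paths follow the upstream TF 1.x Object Detection API layout;
# the forks listed below may diverge from it.
from object_detection.builders import dataset_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util

# Sketch of the helper (paraphrased from the TF 1.x API, not copied from
# any single project below):
def make_initializable_iterator(dataset):
  """Creates an initializable iterator whose initializer is registered in
  tf.GraphKeys.TABLE_INITIALIZERS, so tf.tables_initializer() covers it."""
  iterator = dataset.make_initializable_iterator()
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator
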
Example #1
Source File: dataset_builder_test.py    From ros_tensorflow with Apache License 2.0
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
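
Because the initializer lands in the TABLE_INITIALIZERS collection, the tf.train.Supervisor above runs it while preparing the session; no explicit sess.run(iterator.initializer) is needed. A minimal standalone sketch of the same mechanism, driven by tf.tables_initializer() directly (not taken from any of the listed projects):

import tensorflow as tf
from object_detection.utils import dataset_util

dataset = tf.data.Dataset.from_tensor_slices([10, 20, 30])
data = dataset_util.make_initializable_iterator(dataset).get_next()

with tf.Session() as sess:
  # Also runs the iterator initializer registered by the helper.
  sess.run(tf.tables_initializer())
  print(sess.run(data))  # -> 10
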
Example #2
Source File: dataset_builder_test.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #3
Source File: dataset_builder_test.py    From AniSeg with Apache License 2.0
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #4
Source File: dataset_builder_test.py    From Person-Detection-and-Tracking with MIT License
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #5
Source File: dataset_builder_test.py    From Elphas with Apache License 2.0
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #6
Source File: dataset_builder_test.py    From Person-Detection-and-Tracking with MIT License
def test_build_tf_record_input_reader_with_additional_channels(self):
    tf_record_path = self.create_tf_record(has_additional_channels=True)

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto, batch_size=2,
            num_additional_channels=2)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertEqual((2, 4, 5, 5),
                     output_dict[fields.InputDataFields.image].shape)
Example #7
Source File: dataset_builder_test.py    From Gun-Detector with Apache License 2.0
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #8
Source File: dataset_builder_test.py    From Traffic-Rule-Violation-Detection-System with MIT License
def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #9
Source File: dataset_builder_test.py    From Traffic-Rule-Violation-Detection-System with MIT License
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [2], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0]) 
Example #10
Source File: dataset_util_test.py    From Elphas with Apache License 2.0
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
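
The expected output follows from the table definition: keys [1, 0, -1] map to the reversed list [-1, 0, 1] with a default of 100 for missing keys, so the slice [1, 2, -1, 5] looks up to [-1, 100, 1, 100]. A single tf.tables_initializer() run initializes both the HashTable and the dataset iterator, which is precisely the point of make_initializable_iterator.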
Example #11
Source File: dataset_builder_test.py    From ros_tensorflow with Apache License 2.0
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([[2]],
                        output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) 
Example #12
Source File: dataset_builder_test.py    From Elphas with Apache License 2.0
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [2], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0]) 
Example #13
Source File: dataset_util_test.py    From ros_tensorflow with Apache License 2.0
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #14
Source File: dataset_builder_test.py    From Gun-Detector with Apache License 2.0
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([[2]],
                        output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) 
Example #15
Source File: dataset_util_test.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #16
Source File: dataset_util_test.py    From Gun-Detector with Apache License 2.0
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #17
Source File: dataset_util_test.py    From AniSeg with Apache License 2.0
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #18
Source File: dataset_util_test.py    From Traffic-Rule-Violation-Detection-System with MIT License
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #19
Source File: object_detection_trainer.py    From CVTron with Apache License 2.0
def get_next(self, config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next() 
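
This one-liner is the same pattern the eval.py examples below use: bound to an input config via functools.partial, it becomes the zero-argument create_input_dict_fn that the training or evaluation loop calls to obtain the input tensor dict.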
Example #20
Source File: dataset_builder_test.py    From AniSeg with Apache License 2.0
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [2], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0]) 
Example #21
Source File: dataset_builder_test.py    From Person-Detection-and-Tracking with MIT License
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([[2]],
                        output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) 
Example #22
Source File: dataset_util_test.py    From Person-Detection-and-Tracking with MIT License
def test_make_initializable_iterator_with_hashTable(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys,
            values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_util.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()

    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100]) 
Example #23
Source File: dataset_builder_test.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertTrue(
        fields.InputDataFields.groundtruth_instance_masks not in output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([[2]],
                        output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) 
Example #24
Source File: dataset_builder_test.py    From AniSeg with Apache License 2.0
def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual(
        [2, 2, 4, 5],
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) 
Example #25
Source File: dataset_builder_test.py    From Elphas with Apache License 2.0
def test_build_tf_record_input_reader_with_batch_size_two(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual([2, 4, 5, 3],
                        output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([2, 2, 3],
                        output_dict[fields.InputDataFields.groundtruth_classes].
                        shape)
    self.assertAllEqual([2, 2, 4],
                        output_dict[fields.InputDataFields.groundtruth_boxes].
                        shape)
    self.assertAllEqual(
        [[[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]],
         [[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]]],
        output_dict[fields.InputDataFields.groundtruth_boxes]) 
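
Note how the padded-batch arguments show up in the assertions: with batch_size=2 and max_num_boxes=2, the single groundtruth box in each record is padded with a row of zeros, giving the [2, 2, 4] boxes tensor whose second row per example is all zeros, while the one-hot encoding (depth=3) yields the [2, 2, 3] classes tensor.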
Example #26
Source File: eval.py    From Gun-Detector with Apache License 2.0
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(FLAGS.eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_util.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir) 
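
Here functools.partial binds the selected input_config, so evaluator.evaluate receives a zero-argument create_input_dict_fn it can call to build the input pipeline; max_num_classes is taken as the largest id in the label map before converting it to the category list the evaluator expects.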
Example #27
Source File: dataset_builder_test.py    From AniSeg with Apache License 2.0
def test_build_tf_record_input_reader_with_batch_size_two(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual([2, 4, 5, 3],
                        output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([2, 2, 3],
                        output_dict[fields.InputDataFields.groundtruth_classes].
                        shape)
    self.assertAllEqual([2, 2, 4],
                        output_dict[fields.InputDataFields.groundtruth_boxes].
                        shape)
    self.assertAllEqual(
        [[[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]],
         [[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]]],
        output_dict[fields.InputDataFields.groundtruth_boxes]) 
Example #28
Source File: eval.py    From Elphas with Apache License 2.0
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir) 
Example #29
Source File: dataset_builder_test.py    From ros_tensorflow with Apache License 2.0
def test_build_tf_record_input_reader_with_batch_size_two(self):
    tf_record_path = self.create_tf_record()

    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
      return tensor_dict

    tensor_dict = dataset_util.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2,
            max_num_boxes=2,
            num_classes=3,
            spatial_image_shape=[4, 5])).get_next()

    sv = tf.train.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual([2, 4, 5, 3],
                        output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual([2, 2, 3],
                        output_dict[fields.InputDataFields.groundtruth_classes].
                        shape)
    self.assertAllEqual([2, 2, 4],
                        output_dict[fields.InputDataFields.groundtruth_boxes].
                        shape)
    self.assertAllEqual(
        [[[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]],
         [[0.0, 0.0, 1.0, 1.0],
          [0.0, 0.0, 0.0, 0.0]]],
        output_dict[fields.InputDataFields.groundtruth_boxes]) 
Example #30
Source File: eval.py    From ros_tensorflow with Apache License 2.0
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(FLAGS.eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_util.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)