Python object_detection.models.feature_map_generators.multi_resolution_feature_maps() Examples
The following are 30 code examples of object_detection.models.feature_map_generators.multi_resolution_feature_maps(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module object_detection.models.feature_map_generators, or try the search function.
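Taken together, the examples below follow one pattern: build a dict of backbone feature maps keyed by layer name, describe the desired output pyramid in a feature_map_layout dict, and let multi_resolution_feature_maps() append the extra downsampling convolutions. The following minimal sketch distills that pattern; the tensor shapes and layout values are illustrative assumptions drawn from the tests below, not a canonical configuration.

import tensorflow as tf
from object_detection.models import feature_map_generators

# Two backbone endpoints, keyed by layer name (shapes are illustrative).
image_features = {
    'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512]),
    'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024]),
}

# Reuse the two backbone maps as-is (layer_depth -1), then append two new
# stride-2 layers with 512 and 256 output channels (the '' entries).
feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', ''],
    'layer_depth': [-1, -1, 512, 256],
}

feature_maps = feature_map_generators.multi_resolution_feature_maps(
    feature_map_layout=feature_map_layout,
    depth_multiplier=1,
    min_depth=32,
    insert_1x1_conv=True,
    image_features=image_features)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for name, value in sess.run(feature_maps).items():
    print(name, value.shape)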
Example #1
Source File: feature_map_generators_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def _build_feature_map_generator(self, feature_map_layout, use_keras,
                                 pool_residual=False):
  if use_keras:
    return feature_map_generators.KerasMultiResolutionFeatureMaps(
        feature_map_layout=feature_map_layout,
        depth_multiplier=1,
        min_depth=32,
        insert_1x1_conv=True,
        freeze_batchnorm=False,
        is_training=True,
        conv_hyperparams=self._build_conv_hyperparams(),
        name='FeatureMaps')
  else:
    def feature_map_generator(image_features):
      return feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=1,
          min_depth=32,
          insert_1x1_conv=True,
          image_features=image_features,
          pool_residual=pool_residual)
    return feature_map_generator
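Note that both branches yield something callable on a dict of backbone tensors. A hypothetical call site would look like the following (the names layout and image_features stand in for the test's own fixtures):

# Hedged usage sketch; 'layout' and 'image_features' are placeholders,
# not names from the excerpt above.
generator = self._build_feature_map_generator(layout, use_keras=False)
feature_maps = generator(image_features)  # ordered dict: name -> tensor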
Example #2
Source File: ssd_mobilenet_v1_feature_extractor.py From tensorflow with BSD 2-Clause "Simplified" License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
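The feature_map_layout conventions used throughout these examples are easy to misread, so here is the same MobileNet layout annotated. The comments are an informed reading inferred from the expected shapes in the tests, not official documentation:

feature_map_layout = {
    # Non-empty entries reuse an existing backbone endpoint by name; empty
    # strings ask the generator to create a new downsampling layer on top
    # of the previous feature map.
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise',
                   '', '', '', ''],
    # -1 keeps the source layer's depth; positive values set the number of
    # output channels for each newly created layer.
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}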
Example #3
Source File: feature_map_generators_test.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
    self):
  image_features = {
      'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
                                               dtype=tf.float32),
      'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                               dtype=tf.float32),
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Conv2d_11_pointwise': (4, 16, 16, 512),
      'Conv2d_13_pointwise': (4, 8, 8, 1024),
      'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
      'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
      'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
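The EMBEDDED_SSD_MOBILENET_V1_LAYOUT constant is defined elsewhere in the test file and is not shown in this excerpt. Working backwards from the expected map names (note the final 2x2 kernel in Conv2d_4_2x2_s2_256), it is presumably close to:

# Assumed reconstruction -- not part of the excerpt above.
EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256],
    'conv_kernel_size': [-1, -1, 3, 3, 2],
}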
Example #4
Source File: feature_map_generators_test.py From ros_tensorflow with Apache License 2.0 | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
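Neither INCEPTION_V2_LAYOUT nor INCEPTION_V3_LAYOUT is shown in these excerpts. Working backwards from the expected map names and depths in the tests, plausible reconstructions are (treat these as assumptions, not the test file's literal constants):

# Assumed reconstructions -- not part of the excerpts on this page.
INCEPTION_V2_LAYOUT = {
    'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 256],
}
INCEPTION_V3_LAYOUT = {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128],
}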
Example #5
Source File: feature_map_generators_test.py From moveo_ros with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #6
Source File: feature_map_generators_test.py From moveo_ros with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #7
Source File: ssd_mobilenet_v1_feature_extractor.py From moveo_ros with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #8
Source File: ssd_inception_v2_feature_extractor.py From moveo_ros with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #9
Source File: feature_map_generators_test.py From hands-detection with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #10
Source File: feature_map_generators_test.py From hands-detection with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #11
Source File: ssd_mobilenet_v1_feature_extractor.py From hands-detection with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #12
Source File: ssd_inception_v2_feature_extractor.py From hands-detection with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #13
Source File: ssd_inception_v3_feature_extractor.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
      'layer_depth': [-1, -1, -1, 512, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams_fn()):
    with tf.variable_scope('InceptionV3',
                           reuse=self._reuse_weights) as scope:
      _, image_features = inception_v3.inception_v3_base(
          ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
          final_endpoint='Mixed_7c',
          min_depth=self._min_depth,
          depth_multiplier=self._depth_multiplier,
          scope=scope)
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)

  return feature_maps.values()
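Example #13 and several later excerpts come from a newer revision of the API: the explicit tf.Assert is replaced by shape_utils.check_min_image_dim, inputs go through ops.pad_to_multiple before the backbone, and the layout gains the use_explicit_padding and use_depthwise switches. ops.pad_to_multiple zero-pads height and width up to the next multiple of self._pad_to_multiple so the repeated stride-2 layers divide evenly; a rough stand-alone sketch of that behavior follows (an approximation, not the library's implementation):

def pad_to_multiple(image, multiple):
  """Sketch of object_detection.utils.ops.pad_to_multiple's behavior."""
  shape = tf.shape(image)  # [batch, height, width, channels]
  padded_h = -tf.floordiv(-shape[1], multiple) * multiple  # ceil division
  padded_w = -tf.floordiv(-shape[2], multiple) * multiple
  # Zero-pad on the bottom/right so spatial dims become exact multiples.
  return tf.pad(image, [[0, 0], [0, padded_h - shape[1]],
                        [0, padded_w - shape[2]], [0, 0]])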
Example #14
Source File: feature_map_generators_test.py From Gun-Detector with Apache License 2.0 | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #15
Source File: feature_map_generators_test.py From Gun-Detector with Apache License 2.0 | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #16
Source File: ssd_inception_v2_feature_extractor.py From tensorflow with BSD 2-Clause "Simplified" License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #17
Source File: ssd_inception_v3_feature_extractor.py From Gun-Detector with Apache License 2.0 | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
      'layer_depth': [-1, -1, -1, 512, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams_fn()):
    with tf.variable_scope('InceptionV3',
                           reuse=self._reuse_weights) as scope:
      _, image_features = inception_v3.inception_v3_base(
          ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
          final_endpoint='Mixed_7c',
          min_depth=self._min_depth,
          depth_multiplier=self._depth_multiplier,
          scope=scope)
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)

  return feature_maps.values()
Example #18
Source File: feature_map_generators_test.py From tensorflow with BSD 2-Clause "Simplified" License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #19
Source File: feature_map_generators_test.py From tensorflow with BSD 2-Clause "Simplified" License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #20
Source File: ssd_inception_v2_feature_extractor.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV2',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_5c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #21
Source File: ssd_mobilenet_v1_feature_extractor.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #22
Source File: feature_map_generators_test.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #23
Source File: feature_map_generators_test.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #24
Source File: ssd_inception_v3_feature_extractor.py From Traffic-Rule-Violation-Detection-System with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
      'layer_depth': [-1, -1, -1, 512, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams):
    with tf.variable_scope('InceptionV3',
                           reuse=self._reuse_weights) as scope:
      _, image_features = inception_v3.inception_v3_base(
          ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
          final_endpoint='Mixed_7c',
          min_depth=self._min_depth,
          depth_multiplier=self._depth_multiplier,
          scope=scope)
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)

  return feature_maps.values()
Example #25
Source File: ssd_mobilenet_v1_feature_extractor.py From Traffic-Rule-Violation-Detection-System with MIT License | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams):
    # TODO: Enable fused batch norm once quantization supports it.
    with slim.arg_scope([slim.batch_norm], fused=False):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #26
Source File: feature_map_generators_test.py From Traffic-Rule-Violation-Detection-System with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
    self):
  image_features = {
      'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
                                               dtype=tf.float32),
      'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                               dtype=tf.float32),
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Conv2d_11_pointwise': (4, 16, 16, 512),
      'Conv2d_13_pointwise': (4, 8, 8, 1024),
      'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
      'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
      'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #27
Source File: feature_map_generators_test.py From Traffic-Rule-Violation-Detection-System with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v3(self):
  image_features = {
      'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
      'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
      'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V3_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_5d': (4, 35, 35, 256),
      'Mixed_6e': (4, 17, 17, 576),
      'Mixed_7c': (4, 8, 8, 1024),
      'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #28
Source File: feature_map_generators_test.py From Traffic-Rule-Violation-Detection-System with MIT License | 5 votes |
def test_get_expected_feature_map_shapes_with_inception_v2(self):
  image_features = {
      'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
      'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
      'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=INCEPTION_V2_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Mixed_3c': (4, 28, 28, 256),
      'Mixed_4c': (4, 14, 14, 576),
      'Mixed_5c': (4, 7, 7, 1024),
      'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
      'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
      'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #29
Source File: ssd_inception_v3_feature_extractor.py From yolo_v2 with Apache License 2.0 | 5 votes |
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  feature_map_layout = {
      'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
      'layer_depth': [-1, -1, -1, 512, 256, 128],
  }

  with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
      with tf.variable_scope('InceptionV3',
                             reuse=self._reuse_weights) as scope:
        _, image_features = inception_v3.inception_v3_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Mixed_7c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

  return feature_maps.values()
Example #30
Source File: feature_map_generators_test.py From yolo_v2 with Apache License 2.0 | 5 votes |
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
    self):
  image_features = {
      'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
                                               dtype=tf.float32),
      'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                               dtype=tf.float32),
  }
  feature_maps = feature_map_generators.multi_resolution_feature_maps(
      feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
      depth_multiplier=1,
      min_depth=32,
      insert_1x1_conv=True,
      image_features=image_features)

  expected_feature_map_shapes = {
      'Conv2d_11_pointwise': (4, 16, 16, 512),
      'Conv2d_13_pointwise': (4, 8, 8, 1024),
      'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
      'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
      'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)