Python object_detection.builders.box_predictor_builder.build_convolutional_keras_box_predictor() Examples

The following are 21 code examples of object_detection.builders.box_predictor_builder.build_convolutional_keras_box_predictor(). Each example notes the project and source file it was taken from. You may also want to check out all available functions/classes of the module object_detection.builders.box_predictor_builder.
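All of the examples below are test methods excerpted from the convolutional Keras box predictor test files in the TensorFlow Object Detection API, so they rely on the scaffolding of those test classes. A minimal sketch of the shared imports and the _build_conv_hyperparams helper they call is shown below; the exact hyperparams proto text is an assumption modeled on the upstream test files rather than part of the excerpts themselves.

import numpy as np
import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.core import box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case


class ConvolutionalKerasBoxPredictorTest(test_case.TestCase):

  def _build_conv_hyperparams(self):
    # Builds the KerasLayerHyperparams object that every example below
    # passes as conv_hyperparams.
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: RELU_6
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)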
Example #1
Source File: convolutional_keras_box_predictor_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[1],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
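With an 8x8 feature map, one prediction per location, and num_classes=0 (class-agnostic prediction), the predictor emits 8 * 8 * 1 = 64 anchors per image, so the box encodings have shape [4, 64, 1, 4] and the single objectness score per anchor has shape [4, 64, 1].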
Example #2
Source File: convolutional_keras_box_predictor_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
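Here the 8x8 feature map with five predictions per location yields 8 * 8 * 5 = 320 anchors, and the class predictions carry num_classes_without_background + 1 = 7 columns because the predictor adds an implicit background class.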
Example #3
Source File: convolutional_keras_box_predictor_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[1],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Example #4
Source File: convolutional_keras_box_predictor_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Example #5
Source File: convolutional_keras_box_predictor_tf2_test.py    From models with Apache License 2.0
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=num_classes_without_background,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    def graph_fn(image_features):
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Example #6
Source File: convolutional_keras_box_predictor_tf2_test.py    From models with Apache License 2.0
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[1],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    def graph_fn(image_features):
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Example #7
Source File: convolutional_keras_box_predictor_tf2_test.py    From models with Apache License 2.0
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    def graph_fn(image_features):
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Example #8
Source File: convolutional_keras_box_predictor_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Example #9
Source File: convolutional_keras_box_predictor_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[1],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Example #10
Source File: convolutional_keras_box_predictor_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Example #11
Source File: convolutional_keras_box_predictor_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Example #12
Source File: convolutional_keras_box_predictor_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_boxes_for_one_aspect_ratio_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[1],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) 
Example #13
Source File: convolutional_keras_box_predictor_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Example #14
Source File: convolutional_keras_box_predictor_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1]) 
Example #15
Source File: convolutional_keras_box_predictor_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_boxes_for_five_aspect_ratios_per_location(self):
    def graph_fn(image_features):
      conv_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=0,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5],
              min_depth=0,
              max_depth=32,
              num_layers_before_predictor=1,
              use_dropout=True,
              dropout_keep_prob=0.8,
              kernel_size=1,
              box_code_size=4
          ))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) 
Example #16
Source File: convolutional_keras_box_predictor_test.py    From MAX-Object-Detector with Apache License 2.0
def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background'])

  # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 
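Feeding a 32x32 feature map through the placeholder at run time produces 32 * 32 * 5 = 5120 anchors, and the expected variable set confirms that a single shared 1x1 convolution of depth 32 plus one box head and one class head are the only trainable layers created.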
Example #17
Source File: convolutional_keras_box_predictor_test.py    From g-tensorflow-models with Apache License 2.0
def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background'])

  # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 
Example #18
Source File: convolutional_keras_box_predictor_tf2_test.py    From models with Apache License 2.0
def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    tf.keras.backend.clear_session()
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    variables = []
    def graph_fn(image_features):
      box_predictions = conv_box_predictor([image_features])
      variables.extend(list(conv_box_predictor.variables))
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return box_encodings, objectness_predictions
    resolution = 32
    expected_num_anchors = resolution*resolution*5
    box_encodings, objectness_predictions = self.execute(
        graph_fn, [np.random.rand(4, resolution, resolution, 64)])

    actual_variable_set = set([var.name.split(':')[0] for var in variables])
    self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4])
    self.assertAllEqual(objectness_predictions.shape,
                        [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background']) 
Example #19
Source File: convolutional_keras_box_predictor_tf2_test.py    From models with Apache License 2.0
def test_use_depthwise_convolution(self):
    tf.keras.backend.clear_session()
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=3,
            box_code_size=4,
            use_depthwise=True
        ))
    variables = []
    def graph_fn(image_features):
      box_predictions = conv_box_predictor([image_features])
      variables.extend(list(conv_box_predictor.variables))
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return box_encodings, objectness_predictions

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    box_encodings, objectness_predictions = self.execute(
        graph_fn, [np.random.rand(4, resolution, resolution, 64)])

    actual_variable_set = set([var.name.split(':')[0] for var in variables])
    self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4])
    self.assertAllEqual(objectness_predictions.shape,
                        [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',

        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
        'bias',

        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
        'depthwise_kernel',

        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias',

        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/'
        'depthwise_kernel',

        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background']) 
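With use_depthwise=True, each prediction head applies a depthwise convolution before its pointwise predictor, which is why the expected variable set gains the BoxEncodingPredictor_depthwise and ClassPredictor_depthwise bias and depthwise_kernel entries in addition to the regular kernels.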
Example #20
Source File: convolutional_keras_box_predictor_test.py    From vehicle_counting_tensorflow with MIT License
def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)

  # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 
Example #21
Source File: convolutional_keras_box_predictor_test.py    From multilabel-image-classification-tensorflow with MIT License
def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_convolutional_keras_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams=self._build_conv_hyperparams(),
            freeze_batchnorm=False,
            inplace_batchnorm_update=False,
            num_predictions_per_location_list=[5],
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            use_dropout=True,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4
        ))
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)

  # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10