Python tensorflow.contrib.slim.conv2d_in_plane() Examples

The following are 5 code examples of tensorflow.contrib.slim.conv2d_in_plane(). Each example lists its original project and source file above the code. You may also want to check out all available functions/classes of the module tensorflow.contrib.slim.
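conv2d_in_plane applies a single 2-D filter to every input channel independently, so the output keeps the input's channel count; it is useful for simple channel-wise operations such as image gradients. Below is a minimal usage sketch, assuming TensorFlow 1.x with tf.contrib available (the layer does not exist in TensorFlow 2.x):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# NHWC input: one image, 16x16 pixels, 3 channels.
inputs = tf.placeholder(tf.float32, shape=[1, 16, 16, 3])

# One 3x3 filter is learned and applied to each channel independently,
# so the output has the same number of channels as the input.
net = slim.conv2d_in_plane(inputs, kernel_size=[3, 3], scope='in_plane')

print(net.shape)  # (1, 16, 16, 3) with the defaults stride=1, padding='SAME'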
Example #1
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_plane_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_plane_conv2d/input')
      # Note: this arg_scope targets slim.separable_conv2d only, so its
      # padding and weights_initializer do not affect conv2d_in_plane below.
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        # Positional arguments: kernel_size=2, stride=[3, 3].
        net = slim.conv2d_in_plane(inputs, 2, [3, 3], scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_plane_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2)

  # TODO - this fails due to unsupported op "Tile" 
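In the call above, the positional arguments map to kernel_size=2 and stride=[3, 3]. Internally, conv2d_in_plane is implemented as a depthwise convolution with tiled weights, which is where the Tile op mentioned in the TODO comes from. A minimal sketch of checking the resulting output shape directly, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

graph = tf.Graph()
with graph.as_default():
  inputs = tf.placeholder(tf.float32, shape=[None, 16, 16, 3])
  # Same call as in the test: 2x2 kernel, stride [3, 3], default padding='SAME'.
  net = slim.conv2d_in_plane(inputs, 2, [3, 3], scope='conv1')
  init = tf.global_variables_initializer()

with tf.Session(graph=graph) as sess:
  sess.run(init)
  out = sess.run(net, {inputs: np.zeros((1, 16, 16, 3), np.float32)})
  print(out.shape)  # (1, 6, 6, 3): spatial dims ceil(16 / 3) = 6, channels kept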
Example #2
Source File: network.py    From MSDS-RCNN with MIT License
def create_architecture_demo(self, mode, num_classes, tag=None,
                          anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    assert mode == 'TEST', 'only for demo'

    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._lwir = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode
    self._anchor_scales = anchor_scales
    self._num_scales = len(anchor_scales)

    self._anchor_ratios = anchor_ratios
    self._num_ratios = len(anchor_ratios)

    self._num_anchors = self._num_scales * self._num_ratios

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # select initializers
    if cfg.TRAIN.TRUNCATED:
      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
      initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

    # list as many types of layers as possible, even if they are not used now
    with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer, 
                    biases_initializer=tf.constant_initializer(0.0)):
      self._build_network(training, initializer, initializer_bbox)

    layers_to_output = {}

    return layers_to_output 
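The arg_scope pattern above recurs in the remaining examples: every slim layer listed in the scope picks up the shared regularizers and bias initializer unless a call overrides them. A stripped-down, self-contained sketch of the same idea, with 1e-4 standing in for cfg.TRAIN.WEIGHT_DECAY:

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.placeholder(tf.float32, shape=[1, 32, 32, 3])
weights_regularizer = tf.contrib.layers.l2_regularizer(1e-4)

with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane],
                    weights_regularizer=weights_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):
  net = slim.conv2d(inputs, 16, [3, 3], scope='conv')        # regularized
  net = slim.conv2d_in_plane(net, [3, 3], scope='in_plane')  # regularized

# The L2 penalties are collected automatically:
print(tf.losses.get_regularization_losses())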
Example #3
Source File: network.py    From iter-reason with MIT License
def create_architecture(self, mode, num_classes, tag=None):
    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._memory_size = tf.placeholder(tf.int32, shape=[2])
    self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
    self._num_gt = tf.placeholder(tf.int32, shape=[])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # list as many types of layers as possible, even if they are not used now
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer, 
                    biases_initializer=tf.constant_initializer(0.0)): 
      rois, cls_prob = self._build_network(training)

    layers_to_output = {'rois': rois}

    if not testing:
      self._add_losses()
      layers_to_output.update(self._losses)
      val_summaries = []
      with tf.device("/cpu:0"):
        val_summaries.append(self._add_gt_image_summary())
        val_summaries.append(self._add_pred_summary())
        for key, var in self._event_summaries.items():
          val_summaries.append(tf.summary.scalar(key, var))
        for key, var in self._score_summaries.items():
          self._add_score_summary(key, var)
        for var in self._act_summaries:
          self._add_act_summary(var)

      self._summary_op = tf.summary.merge_all()
      self._summary_op_val = tf.summary.merge(val_summaries)

    layers_to_output.update(self._predictions)

    return layers_to_output 
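The summary bookkeeping above is the standard TF 1.x pattern: scalar summaries are registered one by one, tf.summary.merge_all() bundles everything registered so far, and tf.summary.merge() builds a separate op over an explicit subset. A stripped-down sketch with a hypothetical loss value:

import tensorflow as tf

total_loss = tf.constant(1.0)  # stands in for a real training loss
val_summaries = []
with tf.device("/cpu:0"):
  val_summaries.append(tf.summary.scalar('total_loss', total_loss))

summary_op = tf.summary.merge_all()               # all registered summaries
summary_op_val = tf.summary.merge(val_summaries)  # validation-only subset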
Example #4
Source File: base_memory.py    From iter-reason with MIT License
def create_architecture(self, mode, num_classes, tag=None):
    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._memory_size = tf.placeholder(tf.int32, shape=[2])
    self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
    self._count_base = tf.ones([1, cfg.MEM.CROP_SIZE, cfg.MEM.CROP_SIZE, 1])
    self._num_gt = tf.placeholder(tf.int32, shape=[])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # list as many types of layers as possible, even if they are not used now
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                        weights_regularizer=weights_regularizer,
                        biases_regularizer=biases_regularizer, 
                        biases_initializer=tf.constant_initializer(0.0)): 
      rois = self._build_memory(training, testing)

    layers_to_output = {'rois': rois}

    if not testing:
      self._add_memory_losses("loss")
      layers_to_output.update(self._losses)
      self._create_summary()

    layers_to_output.update(self._predictions)

    return layers_to_output

Example #5
Source File: base.py    From PoseFix_RELEASE with MIT License
def _make_graph(self):
        self.logger.info("Generating training graph on {} GPUs ...".format(self.cfg.num_gpus))

        weights_initializer = slim.xavier_initializer()
        biases_initializer = tf.constant_initializer(0.)
        biases_regularizer = tf.no_regularizer
        weights_regularizer = tf.contrib.layers.l2_regularizer(self.cfg.weight_decay)

        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cfg.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        # Force all Variables to reside on the CPU.
                        with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                            with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                                                 slim.conv2d_transpose, slim.separable_conv2d,
                                                 slim.fully_connected],
                                                weights_regularizer=weights_regularizer,
                                                biases_regularizer=biases_regularizer,
                                                weights_initializer=weights_initializer,
                                                biases_initializer=biases_initializer):
                                # loss over single GPU
                                self.net.make_network(is_train=True)
                                if i == self.cfg.num_gpus - 1:
                                    loss = self.net.get_loss(include_wd=True)
                                else:
                                    loss = self.net.get_loss()
                                self._input_list.append( self.net.get_inputs() )

                        tf.get_variable_scope().reuse_variables()

                        if i == 0:
                            if self.cfg.num_gpus > 1 and self.cfg.bn_train is True:
                                self.logger.warning("BN is calculated only on single GPU.")
                            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
                            with tf.control_dependencies(extra_update_ops):
                                grads = self._optimizer.compute_gradients(loss)
                        else:
                            grads = self._optimizer.compute_gradients(loss)
                        final_grads = []
                        with tf.variable_scope('Gradient_Mult') as scope:
                            # Pass-through: no per-variable gradient scaling is
                            # applied here; each (grad, var) pair is kept as-is.
                            for grad, var in grads:
                                final_grads.append((grad, var))
                        tower_grads.append(final_grads)

        if len(tower_grads) > 1:
            grads = average_gradients(tower_grads)
        else:
            grads = tower_grads[0]

        apply_gradient_op = self._optimizer.apply_gradients(grads)
        train_op = tf.group(apply_gradient_op, *extra_update_ops)

        return train_op
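average_gradients is defined elsewhere in the project; for context, here is a minimal sketch of the conventional implementation (as in TensorFlow's multi-GPU tutorials), which assumes every variable receives a dense gradient on every tower. The project's own version may differ:

import tensorflow as tf

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per GPU, as
    # returned by Optimizer.compute_gradients().
    average_grads = []
    # zip(*tower_grads) groups the pairs that belong to the same variable.
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # Variables are shared across towers; take the first tower's handle.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads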