Python tensorflow.no_regularizer() Examples

The following are 7 code examples of tensorflow.no_regularizer(). The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
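
Before the project examples, here is a minimal TF 1.x sketch (not taken from any of the projects below; the names are illustrative) of the core idiom: passing tf.no_regularizer to tf.get_variable opts a variable out of a scope-level regularizer, so it contributes nothing to the tf.GraphKeys.REGULARIZATION_LOSSES collection.

import tensorflow as tf  # TensorFlow 1.x

# A scope-level regularizer applies to every variable created in the scope
# unless the variable overrides it explicitly.
with tf.variable_scope("demo", regularizer=tf.contrib.layers.l2_regularizer(1e-4)):
  w = tf.get_variable("w", [3, 3])                              # inherits the scope's L2 regularizer
  b = tf.get_variable("b", [3], regularizer=tf.no_regularizer)  # opts out: no loss term is recorded

# Only "w" contributed a regularization loss; "b" added nothing.
print(len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))  # -> 1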
Example #1
Source File: base_memory.py    From iter-reason with MIT License
def _mem_init(self, is_training, name):
    mem_initializer = tf.constant_initializer(0.0)
    # Treated like a bias: regularized only when cfg.TRAIN.BIAS_DECAY is set
    if cfg.TRAIN.BIAS_DECAY:
      mem_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    else:
      mem_regularizer = tf.no_regularizer

    with tf.variable_scope('SMN'):
      with tf.variable_scope(name):
        mem_init = tf.get_variable('mem_init', 
                                   [1, cfg.MEM.INIT_H, cfg.MEM.INIT_W, cfg.MEM.C], 
                                   initializer=mem_initializer, 
                                   trainable=is_training,
                                   regularizer=mem_regularizer)
        self._score_summaries[0].append(mem_init)
        # resize it to the image-specific size
        mem_init = tf.image.resize_bilinear(mem_init, self._memory_size, 
                                            name="resize_init")

    return mem_init 
Example #2
Source File: vgg.py    From mtl-ssl with Apache License 2.0
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_regularizer=tf.no_regularizer,
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
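
A hypothetical usage sketch (the images placeholder and the 'conv1_1' scope are assumptions, not part of the mtl-ssl source): entering the returned arg_scope makes every conv/fc layer apply L2 weight decay to its weights, while tf.no_regularizer keeps the biases unregularized.

images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])  # assumed input tensor
with slim.arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # Weights get the L2 regularizer; biases get tf.no_regularizer.
  net = slim.conv2d(images, 64, [3, 3], scope='conv1_1')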
Example #3
Source File: variable_scope_test.py    From deep_image_model with Apache License 2.0
def testVarScopeRegularizer(self):
    with self.test_session() as sess:
      init = tf.constant_initializer(0.3)
      def regularizer1(v):
        return tf.reduce_mean(v) + 0.1
      def regularizer2(v):
        return tf.reduce_mean(v) + 0.2
      with tf.variable_scope("tower", regularizer=regularizer1) as tower:
        with tf.variable_scope("foo", initializer=init):
          v = tf.get_variable("v", [])
          sess.run(tf.initialize_variables([v]))
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(1, len(losses))
          self.assertAllClose(losses[0].eval(), 0.4)
        with tf.variable_scope(tower, initializer=init) as vs:
          u = tf.get_variable("u", [])
          vs.set_regularizer(regularizer2)
          w = tf.get_variable("w", [])
          # Next 3 variables are not regularized, to test disabling regularization.
          x = tf.get_variable("x", [], regularizer=tf.no_regularizer)
          with tf.variable_scope("baz", regularizer=tf.no_regularizer):
            y = tf.get_variable("y", [])
          vs.set_regularizer(tf.no_regularizer)
          z = tf.get_variable("z", [])
          # Check results.
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))
          sess.run(tf.initialize_variables([u, w, x, y, z]))
          self.assertAllClose(losses[0].eval(), 0.4)
          self.assertAllClose(losses[1].eval(), 0.4)
          self.assertAllClose(losses[2].eval(), 0.5)
        with tf.variable_scope("foo", reuse=True):
          v = tf.get_variable("v", [])  # "v" is already there, reused
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))  # No new loss added. 
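
The counts the test asserts follow from how tf.no_regularizer is defined in TF 1.x (paraphrased from the TensorFlow source; an assumption worth checking against your version): it simply returns None, and tf.get_variable only records a regularization loss when the regularizer returns a tensor.

def no_regularizer(_):
  """Use this function to prevent regularization of variables."""
  return None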
Example #4
Source File: network.py    From MSDS-RCNN with MIT License
def create_architecture_demo(self, mode, num_classes, tag=None,
                          anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    assert mode == 'TEST', 'only for demo'

    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._lwir = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode
    self._anchor_scales = anchor_scales
    self._num_scales = len(anchor_scales)

    self._anchor_ratios = anchor_ratios
    self._num_ratios = len(anchor_ratios)

    self._num_anchors = self._num_scales * self._num_ratios

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # select initializers
    if cfg.TRAIN.TRUNCATED:
      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
      initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

    # list as many types of layers as possible, even if they are not used now
    with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer, 
                    biases_initializer=tf.constant_initializer(0.0)):
      self._build_network(training, initializer, initializer_bbox)

    layers_to_output = {}
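    # Demo mode only builds the graph; there are no extra outputs to return.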

    return layers_to_output 
Example #5
Source File: network.py    From iter-reason with MIT License
def create_architecture(self, mode, num_classes, tag=None):
    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._memory_size = tf.placeholder(tf.int32, shape=[2])
    self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
    self._num_gt = tf.placeholder(tf.int32, shape=[])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # list as many types of layers as possible, even if they are not used now
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer, 
                    biases_initializer=tf.constant_initializer(0.0)): 
      rois, cls_prob = self._build_network(training)

    layers_to_output = {'rois': rois}

    if not testing:
      self._add_losses()
      layers_to_output.update(self._losses)
      val_summaries = []
      with tf.device("/cpu:0"):
        val_summaries.append(self._add_gt_image_summary())
        val_summaries.append(self._add_pred_summary())
        for key, var in self._event_summaries.items():
          val_summaries.append(tf.summary.scalar(key, var))
        for key, var in self._score_summaries.items():
          self._add_score_summary(key, var)
        for var in self._act_summaries:
          self._add_act_summary(var)

      self._summary_op = tf.summary.merge_all()
      self._summary_op_val = tf.summary.merge(val_summaries)

    layers_to_output.update(self._predictions)

    return layers_to_output 
Example #6
Source File: base_memory.py    From iter-reason with MIT License
def create_architecture(self, mode, num_classes, tag=None):
    self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    self._im_info = tf.placeholder(tf.float32, shape=[3])
    self._memory_size = tf.placeholder(tf.int32, shape=[2])
    self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
    self._count_base = tf.ones([1, cfg.MEM.CROP_SIZE, cfg.MEM.CROP_SIZE, 1])
    self._num_gt = tf.placeholder(tf.int32, shape=[])
    self._tag = tag

    self._num_classes = num_classes
    self._mode = mode

    training = mode == 'TRAIN'
    testing = mode == 'TEST'

    assert tag is not None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    if cfg.TRAIN.BIAS_DECAY:
      biases_regularizer = weights_regularizer
    else:
      biases_regularizer = tf.no_regularizer

    # list as many types of layers as possible, even if they are not used now
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], 
                        weights_regularizer=weights_regularizer,
                        biases_regularizer=biases_regularizer, 
                        biases_initializer=tf.constant_initializer(0.0)): 
      rois = self._build_memory(training, testing)

    layers_to_output = {'rois': rois}

    if not testing:
      self._add_memory_losses("loss")
      layers_to_output.update(self._losses)
      self._create_summary()

    layers_to_output.update(self._predictions)

    return layers_to_output

Example #7
Source File: base.py    From PoseFix_RELEASE with MIT License
def _make_graph(self):
        self.logger.info("Generating training graph on {} GPUs ...".format(self.cfg.num_gpus))

        weights_initializer = slim.xavier_initializer()
        biases_initializer = tf.constant_initializer(0.)
        biases_regularizer = tf.no_regularizer
        weights_regularizer = tf.contrib.layers.l2_regularizer(self.cfg.weight_decay)

        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cfg.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        # Force all Variables to reside on the CPU.
                        with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                            with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                                                 slim.conv2d_transpose, slim.separable_conv2d,
                                                 slim.fully_connected],
                                                weights_regularizer=weights_regularizer,
                                                biases_regularizer=biases_regularizer,
                                                weights_initializer=weights_initializer,
                                                biases_initializer=biases_initializer):
                                # loss over single GPU
                                self.net.make_network(is_train=True)
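                                # include_wd=True adds the weight-decay (regularization) loss once, on the last tower only.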
                                if i == self.cfg.num_gpus - 1:
                                    loss = self.net.get_loss(include_wd=True)
                                else:
                                    loss = self.net.get_loss()
                                self._input_list.append( self.net.get_inputs() )

                        tf.get_variable_scope().reuse_variables()

                        if i == 0:
                            if self.cfg.num_gpus > 1 and self.cfg.bn_train is True:
                                self.logger.warning("BN is calculated only on single GPU.")
                            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
                            with tf.control_dependencies(extra_update_ops):
                                grads = self._optimizer.compute_gradients(loss)
                        else:
                            grads = self._optimizer.compute_gradients(loss)
                        final_grads = []
                        with tf.variable_scope('Gradient_Mult') as scope:
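                            # Pass-through: gradients are appended unchanged (no per-variable multipliers applied).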
                            for grad, var in grads:
                                final_grads.append((grad, var))
                        tower_grads.append(final_grads)

        if len(tower_grads) > 1:
            grads = average_gradients(tower_grads)
        else:
            grads = tower_grads[0]

        apply_gradient_op = self._optimizer.apply_gradients(grads)
        train_op = tf.group(apply_gradient_op, *extra_update_ops)

        return train_op