Python tensorflow.contrib.layers.l2_regularizer() Examples

The following are 30 code examples of tensorflow.contrib.layers.l2_regularizer(), drawn from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other functions and classes available in the tensorflow.contrib.layers module.
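Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming TensorFlow 1.x with the contrib module available) of the typical pattern: l2_regularizer(scale) returns a callable, the callable is attached to variables or layers, and TensorFlow collects the resulting penalties so they can be added to the training loss.

import tensorflow as tf
from tensorflow.contrib import layers

# l2_regularizer(scale) returns a function computing scale * tf.nn.l2_loss(w),
# i.e. 0.5 * scale * sum(w ** 2).
regularizer = layers.l2_regularizer(scale=1e-4)

# Attach it directly to a variable; the penalty is added to the
# tf.GraphKeys.REGULARIZATION_LOSSES collection automatically.
w = tf.get_variable('w', shape=[128, 10], regularizer=regularizer)

# Or pass it to a layer, as most of the examples below do.
x = tf.placeholder(tf.float32, shape=[None, 128])
logits = tf.layers.dense(x, 10, kernel_regularizer=regularizer)

# Sum the collected penalties and add them to the data loss.
reg_loss = tf.losses.get_regularization_loss()
# total_loss = data_loss + reg_loss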
Example #1
Source File: medgan.py    From medgan with BSD 3-Clause "New" or "Revised" License
def getDiscriminatorResults(self, x_input, keepRate, reuse=False):
        batchSize = tf.shape(x_input)[0]
        inputMean = tf.reshape(tf.tile(tf.reduce_mean(x_input,0), [batchSize]), (batchSize, self.inputDim))
        tempVec = tf.concat([x_input, inputMean], 1)
        tempDim = self.inputDim * 2
        with tf.variable_scope('discriminator', reuse=reuse, regularizer=l2_regularizer(self.l2scale)):
            for i, discDim in enumerate(self.discriminatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, discDim])
                b = tf.get_variable('b_'+str(i), shape=[discDim])
                h = self.discriminatorActivation(tf.add(tf.matmul(tempVec,W),b))
                h = tf.nn.dropout(h, keepRate)
                tempVec = h
                tempDim = discDim
            W = tf.get_variable('W', shape=[tempDim, 1])
            b = tf.get_variable('b', shape=[1])
            y_hat = tf.squeeze(tf.nn.sigmoid(tf.add(tf.matmul(tempVec, W), b)))
        return y_hat 
Example #2
Source File: mru.py    From SketchySceneColorization with MIT License
def embed_labels(inputs, num_classes, output_dim, sn,
                 weight_decay_rate=1e-5,
                 reuse=None, scope=None):
    # TODO move regularizer definitions to model
    weights_regularizer = ly.l2_regularizer(weight_decay_rate)

    with tf.variable_scope(scope, 'embedding', [inputs], reuse=reuse) as sc:
        inputs = tf.convert_to_tensor(inputs)

        weights = tf.get_variable(name="weights", shape=(num_classes, output_dim),
                                  initializer=init_ops.random_normal_initializer)

        # Spectral Normalization
        if sn:
            weights = spectral_normed_weight(weights, num_iters=1, update_collection=Config.SPECTRAL_NORM_UPDATE_OPS)

        embed_out = tf.nn.embedding_lookup(weights, inputs)

    return embed_out 
Example #3
Source File: film_resnet_model.py    From tensor2robot with Apache License 2.0
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
                         weight_decay):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and is based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)

  if weight_decay is not None:
    weight_decay = contrib_layers.l2_regularizer(weight_decay)

  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      kernel_regularizer=weight_decay,
      data_format=data_format) 
Example #4
Source File: hybrid_model.py    From deep_image_model with Apache License 2.0
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength) 
Example #5
Source File: build_resnet.py    From tensorflow-litterbox with Apache License 2.0
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc 
Example #6
Source File: hybrid_model.py    From lambda-packs with MIT License
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or framework_variables.VariableDeviceChooser())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength) 
Example #7
Source File: hybrid_model.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength) 
Example #8
Source File: layers.py    From tensornets with MIT License
def darkconv(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    onlyconv = kwargs.pop('onlyconv', False)
    with tf.variable_scope(scope):
        conv_kwargs = {
            'padding': 'SAME',
            'activation_fn': None,
            'weights_initializer': variance_scaling_initializer(1.53846),
            'weights_regularizer': l2(5e-4),
            'biases_initializer': None,
            'scope': 'conv'}
        if onlyconv:
            conv_kwargs.pop('biases_initializer')
        with arg_scope([conv2d], **conv_kwargs):
            x = conv2d(*args, **kwargs)
            if onlyconv: return x
            x = batch_norm(x, decay=0.99, center=False, scale=True,
                           epsilon=1e-5, scope='bn')
            x = bias_add(x, scope='bias')
            x = leaky_relu(x, alpha=0.1, name='lrelu')
            return x 
Example #9
Source File: resnet.py    From TF_Face_Toolbox with Apache License 2.0
def forward(self, images, num_classes=None, is_training=True):
    assert num_classes is not None, 'num_classes must be given when is_training=True'
    # Forward
    features, _ = self.backbone(images, is_training=is_training)
    # Logits
    with tf.variable_scope('classifier'):
      features_drop = layers.dropout(features, keep_prob=0.5, is_training=is_training)
      logit = layers.fully_connected(features_drop, num_classes, activation_fn=None, 
                                     weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                     weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                     biases_initializer=None,
                                     scope='fc_classifier')
    logits = {}
    logits['logits'] = logit

    return logits 
Example #10
Source File: sphere.py    From TF_Face_Toolbox with Apache License 2.0
def forward(self, images, num_classes=None, is_training=True):
    if is_training:
      assert num_classes is not None, 'num_classes must be given when is_training=True'
      # Forward
      features = self.backbone(images, is_training=is_training)
      # Logits
      with tf.variable_scope('classifier'):
        print(features)
        logit = layers.fully_connected(features, num_classes, activation_fn=None, 
                                       weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                       weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                       biases_initializer=None,
                                       scope='fc_classifier')
      print(num_classes)
      logits = {}
      logits['logits'] = logit

      return logits
    else:
      features = self.backbone(images, is_training=is_training)
      features_flipped = self.backbone(tf.reverse(images, axis=[2]), is_training=is_training, reuse=True)
      features = (features+features_flipped)/2

      return features 
Example #11
Source File: model_deploy_test.py    From aster with MIT License
def testNoSummariesOnGPU(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = layers.l2_regularizer(0.001)
        layers.fully_connected(inputs, 30, weights_regularizer=reg)

      model = model_deploy.deploy(
          deploy_config, ModelFn,
          optimizer=tf.train.GradientDescentOptimizer(1.0))
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device) 
Example #12
Source File: model_deploy_test.py    From aster with MIT License
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = layers.l2_regularizer(0.001)
        layers.fully_connected(inputs, 30, weights_regularizer=reg)

      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device) 
Example #13
Source File: hyperparams_builder.py    From aster with MIT License
def _build_regularizer(regularizer):
  """Builds a regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return layers.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    return layers.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
  raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) 
Example #14
Source File: humanEncoder_ablation.py    From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License
def forward(self, dec_in, reuse=False, trainable=True, is_training=True):
        with tf.variable_scope(self.name_scope) as vs:
            if (reuse):
                vs.reuse_variables()
            lrelu = VAE.lrelu

            dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)


            h0 = tcl.fully_connected(dec_in_enc, 512, scope="fc3", activation_fn=lrelu,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term))

            h0 = tcl.dropout(h0, 0.5, is_training=is_training)

            h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term), )

            h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

            return h0 
Example #15
Source File: humanEncoder_ablation.py    From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License
def forward(self, data_input, input_class,
                reuse=False, trainable=True, is_training=True):
        with tf.variable_scope(self.name_scope) as vs:
            if (reuse):
                vs.reuse_variables()

            dec_in_enc = self.encoder.forward(data_input, reuse=reuse, trainable=trainable, is_training=is_training,
                                              with_batchnorm=False)

            dec_in_enc = tf.nn.relu(dec_in_enc)

            y = tf.concat([dec_in_enc, input_class], 1)

            h0 = tcl.fully_connected(y, self.nfilters * 8, scope="fc1",
                                     weights_regularizer=tcl.l2_regularizer(self.re_term))

            return tcl.fully_connected(h0, 1, activation_fn=None, weights_regularizer=tcl.l2_regularizer(self.re_term)) 
Example #16
Source File: medgan.py    From medgan with BSD 3-Clause "New" or "Revised" License
def buildGeneratorTest(self, x_input, bn_train):
        tempVec = x_input
        tempDim = self.randomDim
        with tf.variable_scope('generator', regularizer=l2_regularizer(self.l2scale)):
            for i, genDim in enumerate(self.generatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, genDim])
                h = tf.matmul(tempVec,W)
                h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None, trainable=False)
                h3 = self.generatorActivation(h2)
                tempVec = h3 + tempVec
                tempDim = genDim
            W = tf.get_variable('W'+str(i), shape=[tempDim, self.generatorDims[-1]])
            h = tf.matmul(tempVec,W)
            h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None, trainable=False)

            if self.dataType == 'binary':
                h3 = tf.nn.tanh(h2)
            else:
                h3 = tf.nn.relu(h2)

            output = h3 + tempVec
        return output 
Example #17
Source File: medgan.py    From medgan with BSD 3-Clause "New" or "Revised" License
def buildGenerator(self, x_input, bn_train):
        tempVec = x_input
        tempDim = self.randomDim
        with tf.variable_scope('generator', regularizer=l2_regularizer(self.l2scale)):
            for i, genDim in enumerate(self.generatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, genDim])
                h = tf.matmul(tempVec,W)
                h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None)
                h3 = self.generatorActivation(h2)
                tempVec = h3 + tempVec
                tempDim = genDim
            W = tf.get_variable('W'+str(i), shape=[tempDim, self.generatorDims[-1]])
            h = tf.matmul(tempVec,W)
            h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None)

            if self.dataType == 'binary':
                h3 = tf.nn.tanh(h2)
            else:
                h3 = tf.nn.relu(h2)

            output = h3 + tempVec
        return output 
Example #18
Source File: hybrid_model.py    From keras-lambda with MIT License
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength) 
Example #19
Source File: model_deploy_test.py    From DirectML with MIT License
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = contrib_layers.l2_regularizer(0.001)
        contrib_layers.fully_connected(inputs, 30, weights_regularizer=reg)

      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device) 
Example #20
Source File: model_deploy_test.py    From DirectML with MIT License
def testNoSummariesOnGPU(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = contrib_layers.l2_regularizer(0.001)
        contrib_layers.fully_connected(inputs, 30, weights_regularizer=reg)

      model = model_deploy.deploy(
          deploy_config, ModelFn,
          optimizer=tf.train.GradientDescentOptimizer(1.0))
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device) 
Example #21
Source File: nas_network.py    From models with Apache License 2.0
def nas_arg_scope(weight_decay=4e-5,
                  batch_norm_decay=0.9997,
                  batch_norm_epsilon=0.001,
                  sync_batch_norm_method='None'):
  """Default arg scope for the NAS models."""
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      'scale': True,
  }
  batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
  weights_initializer = contrib_layers.variance_scaling_initializer(
      factor=1 / 3.0, mode='FAN_IN', uniform=True)
  with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                 weights_regularizer=weights_regularizer,
                 weights_initializer=weights_initializer):
    with arg_scope([slim.fully_connected],
                   activation_fn=None, scope='FC'):
      with arg_scope([slim.conv2d, slim.separable_conv2d],
                     activation_fn=None, biases_initializer=None):
        with arg_scope([batch_norm], **batch_norm_params) as sc:
          return sc 
Example #22
Source File: MultiVae_Dae.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def build_graph(self):

        self.construct_weights()

        saver, logits = self.forward_pass()
        log_softmax_var = tf.nn.log_softmax(logits)

        # per-user average negative log-likelihood
        neg_ll = -tf.reduce_mean(tf.reduce_sum(
            log_softmax_var * self.input_ph, axis=1))
        # apply regularization to weights
        reg = l2_regularizer(self.lam)
        reg_var = apply_regularization(reg, self.weights)
        # TensorFlow's l2 regularization multiplies the squared l2 norm by 0.5,
        # so multiply by 2 to bring it back to the original scale
        loss = neg_ll + 2 * reg_var

        train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)

        # add summary statistics
        tf.summary.scalar('negative_multi_ll', neg_ll)
        tf.summary.scalar('loss', loss)
        merged = tf.summary.merge_all()
        return saver, logits, loss, train_op, merged 
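The comments about the factor of 0.5 in the example above reflect the fact that l2_regularizer is implemented in terms of tf.nn.l2_loss, which halves the squared norm. A small sketch (hypothetical values, not part of the original project) that checks this equivalence:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import l2_regularizer

lam = 0.01
w = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))

penalty = l2_regularizer(lam)(w)                     # lam * tf.nn.l2_loss(w)
reference = 0.5 * lam * tf.reduce_sum(tf.square(w))  # same value

with tf.Session() as sess:
    p, r = sess.run([penalty, reference])
    assert np.isclose(p, r)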
Example #23
Source File: modules.py    From squad-transformer with Apache License 2.0
def build_graph(self, q, k, v, seq_len, mask, scope="MultiHeadAttn", reuse=None):
        with tf.variable_scope(scope, reuse=reuse):
            # Project each of q, k, v linearly
            q, k, v = self._project(q, k, v, reuse=reuse)
            # Split each of q, k, v to prepare for scaled dot product in parallel
            q, k, v = self._split(q, k, v, seq_len)
            # Perform scaled dot-product attention on q, k, v
            sdp_attn = ScaledDotProductAttn()
            attn_outputs = sdp_attn.build_graph(q, k, v, mask)
            # Merge the outputs of each head
            attn_outputs = self._concat(attn_outputs, seq_len)
            # Linear transform to project back to model dimension
            attn_outputs = tf_layers.fully_connected(attn_outputs,
                                                     self.d_model,
                                                     biases_initializer=None,
                                                     activation_fn=None,
                                                     weights_regularizer=tf_layers.l2_regularizer(scale=self.l2_lambda),
                                                     scope="OutputTransform",
                                                     reuse=reuse)

        return attn_outputs 
Example #24
Source File: modules.py    From squad-transformer with Apache License 2.0
def _project(self, q, k, v, scope="Linearity", reuse=None):
        """Project queries, keys, values with a linear layer.

        Note: We project the inputs for q, k, v *before* splitting to prepare inputs for each head.
        This differs from the order in "Attention Is All You Need," but is functionally equivalent.
        """
        def _project_one(x, d, inner_scope):
            return tf_layers.fully_connected(x, d, activation_fn=None, biases_initializer=None,
                                             weights_regularizer=tf_layers.l2_regularizer(scale=self.l2_lambda),
                                             scope=inner_scope, reuse=reuse)

        with tf.variable_scope(scope, reuse=reuse):
            q_projected = _project_one(q, self.d_model, "q")
            k_projected = _project_one(k, self.d_model, "k")
            v_projected = _project_one(v, self.d_model, "v")

        return q_projected, k_projected, v_projected 
Example #25
Source File: humanEncoder_cmu.py    From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
        with tf.variable_scope(self.name_scope) as vs:
            if (reuse):
                vs.reuse_variables()
            lrelu = VAE.lrelu

            dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)

            y = tf.concat([decoder_hidden, dec_in_enc], 1)

            h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term))

            h0 = tcl.dropout(h0, 0.5, is_training=is_training)

            h0 = tcl.fully_connected(h0, 70, scope="fc4", activation_fn=None,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term), )

            h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

            return h0 
Example #26
Source File: humanEncoder.py    From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
        with tf.variable_scope(self.name_scope) as vs:
            if(reuse):
                vs.reuse_variables()
            lrelu = VAE.lrelu

            dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)

            y = tf.concat([decoder_hidden, dec_in_enc], 1)

            h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term))

            h0 = tcl.dropout(h0, 0.5, is_training=is_training)

            h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                     weights_regularizer=tcl.l2_regularizer(self.re_term))

            h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

            return h0 
Example #27
Source File: conv2d_ws_test.py    From models with Apache License 2.0
def testReuseConvWithWD(self):
    height, width = 7, 9
    with self.cached_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      weight_decay = contrib_layers.l2_regularizer(0.01)
      with contrib_framework.arg_scope([conv2d_ws.conv2d],
                                       weights_regularizer=weight_decay):
        conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
        self.assertEqual(len(contrib_framework.get_variables()), 2)
        self.assertEqual(
            len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
        conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
        self.assertEqual(len(contrib_framework.get_variables()), 2)
        self.assertEqual(
            len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) 
Example #28
Source File: utils.py    From s4l with Apache License 2.0
def linear(inputs, num_outputs, name, reuse=tf.AUTO_REUSE, weight_decay="flag"):
  """A linear layer on the inputs."""
  if weight_decay == "flag":
    weight_decay = flags.FLAGS.weight_decay

  kernel_regularizer = l2_regularizer(scale=weight_decay)
  logits = tf.layers.conv2d(
      inputs,
      filters=num_outputs,
      kernel_size=1,
      kernel_regularizer=kernel_regularizer,
      name=name,
      reuse=reuse)

  return tf.squeeze(logits, [1, 2]) 
Example #29
Source File: MultiVae_Dae.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def build_graph(self):
        self._construct_weights()

        saver, logits, KL = self.forward_pass()
        log_softmax_var = tf.nn.log_softmax(logits)

        neg_ll = -tf.reduce_mean(tf.reduce_sum(
            log_softmax_var * self.input_ph,
            axis=-1))
        # apply regularization to weights
        reg = l2_regularizer(self.lam)

        reg_var = apply_regularization(reg, self.weights_q + self.weights_p)
        # TensorFlow's l2 regularization multiplies the squared l2 norm by 0.5,
        # so multiply by 2 to bring it back to the original scale
        neg_ELBO = neg_ll + self.anneal_ph * KL + 2 * reg_var

        train_op = tf.train.AdamOptimizer(self.lr).minimize(neg_ELBO)

        # add summary statistics
        tf.summary.scalar('negative_multi_ll', neg_ll)
        tf.summary.scalar('KL', KL)
        tf.summary.scalar('neg_ELBO_train', neg_ELBO)
        merged = tf.summary.merge_all()

        return saver, logits, neg_ELBO, train_op, merged 
Example #30
Source File: off.py    From Optical-Flow-Guided-Feature with MIT License
def off_unit_first(feature_t0, feature_t1):
    with tf.variable_scope('off_unit_first', values=[feature_t0, feature_t1]):
        # feature_t0 = batch_norm(feature_t0)
        # feature_t0 = tf.nn.relu(feature_t0)
        feature_t0 = conv2d(feature_t0, _NUM_CHANELS, 1, padding='SAME',
                            # weights_initializer=tf.truncated_normal_initializer(0, 0.01),
                            # biases_initializer=tf.zeros_initializer,
                            # weights_regularizer=l2_regularizer(1e-3),
                            # biases_regularizer=l2_regularizer(0.0001),
                            # normalizer_fn=batch_norm,
                            scope='conv1x1_t0')

        # feature_t1 = batch_norm(feature_t1)
        # feature_t1 = tf.nn.relu(feature_t1)
        feature_t1 = conv2d(feature_t1, _NUM_CHANELS, 1, padding='SAME',
                            # weights_initializer=tf.truncated_normal_initializer(0, 0.01),
                            # biases_initializer=tf.zeros_initializer,
                            # weights_regularizer=l2_regularizer(1e-3),
                            # biases_regularizer=l2_regularizer(0.0001),
                            # normalizer_fn=batch_norm,
                            scope='conv1x1_t1')

        ft = tf.subtract(feature_t0, feature_t1)
        fx, fy = sobel(feature_t0)

        return tf.concat(
            [
                fx,
                fy,
                ft
            ],
            axis=3
        )