Python tensorflow.zeros_initializer() Examples

The following are 29 code examples of tensorflow.zeros_initializer(). Each example notes the source file, project, and license it was taken from, so you can follow up in the original project for more context.
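In the TF 1.x API used throughout these examples, tf.zeros_initializer() returns an initializer object rather than a tensor: it takes no shape itself, and the variable-creation call supplies the shape when the variable is built. A minimal sketch (the variable name and shape are illustrative):

import tensorflow as tf  # TF 1.x-style API

# The initializer is shapeless; get_variable supplies the shape at creation time.
b = tf.get_variable('b', shape=[128], dtype=tf.float32,
                    initializer=tf.zeros_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # -> 128 zeros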
Example #1
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _resnet_branch_mode1(x, hidden_units, dropouts, training, seed=0):
    h1, h2, h3 = hidden_units
    dr1, dr2, dr3 = dropouts
    # branch 2
    x2 = tf.layers.Dense(h1, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 2), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr1, seed=seed * 1)(x2, training=training) if dr1 > 0 else x2

    x2 = tf.layers.Dense(h2, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 3), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr2, seed=seed * 2)(x2, training=training) if dr2 > 0 else x2

    x2 = tf.layers.Dense(h3, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 4), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)

    return x2 
Example #2
Source File: resnet_model.py    From benchmarks with The Unlicense
def resnet_bottleneck(l, ch_out, stride, stride_first=False):
    shortcut = l
    norm_relu = lambda x: tf.nn.relu(Norm(x))
    l = Conv2D('conv1', l, ch_out, 1, strides=stride if stride_first else 1, activation=norm_relu)
    """
    Sec 5.1:
    We use the ResNet-50 [16] variant from [12], noting that
    the stride-2 convolutions are on 3×3 layers instead of on 1×1 layers
    """
    l = Conv2D('conv2', l, ch_out, 3, strides=1 if stride_first else stride, activation=norm_relu)
    """
    Section 5.1:
    For BN layers, the learnable scaling coefficient γ is initialized
    to be 1, except for each residual block's last BN
    where γ is initialized to be 0.
    """
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=lambda x: Norm(x, gamma_initializer=tf.zeros_initializer()))
    ret = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=lambda x: Norm(x))
    return tf.nn.relu(ret, name='block_output') 
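The zero-gamma trick quoted above makes every residual block an identity mapping at initialization: with gamma = 0, the branch's final BN outputs zeros, so the block initially computes relu(shortcut). A minimal sketch of the same idea with the stock layers API (branch, shortcut, and is_training are hypothetical names, not part of this project):

# Zero-initialized gamma silences the residual branch at the start of training,
# so the block begins as relu(shortcut + 0) = relu(shortcut).
branch = tf.layers.batch_normalization(
    branch, gamma_initializer=tf.zeros_initializer(), training=is_training)
output = tf.nn.relu(shortcut + branch)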
Example #3
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _dense_block_mode2(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        # mode2 uses pre-activations: (optional BN ->) ReLU -> Dropout -> Dense.
        z = batch_normalization(x, training=training, name=name + "-" + str(i)) if bn else x
        z = tf.nn.relu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i), dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(z)
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #4
Source File: svdpp.py    From tf-recsys with MIT License
def _create_user_terms(self, users, N):
        num_users = self.num_users
        num_items = self.num_items
        num_factors = self.num_factors

        p_u, b_u = super(SVDPP, self)._create_user_terms(users)

        with tf.variable_scope('user'):
            implicit_feedback_embeddings = tf.get_variable(
                name='implict_feedback_embedding',
                shape=[num_items, num_factors],
                initializer=tf.zeros_initializer(),
                regularizer=tf.contrib.layers.l2_regularizer(self.reg_y_u))

            y_u = tf.gather(
                tf.nn.embedding_lookup_sparse(
                    implicit_feedback_embeddings,
                    N,
                    sp_weights=None,
                    combiner='sqrtn'),
                users,
                name='y_u'
            )

        return p_u, b_u, y_u 
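With combiner='sqrtn', tf.nn.embedding_lookup_sparse returns the sum of the looked-up rows divided by the square root of their count, i.e. sum of y_j over j in N(u), scaled by 1/sqrt(|N(u)|), which is exactly the SVD++ implicit-feedback term. A hedged, standalone sketch of that lookup (the interaction data is made up; implicit_feedback_embeddings is the variable created above):

# Hypothetical interactions: user 0 rated items {0, 2}; user 1 rated item {1}.
N = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                    values=tf.constant([0, 2, 1], dtype=tf.int64),
                    dense_shape=[2, 2])
# Row 0 is (y_0 + y_2) / sqrt(2); row 1 is y_1 / sqrt(1).
y = tf.nn.embedding_lookup_sparse(implicit_feedback_embeddings, N,
                                  sp_weights=None, combiner='sqrtn')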
Example #5
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _dense_block_mode1(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                            dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(x)
        if bn:
            z = batch_normalization(z, training=training, name=name+"-"+str(i))
        z = tf.nn.relu(z)
        # z = tf.nn.selu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #6
Source File: lstm_ops.py    From DOTA_models with Apache License 2.0
def init_state(inputs,
               state_shape,
               state_initializer=tf.zeros_initializer(),
               dtype=tf.float32):
  """Helper function to create an initial state given inputs.

  Args:
    inputs: input Tensor, at least 2D, the first dimension being batch_size
    state_shape: the shape of the state.
    state_initializer: Initializer(shape, dtype) for state Tensor.
    dtype: Optional dtype, needed when inputs is None.
  Returns:
     A tensor representing the initial state.
  """
  if inputs is not None:
    # Handle both the dynamic shape as well as the inferred shape.
    inferred_batch_size = inputs.get_shape().with_rank_at_least(1)[0]
    dtype = inputs.dtype
  else:
    inferred_batch_size = 0
  initial_state = state_initializer(
      [inferred_batch_size] + state_shape, dtype=dtype)
  return initial_state 
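A hedged usage sketch (the shapes are made up): with a statically known batch of 4 and state_shape=[32], the helper returns a [4, 32] tensor of zeros in the input's dtype.

inputs = tf.ones([4, 10], dtype=tf.float32)   # batch size inferred as 4
state = init_state(inputs, state_shape=[32])  # -> zeros of shape [4, 32]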
Example #7
Source File: common_layers.py    From fine-lm with MIT License
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalize the tensor x, averaging over the last dimension."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "layer_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
    if allow_defun:
      result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)
      result.set_shape(x.get_shape())
    else:
      result = layer_norm_compute_python(x, epsilon, scale, bias)
    return result 
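layer_norm_compute_python is defined elsewhere in common_layers.py; the following sketch is consistent with the scale and bias variables created above, normalizing over the last axis (a reconstruction, not the verbatim source):

def layer_norm_compute_python(x, epsilon, scale, bias):
    """Normalize x over its last dimension, then apply the learned scale and bias."""
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
    return norm_x * scale + bias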
Example #8
Source File: variables.py    From DOTA_models with Apache License 2.0
def global_step(device=''):
  """Returns the global step variable.

  Args:
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    the tensor representing the global step variable.
  """
  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
  if global_step_ref:
    return global_step_ref[0]
  else:
    collections = [
        VARIABLES_TO_RESTORE,
        tf.GraphKeys.GLOBAL_VARIABLES,
        tf.GraphKeys.GLOBAL_STEP,
    ]
    # Get the device for the variable.
    with tf.device(variable_device(device, 'global_step')):
      return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                             initializer=tf.zeros_initializer(),
                             trainable=False, collections=collections) 
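A hedged usage note: the returned variable is typically passed to the optimizer so every update increments it (optimizer and loss below are placeholders, not part of this module):

step = global_step()
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss, global_step=step)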
Example #9
Source File: generator.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0
def init_param(self):
		idm = self.input_dim
		hs = self.hidden_size
		ws = len(self.window)
		nf = idm * ws
		# author's special initialization strategy.
		self.Wemb = tf.get_variable(name=self.name + '_Wemb', shape=[self.vocab_size, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.bhid = tf.get_variable(name=self.name + '_bhid', shape=[self.vocab_size], dtype=tf.float32, initializer=tf.zeros_initializer())
		self.Vhid = tf.get_variable(name=self.name + '_Vhid', shape=[hs, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.Vhid = dot(self.Vhid, self.Wemb) # [hidden_size, vocab_size]
		self.i2h_W = tf.get_variable(name=self.name + '_i2h_W', shape=[idm, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.h2h_W = tf.get_variable(name=self.name + '_h2h_W', shape=[hs, hs * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer())
		self.z2h_W = tf.get_variable(name=self.name + '_z2h_W', shape=[nf, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		b_init_1 = tf.zeros((hs,))
		b_init_2 = tf.ones((hs,)) * 3
		b_init_3 = tf.zeros((hs,))
		b_init_4 = tf.zeros((hs,))
		b_init = tf.concat([b_init_1, b_init_2, b_init_3, b_init_4], axis=0)
		# b_init = tf.constant(b_init)
		# self.b = tf.get_variable(name=self.name + '_b', shape=[hs * 4], dtype=tf.float32, initializer=b_init)
		self.b = tf.get_variable(name=self.name + '_b', dtype=tf.float32, initializer=b_init) # ValueError: If initializer is a constant, do not specify shape.
		self.C0 = tf.get_variable(name=self.name + '_C0', shape=[nf, hs], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.b0 = tf.get_variable(name=self.name + '_b0', shape=[hs], dtype=tf.float32, initializer=tf.zeros_initializer()) 
Example #10
Source File: ops.py    From TEGAN with Apache License 2.0
def conv3d(inpt, f, output_channels, s, use_bias=False, scope='conv', name=None):
    inpt_shape = inpt.get_shape().as_list()
    with tf.variable_scope(scope):
        filtr = tf.get_variable(initializer=tf.contrib.layers.xavier_initializer(),
                                shape=[f,f,f,inpt_shape[-1],output_channels],name='filtr')
        
    strides = [1,s,s,s,1]
    output = conv3d_withPeriodicPadding(inpt,filtr,strides,name)
    
    if use_bias:
        with tf.variable_scope(scope):
            bias = tf.get_variable(name='bias', shape=[1, 1, 1, 1, output_channels],
                                   dtype=tf.float32, initializer=tf.zeros_initializer())
            output = output + bias
    
    return output 
Example #11
Source File: network_units.py    From DOTA_models with Apache License 2.0
def __init__(self, component, name, shape, dtype):
    """Construct variables to normalize an input of given shape.

    Arguments:
      component: ComponentBuilder handle.
      name: Human readable name to organize the variables.
      shape: Shape of the layer to be normalized.
      dtype: Type of the layer to be normalized.
    """
    self._name = name
    self._shape = shape
    self._component = component
    beta = tf.get_variable(
        'beta_%s' % name,
        shape=shape,
        dtype=dtype,
        initializer=tf.zeros_initializer())
    gamma = tf.get_variable(
        'gamma_%s' % name,
        shape=shape,
        dtype=dtype,
        initializer=tf.ones_initializer())
    self._params = [beta, gamma] 
Example #12
Source File: model.py    From DNA-GAN with MIT License
def make_fc_bn(self, name, X, out_dim):
        in_dim = X.get_shape().as_list()[-1]
        with tf.variable_scope(name) as scope:
            W = tf.get_variable('W',
                                shape=[in_dim, out_dim],
                                initializer=tf.random_normal_initializer(stddev=0.02),
                                )
            b = tf.get_variable('b',
                                shape=[out_dim],
                                initializer=tf.zeros_initializer(),
                                )
            X = tf.add(tf.matmul(X, W), b)
            return tf.layers.batch_normalization(X, training=self.is_train) 
Example #13
Source File: model.py    From DNA-GAN with MIT License
def make_fc(self, name, X, out_dim):
        in_dim = X.get_shape().as_list()[-1]
        with tf.variable_scope(name) as scope:
            W = tf.get_variable('W',
                                shape=[in_dim, out_dim],
                                initializer=tf.random_normal_initializer(stddev=0.02),
                                )
            b = tf.get_variable('b',
                                shape=[out_dim],
                                initializer=tf.zeros_initializer(),
                                )
            return tf.add(tf.matmul(X, W), b) 
Example #14
Source File: resnet.py    From tensorflow_multigpu_imagenet with MIT License
def getModel(net, num_output, wd, is_training, num_blocks=[3, 4, 6, 3],  # defaults to 50-layer network
            bottleneck= True, transfer_mode= False):

  conv_weight_initializer = tf.truncated_normal_initializer(stddev= 0.1)

  fc_weight_initializer = tf.truncated_normal_initializer(stddev= 0.01)

  with tf.variable_scope('scale1'):
    net = spatialConvolution(net, 7, 2, 64, weight_initializer= conv_weight_initializer, wd= wd)
    net = batchNormalization(net, is_training= is_training)
    net = tf.nn.relu(net)

  with tf.variable_scope('scale2'):
    net = maxPool(net, 3, 2)
    net = resnetStack(net, num_blocks[0], 1, 64, bottleneck, wd= wd, is_training= is_training)

  with tf.variable_scope('scale3'):
    net = resnetStack(net, num_blocks[1], 2, 128, bottleneck, wd= wd, is_training= is_training)

  with tf.variable_scope('scale4'):
    net = resnetStack(net, num_blocks[2], 2, 256, bottleneck, wd= wd, is_training= is_training)

  with tf.variable_scope('scale5'):
    net = resnetStack(net, num_blocks[3], 2, 512, bottleneck, wd= wd, is_training= is_training)

  # post-net
  net = tf.reduce_mean(net, reduction_indices= [1, 2], name= "avg_pool")

  with tf.variable_scope('output'):
    net = fullyConnected(net, num_output, weight_initializer= fc_weight_initializer, bias_initializer= tf.zeros_initializer, wd= wd)

  return net 
Example #15
Source File: distributions.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        mean = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        return self.pdfromflat(pdparam), mean 
Example #16
Source File: ops.py    From TEGAN with Apache License 2.0
def prelu_tf(inputs, name='Prelu'):
    with tf.variable_scope(name):
        alphas = tf.get_variable('alpha',inputs.get_shape()[-1],
                                 initializer=tf.zeros_initializer(),dtype=tf.float32)
    pos = tf.nn.relu(inputs)
    neg = alphas * (inputs - abs(inputs)) * 0.5

    return pos + neg 
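Since (inputs - abs(inputs)) * 0.5 equals min(0, inputs), this computes PReLU(x) = max(0, x) + alpha * min(0, x); with alpha zero-initialized, the activation starts out identical to plain ReLU. A tiny illustrative check:

x = tf.constant([-2.0, 3.0])
y = prelu_tf(x)  # at initialization alpha == 0, so y evaluates to [0.0, 3.0], same as relu(x)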
Example #17
Source File: keras_words_subtoken_metrics.py    From code2vec with MIT License
def __init__(self,
                 index_to_word_table: Optional[tf.lookup.StaticHashTable] = None,
                 topk_predicted_words=None,
                 predicted_words_filters: Optional[List[FilterType]] = None,
                 subtokens_delimiter: str = '|', name=None, dtype=None):
        super(WordsSubtokenMetricBase, self).__init__(name=name, dtype=dtype)
        self.tp = self.add_weight('true_positives', shape=(), initializer=tf.zeros_initializer)
        self.fp = self.add_weight('false_positives', shape=(), initializer=tf.zeros_initializer)
        self.fn = self.add_weight('false_negatives', shape=(), initializer=tf.zeros_initializer)
        self.index_to_word_table = index_to_word_table
        self.topk_predicted_words = topk_predicted_words
        self.predicted_words_filters = predicted_words_filters
        self.subtokens_delimiter = subtokens_delimiter 
Example #18
Source File: svdpp.py    From tf-recsys with MIT License
def _create_item_terms(self, items, H=None):
        num_users = self.num_users
        num_items = self.num_items
        num_factors = self.num_factors

        q_i, b_i = super(SVDPP, self)._create_item_terms(items)

        if H is None:
            return q_i, b_i
        else:
            with tf.variable_scope('item'):
                implicit_feedback_embeddings = tf.get_variable(
                    name='implict_feedback_embedding',
                    shape=[num_users, num_factors],
                    initializer=tf.zeros_initializer(),
                    regularizer=tf.contrib.layers.l2_regularizer(self.reg_g_i))

                g_i = tf.gather(
                    tf.nn.embedding_lookup_sparse(
                        implicit_feedback_embeddings,
                        H,
                        sp_weights=None,
                        combiner='sqrtn'),
                    items,
                    name='g_i'
                )

            return q_i, b_i, g_i 
Example #19
Source File: overfeat.py    From STORK with MIT License
def overfeat_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc 
Example #20
Source File: vgg.py    From STORK with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #21
Source File: common.py    From tensorflow_multigpu_imagenet with MIT License
def batchNormalization(x, is_training, decay= 0.9, epsilon= 0.001, inference_only= False):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]


    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta',
                         params_shape,
                         initializer= tf.zeros_initializer)
    gamma = _get_variable('gamma',
                          params_shape,
                          initializer= tf.ones_initializer)

    moving_mean = _get_variable('moving_mean',
                                params_shape,
                                initializer= tf.zeros_initializer,
                                trainable= False)
    moving_variance = _get_variable('moving_variance',
                                    params_shape,
                                    initializer= tf.ones_initializer,
                                    trainable= False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                               mean, decay)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_mean)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_variance)
    return tf.cond(is_training, lambda: tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon), lambda: tf.nn.batch_normalization(x, moving_mean, moving_variance, beta, gamma, epsilon))
    #return tf.contrib.layers.batch_norm(x, decay= decay, epsilon= epsilon, is_training= is_training)
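Because the moving-average updates above are only added to the UPDATE_OPS collection, the training op must depend on them or the running statistics will never change. The standard pattern (optimizer and loss are placeholders):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)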
Example #22
Source File: overfeat.py    From DeepLab_v3 with MIT License
def overfeat_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc 
Example #23
Source File: aggregators.py    From PathCon with MIT License
def __init__(self, batch_size, input_dim, output_dim, act=lambda x: x, self_included=True, name=None):
        super(CrossAggregator, self).__init__(batch_size, input_dim, output_dim, act, self_included, name)

        with tf.variable_scope(self.name):
            addition = self.input_dim if self.self_included else 0
            self.weights = tf.get_variable(shape=[self.input_dim * self.input_dim + addition, self.output_dim],
                                           initializer=tf.contrib.layers.xavier_initializer(),
                                           dtype=tf.float64,
                                           name='weights')
            self.bias = tf.get_variable(shape=[self.output_dim],
                                        initializer=tf.zeros_initializer(),
                                        dtype=tf.float64,
                                        name='bias') 
Example #24
Source File: aggregators.py    From PathCon with MIT License
def __init__(self, batch_size, input_dim, output_dim, act=lambda x: x, self_included=True, name=None):
        super(ConcatAggregator, self).__init__(batch_size, input_dim, output_dim, act, self_included, name)

        with tf.variable_scope(self.name):
            multiplier = 3 if self_included else 2
            self.weights = tf.get_variable(shape=[self.input_dim * multiplier, self.output_dim],
                                           initializer=tf.contrib.layers.xavier_initializer(),
                                           dtype=tf.float64,
                                           name='weights')
            self.bias = tf.get_variable(shape=[self.output_dim],
                                        initializer=tf.zeros_initializer(),
                                        dtype=tf.float64,
                                        name='bias') 
Example #25
Source File: aggregators.py    From PathCon with MIT License
def __init__(self, batch_size, input_dim, output_dim, act=lambda x: x, self_included=True, name=None):
        super(MeanAggregator, self).__init__(batch_size, input_dim, output_dim, act, self_included, name)

        with tf.variable_scope(self.name):
            self.weights = tf.get_variable(shape=[self.input_dim, self.output_dim],
                                           initializer=tf.contrib.layers.xavier_initializer(),
                                           dtype=tf.float64,
                                           name='weights')
            self.bias = tf.get_variable(shape=[self.output_dim],
                                        initializer=tf.zeros_initializer(),
                                        dtype=tf.float64,
                                        name='bias') 
Example #26
Source File: model.py    From PathCon with MIT License
def _get_weight_and_bias(input_dim, output_dim):
        weight = tf.get_variable('weight', [input_dim, output_dim], tf.float64, tf.contrib.layers.xavier_initializer())
        bias = tf.get_variable('bias', [output_dim], tf.float64, tf.zeros_initializer())
        return weight, bias 
Example #27
Source File: freeze_model.py    From deep_sort with GNU General Public License v3.0
def residual_block(incoming, scope, nonlinearity=tf.nn.elu,
                   weights_initializer=tf.truncated_normal_initializer(1e-3),
                   bias_initializer=tf.zeros_initializer(), regularizer=None,
                   increase_dim=False, is_first=False,
                   summarize_activations=True):

    def network_builder(x, s):
        return create_inner_block(
            x, s, nonlinearity, weights_initializer, bias_initializer,
            regularizer, increase_dim, summarize_activations)

    return create_link(
        incoming, network_builder, scope, nonlinearity, weights_initializer,
        regularizer, is_first, summarize_activations) 
Example #28
Source File: freeze_model.py    From deep_sort with GNU General Public License v3.0
def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    n = incoming.get_shape().as_list()[-1]
    stride = 1
    if increase_dim:
        n *= 2
        stride = 2

    incoming = slim.conv2d(
        incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)

    incoming = slim.dropout(incoming, keep_prob=0.6)

    incoming = slim.conv2d(
        incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
        normalizer_fn=None, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming 
Example #29
Source File: vgg.py    From DeepLab_v3 with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc