Python tensorflow.truncated_normal_initializer() Examples
The following are 30 code examples of tensorflow.truncated_normal_initializer(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the module tensorflow.
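Before the examples, a minimal sketch of the API itself. This is TF1-style code, like all of the examples below; in TF2 the same initializer lives on as tf.compat.v1.truncated_normal_initializer or, equivalently, tf.keras.initializers.TruncatedNormal. The shape here is illustrative.

import tensorflow as tf

# Values are drawn from a normal distribution with the given mean and
# stddev, and re-drawn whenever they fall more than two standard
# deviations from the mean, keeping outliers out of the initial weights.
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
w = tf.get_variable('w', shape=[784, 256], initializer=init)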
Example #1
Source File: model.py From DOTA_models with Apache License 2.0
def _Deconv(self, net, out_filters, kernel_size, stride):
    shape = net.get_shape().as_list()
    in_filters = shape[3]
    kernel_shape = [kernel_size, kernel_size, out_filters, in_filters]

    weights = tf.get_variable(
        name='weights',
        shape=kernel_shape,
        dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=0.01))

    out_height = shape[1] * stride
    out_width = shape[2] * stride
    batch_size = shape[0]

    output_shape = [batch_size, out_height, out_width, out_filters]
    net = tf.nn.conv2d_transpose(net, weights, output_shape,
                                 [1, stride, stride, 1], padding='SAME')
    # Assign the result: slim.batch_norm returns a new tensor, and the
    # original bare call discarded it, so normalization never took effect.
    net = slim.batch_norm(net)
    return net
Example #2
Source File: cifarnet.py From DeepLab_v3 with MIT License
def cifarnet_arg_scope(weight_decay=0.004):
    """Defines the default cifarnet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the cifarnet model.
    """
    # `trunc_normal` is a module-level helper in slim's cifarnet.py:
    # trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
    with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
            activation_fn=tf.nn.relu):
        with slim.arg_scope(
                [slim.fully_connected],
                biases_initializer=tf.constant_initializer(0.1),
                weights_initializer=trunc_normal(0.04),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                activation_fn=tf.nn.relu) as sc:
            return sc
Example #3
Source File: routing.py From CapsLayer with Apache License 2.0
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    R_shape = tf.shape(log_R)
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)),
                                  axis=-3, keepdims=True)

    beta_v = tf.get_variable(
        'beta_v',
        shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
        initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable(
        'beta_a',
        shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
        initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return pose, log_var, log_activation
Example #4
Source File: deep_cnn.py From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
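A minimal sketch of how this helper is typically used in the surrounding cifar10-style models; the shape and hyperparameters here are illustrative, not taken from this file:

# Illustrative: a 5x5 conv kernel whose L2 penalty lands in the 'losses'
# collection, which the model later sums into the total loss.
weights = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                      stddev=5e-2, wd=0.004)
total_loss = tf.add_n(tf.get_collection('losses'))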
Example #5
Source File: cifarnet.py From ctw-baseline with MIT License
def cifarnet_arg_scope(weight_decay=0.004):
    """Defines the default cifarnet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the cifarnet model.
    """
    with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
            activation_fn=tf.nn.relu):
        with slim.arg_scope(
                [slim.fully_connected],
                biases_initializer=tf.constant_initializer(0.1),
                weights_initializer=trunc_normal(0.04),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                activation_fn=tf.nn.relu) as sc:
            return sc
Example #6
Source File: cifarnet.py From STORK with MIT License
def cifarnet_arg_scope(weight_decay=0.004):
    """Defines the default cifarnet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the cifarnet model.
    """
    with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
            activation_fn=tf.nn.relu):
        with slim.arg_scope(
                [slim.fully_connected],
                biases_initializer=tf.constant_initializer(0.1),
                weights_initializer=trunc_normal(0.04),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                activation_fn=tf.nn.relu) as sc:
            return sc
Example #7
Source File: TDCFeaturizer.py From HardRLWithYoutube with MIT License
def _convolutional_layer(input, filters, strides, is_training):
    """Constructs a conv2d layer followed by batch normalization and max pooling."""
    x = tf.layers.conv2d(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=strides,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
    x = tf.layers.batch_normalization(x, training=is_training)
    output = tf.layers.max_pooling2d(x, 2, 2)
    return output
Example #8
Source File: VAEFeaturizer.py From HardRLWithYoutube with MIT License
def _deconvolutional_layer(input, is_training, filters):
    # Two transposed convolutional layers; each stride-2 layer doubles the
    # spatial dimensions, so the output is 4x the size of the input overall.
    output = tf.layers.conv2d_transpose(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
    # output = tf.layers.batch_normalization(output, training=is_training)
    output = tf.layers.conv2d_transpose(
        output,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
    # output = tf.layers.batch_normalization(output, training=is_training)
    return output
Example #9
Source File: cifar10.py From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name, shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Example #10
Source File: cifarnet.py From DOTA_models with Apache License 2.0
def cifarnet_arg_scope(weight_decay=0.004):
    """Defines the default cifarnet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the cifarnet model.
    """
    with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
            activation_fn=tf.nn.relu):
        with slim.arg_scope(
                [slim.fully_connected],
                biases_initializer=tf.constant_initializer(0.1),
                weights_initializer=trunc_normal(0.04),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                activation_fn=tf.nn.relu) as sc:
            return sc
Example #11
Source File: cifar10_reusable.py From blackbox-attacks with MIT License
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name, shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Example #12
Source File: mobilenet_v1.py From ctw-baseline with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
    """Defines the default MobilenetV1 arg scope.

    Args:
        is_training: Whether or not we're training the model.
        weight_decay: The weight decay to use for regularizing the model.
        stddev: The standard deviation of the truncated normal weight initializer.
        regularize_depthwise: Whether or not to apply regularization on depthwise.

    Returns:
        An `arg_scope` to use for the mobilenet v1 model.
    """
    batch_norm_params = {
        'is_training': is_training,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
    }

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
Example #13
Source File: ops.py From adagan with BSD 3-Clause "New" or "Revised" License
def conv2d(opts, input_, output_dim, d_h=2, d_w=2, scope=None,
           conv_filters_dim=None, padding='SAME', l2_norm=False):
    """Convolutional layer.

    Args:
        input_: should be a 4d tensor with [num_points, dim1, dim2, dim3].
    """
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    k_h = conv_filters_dim
    k_w = k_h

    assert len(shape) == 4, 'Conv2d works only with 4d tensors.'

    with tf.variable_scope(scope or 'conv2d'):
        w = tf.get_variable(
            'filter', [k_h, k_w, shape[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        if l2_norm:
            w = tf.nn.l2_normalize(w, 2)
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_dim], initializer=tf.constant_initializer(bias_start))
        conv = tf.nn.bias_add(conv, biases)
    return conv
Example #14
Source File: custom_init.py From GroundeR with MIT License
def msr_init(shape):
    # MSRA (He) initialization: stddev = sqrt(2 / n), where n is the total
    # number of weights in the tensor.
    n = 1.0
    for dim in shape:
        n *= float(dim)
    std = np.sqrt(2.0 / n)
    init = tf.truncated_normal_initializer(mean=0.0, stddev=std)
    return init
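Note that this helper folds every dimension of the shape into n, which makes it slightly more conservative than textbook He initialization, where n counts only the fan-in. A hypothetical usage, with an illustrative shape:

# Illustrative only: a 3x3 conv kernel mapping 64 to 128 channels.
kernel_shape = [3, 3, 64, 128]
w = tf.get_variable('conv_w', kernel_shape, initializer=msr_init(kernel_shape))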
Example #15
Source File: alexnet.py From tf-lcnn with GNU General Public License v3.0
def alex_conv_pool_layer(tensor_in, n_filters, kernel_size, stride, pool_size,
                         pool_stride, param_lambda,
                         bias_initializer=tf.zeros_initializer(),
                         activation_fn=tf.nn.relu, padding='SAME',
                         convtype='conv', dict_size=None, init_sparsity=None):
    if convtype == 'lcnn':
        conv = lookup_conv2d(tensor_in,
                             dict_size=dict_size,
                             initial_sparsity=init_sparsity,
                             param_lambda=param_lambda,
                             stride=stride,
                             num_outputs=n_filters,
                             kernel_size=kernel_size,
                             activation_fn=activation_fn,
                             biases_initializer=bias_initializer,
                             padding=2)
    else:
        conv = tf.contrib.layers.convolution2d(
            tensor_in,
            num_outputs=n_filters,
            kernel_size=kernel_size,
            stride=stride,
            activation_fn=activation_fn,
            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
            biases_initializer=bias_initializer,
            padding=padding)
    conv = tf.nn.lrn(conv, bias=1.0, depth_radius=5, alpha=0.0001, beta=0.75)
    pool = tf.nn.max_pool(conv, ksize=pool_size, strides=pool_stride,
                          padding=padding)
    return pool
Example #16
Source File: ops.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')

        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())

        return conv
Example #17
Source File: lenet.py From ctw-baseline with MIT License
def lenet_arg_scope(weight_decay=0.0):
    """Defines the default lenet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the lenet model.
    """
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            activation_fn=tf.nn.relu) as sc:
        return sc
Example #18
Source File: utils.py From tf-lcnn with GNU General Public License v3.0
def dense_layer(tensor_in, layers, activation_fn=tf.nn.tanh, keep_prob=None):
    tensor_out = tensor_in
    for idx, layer in enumerate(layers):
        tensor_out = tf.contrib.layers.fully_connected(
            tensor_out, layer,
            activation_fn=activation_fn,
            weights_initializer=tf.truncated_normal_initializer(0.0, 0.005),
            biases_initializer=tf.constant_initializer(0.001))
        tensor_out = tf.contrib.layers.dropout(tensor_out, keep_prob=keep_prob)
    return tensor_out
Example #19
Source File: resnet.py From KittiSeg with MIT License
def _conv(x, filters_out, ksize=3, stride=1):
    filters_in = x.get_shape()[-1]
    shape = [ksize, ksize, filters_in, filters_out]
    initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
    weights = _get_variable('weights',
                            shape=shape,
                            dtype='float',
                            initializer=initializer,
                            weight_decay=CONV_WEIGHT_DECAY)
    return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
Example #20
Source File: run_squad.py From Extending-Google-BERT-as-Question-and-Answering-model-and-Chatbot with Apache License 2.0
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings):
    """Creates a classification model."""
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    final_hidden = model.get_sequence_output()

    final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
    batch_size = final_hidden_shape[0]
    seq_length = final_hidden_shape[1]
    hidden_size = final_hidden_shape[2]

    output_weights = tf.get_variable(
        "cls/squad/output_weights", [2, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
        "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

    final_hidden_matrix = tf.reshape(final_hidden,
                                     [batch_size * seq_length, hidden_size])
    logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)

    logits = tf.reshape(logits, [batch_size, seq_length, 2])
    logits = tf.transpose(logits, [2, 0, 1])

    unstacked_logits = tf.unstack(logits, axis=0)

    (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

    return (start_logits, end_logits)
Example #21
Source File: freeze_model.py From deep_sort with GNU General Public License v3.0
def create_link(
        incoming, network_builder, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
        regularizer=None, is_first=False, summarize_activations=True):
    if is_first:
        network = incoming
    else:
        network = _batch_norm_fn(incoming, scope=scope + "/bn")
        network = nonlinearity(network)
        if summarize_activations:
            tf.summary.histogram(scope + "/activations", network)

    pre_block_network = network
    post_block_network = network_builder(pre_block_network, scope)

    incoming_dim = pre_block_network.get_shape().as_list()[-1]
    outgoing_dim = post_block_network.get_shape().as_list()[-1]
    if incoming_dim != outgoing_dim:
        # The message originally read "2 * incoming" (a Tensor), which would
        # crash the %d format; "2 * incoming_dim" is clearly what was meant.
        assert outgoing_dim == 2 * incoming_dim, \
            "%d != %d" % (outgoing_dim, 2 * incoming_dim)
        projection = slim.conv2d(
            incoming, outgoing_dim, 1, 2, padding="SAME", activation_fn=None,
            scope=scope + "/projection", weights_initializer=weights_initializer,
            biases_initializer=None, weights_regularizer=regularizer)
        network = projection + post_block_network
    else:
        network = incoming + post_block_network
    return network
Example #22
Source File: audio_model.py From Tensorflow-Audio-Classification with Apache License 2.0
def define_audio_slim(training=False):
    """Defines the audio TensorFlow model.

    All ops are created in the current default graph, under the scope 'audio/'.

    The input is a placeholder named 'audio/vggish_input' of type float32 and
    shape [batch_size, feature_size] where batch_size is variable and
    feature_size is constant, and feature_size represents a VGGish output
    feature. The output is an op named 'audio/prediction' which produces the
    activations of a NUM_CLASSES layer.

    Args:
        training: If true, all parameters are marked trainable.

    Returns:
        The op 'audio/logits'.
    """
    with slim.arg_scope([slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                            stddev=params.INIT_STDDEV),
                        biases_initializer=tf.zeros_initializer(),
                        trainable=training), \
         tf.variable_scope('audio'):
        vggish_input = tf.placeholder(tf.float32,
                                      shape=[None, params.NUM_FEATURES],
                                      name='vggish_input')
        # Add a fully connected layer with NUM_UNITS units.
        fc = slim.fully_connected(vggish_input, params.NUM_UNITS)
        logits = slim.fully_connected(fc, params.NUM_CLASSES,
                                      activation_fn=None, scope='logits')
        tf.nn.softmax(logits, name='prediction')
        return logits
Example #23
Source File: lenet.py From STORK with MIT License
def lenet_arg_scope(weight_decay=0.0):
    """Defines the default lenet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the lenet model.
    """
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            activation_fn=tf.nn.relu) as sc:
        return sc
Example #24
Source File: mobilenet_v1.py From STORK with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
    """Defines the default MobilenetV1 arg scope.

    Args:
        is_training: Whether or not we're training the model.
        weight_decay: The weight decay to use for regularizing the model.
        stddev: The standard deviation of the truncated normal weight initializer.
        regularize_depthwise: Whether or not to apply regularization on depthwise.

    Returns:
        An `arg_scope` to use for the mobilenet v1 model.
    """
    batch_norm_params = {
        'is_training': is_training,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
    }

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
Example #25
Source File: resnet.py From tensorflow_multigpu_imagenet with MIT License
def getModel(net, num_output, wd, is_training,
             num_blocks=[3, 4, 6, 3],  # defaults to a 50-layer network
             bottleneck=True, transfer_mode=False):
    conv_weight_initializer = tf.truncated_normal_initializer(stddev=0.1)
    fc_weight_initializer = tf.truncated_normal_initializer(stddev=0.01)
    with tf.variable_scope('scale1'):
        net = spatialConvolution(net, 7, 2, 64,
                                 weight_initializer=conv_weight_initializer, wd=wd)
        net = batchNormalization(net, is_training=is_training)
        net = tf.nn.relu(net)
    with tf.variable_scope('scale2'):
        net = maxPool(net, 3, 2)
        net = resnetStack(net, num_blocks[0], 1, 64, bottleneck, wd=wd,
                          is_training=is_training)
    with tf.variable_scope('scale3'):
        net = resnetStack(net, num_blocks[1], 2, 128, bottleneck, wd=wd,
                          is_training=is_training)
    with tf.variable_scope('scale4'):
        net = resnetStack(net, num_blocks[2], 2, 256, bottleneck, wd=wd,
                          is_training=is_training)
    with tf.variable_scope('scale5'):
        net = resnetStack(net, num_blocks[3], 2, 512, bottleneck, wd=wd,
                          is_training=is_training)
    # post-net
    net = tf.reduce_mean(net, reduction_indices=[1, 2], name="avg_pool")
    with tf.variable_scope('output'):
        net = fullyConnected(net, num_output,
                             weight_initializer=fc_weight_initializer,
                             bias_initializer=tf.zeros_initializer,
                             wd=wd)
    return net
Example #26
Source File: ops.py From CartoonGAN-Tensorflow with MIT License
def spectral_norm(w, iteration=1):
    w_shape = w.shape.as_list()
    w = tf.reshape(w, [-1, w_shape[-1]])

    u = tf.get_variable("u", [1, w_shape[-1]],
                        initializer=tf.truncated_normal_initializer(),
                        trainable=False)

    u_hat = u
    v_hat = None
    for i in range(iteration):
        # Power iteration; usually iteration = 1 is enough.
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = l2_norm(v_)

        u_ = tf.matmul(v_hat, w)
        u_hat = l2_norm(u_)

    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    w_norm = w / sigma

    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = tf.reshape(w_norm, w_shape)

    return w_norm
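A sketch of how a wrapper like this is typically applied. It assumes the l2_norm helper defined elsewhere in this ops.py, an input tensor x, and illustrative layer names and shapes:

# Illustrative: spectrally normalize a conv kernel before use. The wrapper
# creates a persistent "u" variable, so call it inside a variable scope to
# give that variable a unique name per layer.
with tf.variable_scope('conv1'):
    w = tf.get_variable('kernel', [3, 3, 64, 128],
                        initializer=tf.truncated_normal_initializer(stddev=0.02))
    y = tf.nn.conv2d(x, spectral_norm(w), strides=[1, 1, 1, 1], padding='SAME')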
Example #27
Source File: ops.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def conv(name, inputs, k_size, nums_in, nums_out, strides):
    # Reflect-pad by k_size // 2 on each side, then convolve with VALID
    # padding: this preserves the spatial size at stride 1 while avoiding
    # the border artifacts of zero padding.
    pad_size = k_size // 2
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]],
                    mode="REFLECT")
    kernel = tf.get_variable(name + "W", [k_size, k_size, nums_in, nums_out],
                             initializer=tf.truncated_normal_initializer(stddev=0.01))
    bias = tf.get_variable(name + "B", [nums_out],
                           initializer=tf.constant_initializer(0.))
    return tf.nn.conv2d(inputs, kernel, [1, strides, strides, 1], "VALID") + bias
Example #28
Source File: modeling.py From tudouNLP with MIT License
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range.

    A thin wrapper around parameter initialization.
    """
    return tf.truncated_normal_initializer(stddev=initializer_range)
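In BERT, this wrapper is passed wherever a kernel initializer is needed. A minimal sketch, assuming a sequence_output tensor and a num_labels count from the surrounding model code:

# Illustrative: a projection layer initialized the BERT way.
logits = tf.layers.dense(
    sequence_output, num_labels,
    kernel_initializer=create_initializer(initializer_range=0.02))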
Example #29
Source File: freeze_model.py From deep_sort with GNU General Public License v3.0
def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        # The original passed 1e-3 positionally, which sets `mean` rather
        # than `stddev`; stddev=1e-3 is presumably what was intended, matching
        # the keyword form used in create_link above.
        weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    n = incoming.get_shape().as_list()[-1]
    stride = 1
    if increase_dim:
        n *= 2
        stride = 2

    incoming = slim.conv2d(
        incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)

    incoming = slim.dropout(incoming, keep_prob=0.6)

    incoming = slim.conv2d(
        incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
        normalizer_fn=None, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming
Example #30
Source File: freeze_model.py From deep_sort with GNU General Public License v3.0
def residual_block(incoming, scope, nonlinearity=tf.nn.elu,
                   # The original read truncated_normal_initializer(1e3):
                   # positionally that sets mean=1000; stddev=1e-3 is presumably
                   # intended, matching the rest of this file.
                   weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
                   bias_initializer=tf.zeros_initializer(), regularizer=None,
                   increase_dim=False, is_first=False,
                   summarize_activations=True):

    def network_builder(x, s):
        return create_inner_block(
            x, s, nonlinearity, weights_initializer, bias_initializer,
            regularizer, increase_dim, summarize_activations)

    return create_link(
        incoming, network_builder, scope,
        nonlinearity, weights_initializer, regularizer,
        is_first, summarize_activations)