Python tensorflow.contrib.layers.xavier_initializer_conv2d() Examples
The following are 12 code examples of tensorflow.contrib.layers.xavier_initializer_conv2d(), drawn from open-source projects. You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.contrib.layers, or try the search function.
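For context: in tf.contrib.layers, xavier_initializer_conv2d is an alias of xavier_initializer, and both return variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=uniform), i.e. a Glorot (Xavier) initializer. In the examples below, names like xavier() and ly.xavier_initializer_conv2d() all refer to this same callable. A minimal TF1-style sketch (the variable name and shape are illustrative):

import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer_conv2d

# A 3x3 kernel with 16 input and 32 output channels, Glorot-initialized.
# uniform=True (the default) samples from a uniform distribution;
# uniform=False samples from a truncated normal instead.
kernel = tf.get_variable(
    'kernel', shape=[3, 3, 16, 32], dtype=tf.float32,
    initializer=xavier_initializer_conv2d(uniform=True))

In TF 2.x the contrib module is gone; tf.keras.initializers.GlorotUniform and GlorotNormal are the closest drop-in replacements.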
Example #1
Source File: utilities.py From versa with MIT License
def conv2d_transpose_layer(inputs, filters, activation, name):
    """
    A simple de-convolution layer.
    :param inputs: batch of inputs.
    :param filters: number of output filters.
    :param activation: activation function to use.
    :param name: name used to scope this operation.
    :return: batch of outputs.
    """
    return tf.layers.conv2d_transpose(
        inputs=inputs,
        filters=filters,
        kernel_size=(4, 4),
        strides=(2, 2),
        padding='same',
        activation=activation,
        data_format='channels_last',
        use_bias=False,
        kernel_initializer=xavier_initializer_conv2d(uniform=False),
        name=name,
        reuse=tf.AUTO_REUSE)
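A hypothetical call to this helper, doubling the spatial resolution of a feature map (the placeholder shape and names are illustrative, not from versa):

# With kernel (4, 4), stride (2, 2) and 'same' padding, a
# [batch, 8, 8, 64] input produces a [batch, 16, 16, 32] output.
features = tf.placeholder(tf.float32, [None, 8, 8, 64])
upsampled = conv2d_transpose_layer(features, filters=32,
                                   activation=tf.nn.relu, name='deconv_1')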
Example #2
Source File: mru.py From SketchySceneColorization with MIT License
def upsample_conv(inputs, num_outputs, kernel_size, sn,
                  activation_fn=None,
                  normalizer_fn=None, normalizer_params=None,
                  weights_regularizer=None,
                  weights_initializer=ly.xavier_initializer_conv2d(),
                  biases_initializer=tf.zeros_initializer(),
                  data_format='NCHW'):
    output = inputs
    output = tf.concat([output, output, output, output],
                       axis=1 if data_format == 'NCHW' else 3)
    if data_format == 'NCHW':
        output = tf.transpose(output, [0, 2, 3, 1])
    output = tf.depth_to_space(output, 2)
    if data_format == 'NCHW':
        output = tf.transpose(output, [0, 3, 1, 2])
    output = conv2d(output, num_outputs, kernel_size, sn=sn,
                    activation_fn=activation_fn,
                    normalizer_fn=normalizer_fn,
                    normalizer_params=normalizer_params,
                    weights_regularizer=weights_regularizer,
                    weights_initializer=weights_initializer,
                    biases_initializer=biases_initializer,
                    data_format=data_format)
    return output
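The concat-then-depth_to_space sequence implements nearest-neighbour 2x upsampling as a pure reshuffle, with no learned parameters. A minimal sketch of the same trick in isolation (the placeholder shape is illustrative):

x = tf.placeholder(tf.float32, [None, 8, 8, 64])   # NHWC
x4 = tf.concat([x, x, x, x], axis=3)               # [None, 8, 8, 256]
up = tf.depth_to_space(x4, 2)                      # [None, 16, 16, 64]
# Each 2x2 output patch holds four copies of the same input pixel,
# i.e. nearest-neighbour upsampling.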
Example #3
Source File: VGGNet.py From demo-Network with GNU General Public License v3.0
def conv_op(input_op, filter_size, channel_out, step, name):
    channel_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        weights = tf.get_variable(
            shape=[filter_size, filter_size, channel_in, channel_out],
            dtype=tf.float32,
            initializer=xavier_initializer_conv2d(),
            name=scope + 'weights')
        biases = tf.Variable(
            tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32),
            trainable=True, name='biases')
        conv = tf.nn.conv2d(input_op, weights,
                            strides=[1, step, step, 1], padding='SAME') + biases
        conv = tf.nn.relu(conv, name=scope)
        return conv
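A hypothetical stacking of conv_op into a VGG-style stage (the input placeholder and layer names are illustrative, not from demo-Network):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
conv1_1 = conv_op(images, filter_size=3, channel_out=64, step=1, name='conv1_1')
conv1_2 = conv_op(conv1_1, filter_size=3, channel_out=64, step=1, name='conv1_2')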
Example #4
Source File: VGGNet.py From demo-Network with GNU General Public License v3.0
def full_connection(input_op, channel_out, name):
    channel_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        weight = tf.get_variable(
            shape=[channel_in, channel_out],
            dtype=tf.float32,
            initializer=xavier_initializer_conv2d(),
            name=scope + 'weight')
        bias = tf.Variable(
            tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32),
            name='bias')
        fc = tf.nn.relu_layer(input_op, weight, bias, name=scope)
        return fc
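Using the conv2d variant of the initializer for a dense weight is harmless here: since xavier_initializer_conv2d is literally an alias of xavier_initializer, the fan-based scaling is identical. A hypothetical call (the shape is illustrative):

flat = tf.placeholder(tf.float32, [None, 4096])
fc = full_connection(flat, channel_out=1000, name='fc1')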
Example #5
Source File: utilities.py From versa with MIT License
def conv2d_pool_block(inputs, use_batch_norm, dropout_keep_prob, pool_padding, name):
    """
    A macro function that implements the following in sequence:
    - conv2d
    - batch_norm
    - relu activation
    - dropout
    - max_pool
    :param inputs: batch of feature maps.
    :param use_batch_norm: whether to use batch normalization or not.
    :param dropout_keep_prob: keep probability parameter for dropout.
    :param pool_padding: type of padding to use on the pooling operation.
    :param name: first part of the name used to scope this sequence of operations.
    :return: the processed batch of feature maps.
    """
    h = tf.layers.conv2d(
        inputs=inputs,
        strides=(1, 1),
        filters=64,
        kernel_size=[3, 3],
        padding="same",
        kernel_initializer=xavier_initializer_conv2d(uniform=False),
        use_bias=False,
        name=(name + '_conv2d'),
        reuse=tf.AUTO_REUSE)
    if use_batch_norm:
        h = tf.contrib.layers.batch_norm(
            inputs=h,
            epsilon=1e-5,
            scope=(name + '_batch_norm'),
            reuse=tf.AUTO_REUSE)
    h = tf.nn.relu(features=h, name=(name + '_batch_relu'))
    h = tf.nn.dropout(x=h, keep_prob=dropout_keep_prob, name=(name + '_dropout'))
    h = tf.layers.max_pooling2d(
        inputs=h, pool_size=[2, 2], strides=2,
        padding=pool_padding, name=(name + '_pool'))
    return h
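A hypothetical stacking of two of these blocks into a small feature extractor (the input shape and keep probability are illustrative):

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
h = conv2d_pool_block(images, use_batch_norm=True, dropout_keep_prob=0.9,
                      pool_padding='valid', name='block_1')
h = conv2d_pool_block(h, use_batch_norm=True, dropout_keep_prob=0.9,
                      pool_padding='valid', name='block_2')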
Example #6
Source File: batch.py From batchflow with Apache License 2.0
def conv_block(input_tensor, kernel, filters, name, strides=(2, 2)):
    """ Function to create a ResNet-style block, which consists of three
    convolution layers and one skip-connection layer.
    Args:
        input_tensor: input TensorFlow layer
        kernel: kernel size for the middle convolution layer
        filters: list of the numbers of filters in the convolution layers
        name: name of the block
        strides: tuple of strides for the first convolution layer
    Output:
        x: block output layer
    """
    filters1, filters2, filters3 = filters
    x = tf.layers.conv2d(input_tensor, filters1, (1, 1), strides,
                         name='convfir' + name, activation=tf.nn.relu,
                         kernel_initializer=xavier())
    x = tf.layers.conv2d(x, filters2, kernel, name='convsec' + name,
                         activation=tf.nn.relu, padding='SAME',
                         kernel_initializer=xavier())
    x = tf.layers.conv2d(x, filters3, (1, 1), name='convthr' + name,
                         kernel_initializer=xavier())
    shortcut = tf.layers.conv2d(input_tensor, filters3, (1, 1), strides,
                                name='short' + name,
                                kernel_initializer=xavier())
    # The shortcut is concatenated rather than added; note that axis=1 is
    # the height axis under the channels_last default of tf.layers.conv2d.
    x = tf.concat([x, shortcut], axis=1)
    x = tf.nn.relu(x)
    return x
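This block merges the shortcut with tf.concat; a canonical ResNet block sums instead. For comparison, a minimal sketch of the conventional merge (a generic variant, not part of batchflow):

def residual_merge(x, shortcut):
    # Standard ResNet merge: elementwise sum followed by ReLU;
    # requires x and shortcut to have identical shapes.
    return tf.nn.relu(tf.add(x, shortcut))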
Example #7
Source File: batch.py From batchflow with Apache License 2.0
def identity_block(input_tensor, kernel, filters, name):
    """ Function to create a ResNet-style block, which consists of three
    convolution layers.
    Args:
        input_tensor: input TensorFlow layer.
        kernel: kernel size for the middle convolution layer.
        filters: list of the numbers of filters in the convolution layers.
        name: name of the block.
    Output:
        x: block output layer
    """
    filters1, filters2, filters3 = filters
    x = tf.layers.conv2d(input_tensor, filters1, (1, 1), name='convfir' + name,
                         activation=tf.nn.relu, kernel_initializer=xavier())
    x = tf.layers.conv2d(x, filters2, kernel, name='convsec' + name,
                         activation=tf.nn.relu, padding='SAME',
                         kernel_initializer=xavier())
    x = tf.layers.conv2d(x, filters3, (1, 1), name='convthr' + name,
                         kernel_initializer=xavier())
    # As in conv_block, the skip connection is concatenated along axis=1
    # rather than added.
    x = tf.concat([x, input_tensor], axis=1)
    x = tf.nn.relu(x)
    return x
Example #8
Source File: mru.py From SketchySceneColorization with MIT License
def conv_mean_pool(inputs, num_outputs, kernel_size, sn, rate=1,
                   activation_fn=None,
                   normalizer_fn=None, normalizer_params=None,
                   weights_regularizer=None,
                   weights_initializer=ly.xavier_initializer_conv2d(),
                   biases_initializer=tf.zeros_initializer(),
                   data_format='NCHW'):
    output = conv2d(inputs, num_outputs, kernel_size, sn=sn, rate=rate,
                    activation_fn=activation_fn,
                    normalizer_fn=normalizer_fn,
                    normalizer_params=normalizer_params,
                    weights_regularizer=weights_regularizer,
                    weights_initializer=weights_initializer,
                    biases_initializer=biases_initializer,
                    data_format=data_format)
    output = tf.add_n([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
                       output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
    return output
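The add_n over the four strided slices is an unrolled 2x2 mean pool. A hedged equivalence check in NCHW (the placeholder shape is illustrative, and NCHW pooling generally needs a GPU kernel in TF1):

x = tf.placeholder(tf.float32, [None, 64, 32, 32])  # NCHW
manual = tf.add_n([x[:, :, ::2, ::2], x[:, :, 1::2, ::2],
                   x[:, :, ::2, 1::2], x[:, :, 1::2, 1::2]]) / 4.
builtin = tf.nn.avg_pool(x, ksize=[1, 1, 2, 2], strides=[1, 1, 2, 2],
                         padding='VALID', data_format='NCHW')
# manual and builtin agree numerically when H and W are even.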
Example #9
Source File: mru.py From SketchySceneColorization with MIT License
def mean_pool_conv(inputs, num_outputs, kernel_size, sn, rate=1,
                   activation_fn=None,
                   normalizer_fn=None, normalizer_params=None,
                   weights_regularizer=None,
                   weights_initializer=ly.xavier_initializer_conv2d(),
                   data_format='NCHW'):
    output = inputs
    output = tf.add_n([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
                       output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
    output = conv2d(output, num_outputs, kernel_size, sn=sn, rate=rate,
                    activation_fn=activation_fn,
                    normalizer_fn=normalizer_fn,
                    normalizer_params=normalizer_params,
                    weights_regularizer=weights_regularizer,
                    weights_initializer=weights_initializer,
                    data_format=data_format)
    return output
Example #10
Source File: mru.py From SketchySceneColorization with MIT License
def upsample_conv_bilinear(inputs, num_outputs, kernel_size, sn,
                           activation_fn=None,
                           normalizer_fn=None, normalizer_params=None,
                           weights_regularizer=None,
                           weights_initializer=ly.xavier_initializer_conv2d(),
                           data_format='NCHW'):
    output = inputs
    if data_format == 'NCHW':
        output = tf.transpose(output, [0, 2, 3, 1])
    batch_size, height, width, channel = [int(i) for i in output.get_shape()]
    # output = tf.Print(output, [tf.reduce_min(output), tf.reduce_max(output)], message='before')
    output = tf.image.resize_bilinear(output, [height * 2, width * 2])
    # output = tf.Print(output, [tf.reduce_min(output), tf.reduce_max(output)], message='after')
    slice0 = output[:, :, :, 0::4]
    slice1 = output[:, :, :, 1::4]
    slice2 = output[:, :, :, 2::4]
    slice3 = output[:, :, :, 3::4]
    # Summing the four interleaved channel slices collapses C channels to C/4,
    # offsetting the 4x growth in spatial elements from the 2x bilinear resize.
    output = slice0 + slice1 + slice2 + slice3
    if data_format == 'NCHW':
        output = tf.transpose(output, [0, 3, 1, 2])
    output = conv2d(output, num_outputs, kernel_size, sn=sn,
                    activation_fn=activation_fn,
                    normalizer_fn=normalizer_fn,
                    normalizer_params=normalizer_params,
                    weights_regularizer=weights_regularizer,
                    weights_initializer=weights_initializer,
                    data_format=data_format)
    return output


# Sigmoid Gates
Example #11
Source File: batch.py From batchflow with Apache License 2.0
def resnet(self):
    """ Simple implementation of ResNet.
    Args:
        self
    Outputs:
        The method returns a list of length 2 with the following items:
        [0][0]: indices - placeholder which takes batch indices.
        [0][1]: all_data - placeholder which takes all images.
        [0][2]: all_lables - placeholder for labels.
        [0][3]: loss - value of the loss function.
        [0][4]: train - train optimizer.
        [0][5]: prob - softmax output, used for prediction.
        [1][0]: accuracy - current accuracy.
        [1][1]: session - tf session.
    """
    with tf.Graph().as_default():
        indices = tf.placeholder(tf.int32, shape=[None, 1])
        all_data = tf.placeholder(tf.float32, shape=[50000, 28, 28])
        input_batch = tf.gather_nd(all_data, indices)
        x1_to_tens = tf.reshape(input_batch, shape=[-1, 28, 28, 1])

        net1 = tf.layers.conv2d(x1_to_tens, 32, (7, 7), strides=(2, 2),
                                padding='SAME', activation=tf.nn.relu,
                                kernel_initializer=xavier(), name='11')
        net1 = tf.layers.max_pooling2d(net1, (2, 2), (2, 2))
        net1 = conv_block(net1, 3, [32, 32, 128], name='22', strides=(1, 1))
        net1 = identity_block(net1, 3, [32, 32, 128], name='33')
        net1 = conv_block(net1, 3, [64, 64, 256], name='53', strides=(1, 1))
        net1 = identity_block(net1, 3, [64, 64, 256], name='63')
        net1 = tf.layers.average_pooling2d(net1, (7, 7), strides=(1, 1))
        net1 = tf.contrib.layers.flatten(net1)
        with tf.variable_scope('dense3'):
            net1 = tf.layers.dense(net1, 10,
                                   kernel_initializer=tf.contrib.layers.xavier_initializer())
        prob1 = tf.nn.softmax(net1)

        all_lables = tf.placeholder(tf.float32, [None, 10])
        y = tf.gather_nd(all_lables, indices)
        loss1 = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=net1, labels=y),
            name='loss3')
        train1 = tf.train.MomentumOptimizer(0.03, 0.8, use_nesterov=True).minimize(loss1)

        lables_hat1 = tf.cast(tf.argmax(net1, axis=1), tf.float32, name='lables_3at')
        lables1 = tf.cast(tf.argmax(y, axis=1), tf.float32, name='labl3es')
        accuracy1 = tf.reduce_mean(
            tf.cast(tf.equal(lables_hat1, lables1), tf.float32, name='a3ccuracy'))

        session = tf.Session()
        session.run(tf.global_variables_initializer())
    return [[indices, all_data, all_lables, loss1, train1, prob1],
            [accuracy1, session]]
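A hypothetical driver loop for the handles this method returns (assumes numpy as np plus MNIST-like arrays images of shape [50000, 28, 28] and one-hot labels of shape [50000, 10]; model and all other names here are illustrative):

import numpy as np

[[indices, all_data, all_lables, loss, train, prob],
 [accuracy, sess]] = model.resnet()
for step in range(100):
    # Sample a minibatch of row indices; the graph gathers the images itself.
    batch_idx = np.random.choice(50000, size=(64, 1))
    _, cur_loss = sess.run(
        [train, loss],
        feed_dict={indices: batch_idx, all_data: images, all_lables: labels})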
Example #12
Source File: mru.py From SketchySceneColorization with MIT License
def mru_deconv(x, ht, filter_depth, sn, stride=2, num_blocks=2,
               last_unit=False,
               activation_fn=tf.nn.relu,
               normalizer_fn=None, normalizer_params=None,
               weights_initializer=ly.xavier_initializer_conv2d(),
               weight_decay_rate=1e-5,
               unit_num=0, data_format='NCHW'):
    assert len(ht) == num_blocks

    def norm_activ(tensor_in):
        if normalizer_fn is not None:
            _normalizer_params = normalizer_params or {}
            tensor_normed = normalizer_fn(tensor_in, **_normalizer_params)
        else:
            tensor_normed = tf.identity(tensor_in)
        if activation_fn is not None:
            tensor_normed = activation_fn(tensor_normed)
        return tensor_normed

    # cell_block = mru_deconv_block
    cell_block = mru_deconv_block_v2

    hts_new = []
    inp = x
    with tf.variable_scope('mru_deconv_unit_t_%d_layer_0' % unit_num):
        ht_new = cell_block(inp, ht[0], filter_depth, sn=sn, stride=stride,
                            activation_fn=activation_fn,
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params,
                            weights_initializer=weights_initializer,
                            data_format=data_format,
                            weight_decay_rate=weight_decay_rate)
        hts_new.append(ht_new)
        inp = ht_new

    for i in range(1, num_blocks):
        if stride == 2:
            ht[i] = upsample(ht[i], data_format=data_format)
        with tf.variable_scope('mru_deconv_unit_t_%d_layer_%d' % (unit_num, i)):
            ht_new = cell_block(inp, ht[i], filter_depth, sn=sn, stride=1,
                                activation_fn=activation_fn,
                                normalizer_fn=normalizer_fn,
                                normalizer_params=normalizer_params,
                                weights_initializer=weights_initializer,
                                data_format=data_format,
                                weight_decay_rate=weight_decay_rate)
            hts_new.append(ht_new)
            inp = ht_new

    # if last_unit:
    #     with tf.variable_scope('mru_deconv_unit_last_norm'):
    #         hts_new[-1] = norm_activ(hts_new[-1])
    return hts_new
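A heavily hedged call sketch for this MRU deconvolution unit. The tensor shapes, the sn flag, and the state list are all illustrative and not verified against SketchySceneColorization; mru_deconv_block_v2 and upsample are assumed to come from the same mru.py:

x = tf.placeholder(tf.float32, [None, 256, 16, 16])     # NCHW input
ht = [tf.placeholder(tf.float32, [None, 128, 16, 16]),  # cached states,
      tf.placeholder(tf.float32, [None, 128, 16, 16])]  # one per block
hts = mru_deconv(x, ht, filter_depth=128, sn=True, stride=2,
                 num_blocks=2, unit_num=0)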