Python tensorflow.contrib.slim.layer_norm() Examples
The following are 18 code examples of tensorflow.contrib.slim.layer_norm(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.slim, or try the search function.
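For orientation, here is a minimal sketch of a direct call to slim.layer_norm in TF 1.x; the tensor name and shape are illustrative and not taken from any of the examples below:

import tensorflow as tf
slim = tf.contrib.slim  # tf.contrib.slim exists only in TF 1.x; it was removed in TF 2.x

# Layer-normalize a batch of 128-dimensional feature vectors over the last axis.
inputs = tf.placeholder(tf.float32, shape=[None, 128])
normalized = slim.layer_norm(inputs, activation_fn=tf.nn.relu, scope='ln')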
Example #1
Source File: network.py From Neural-EM with MIT License

def __call__(self, inputs, state, scope=None):
    if self._apply_to == 'input':
        with tf.variable_scope(scope or self._name):
            inputs = slim.layer_norm(inputs)
            return self._cell(inputs, state)
    elif self._apply_to == 'output':
        output, res_state = self._cell(inputs, state)
        with tf.variable_scope(scope or self._name):
            output = slim.layer_norm(output)
            return output, res_state
    elif self._apply_to == 'state':
        output, res_state = self._cell(inputs, state)
        with tf.variable_scope(scope or self._name):
            res_state = slim.layer_norm(res_state)
            return output, res_state
    else:
        raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
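Going by Example #18 below, this __call__ belongs to the project's LayerNormWrapper RNN-cell wrapper. A hedged sketch of how such a wrapper is applied to a cell (the cell size is illustrative):

cell = tf.contrib.rnn.BasicRNNCell(250)
# Wrap the cell so layer normalization is applied to its output.
cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR0')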
Example #2
Source File: network_definition.py From cosine_metric_learning with GNU General Public License v3.0

def create_network_factory(is_training, num_classes, add_logits,
                           weight_decay=1e-8, reuse=None):

    def factory_fn(image):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = create_network(
                    image, num_classes=num_classes, add_logits=add_logits,
                    reuse=reuse, create_summaries=is_training,
                    weight_decay=weight_decay)
                return features, logits

    return factory_fn
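A sketch of how such a factory is typically consumed; the image tensor and class count below are illustrative and assumed to be defined elsewhere in the project:

# num_classes is illustrative; image is a [batch, height, width, 3] float tensor.
network_factory = create_network_factory(
    is_training=True, num_classes=1000, add_logits=True)
features, logits = network_factory(image)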
Example #3
Source File: network.py From Relational-NEM with MIT License

def _build_layer(inputs, layer):
    # apply transformation
    if layer['name'] == 'fc':
        out = slim.fully_connected(inputs, layer['size'], activation_fn=None)
    else:
        raise KeyError('Unknown layer "{}"'.format(layer['name']))

    # apply layer normalisation
    if layer.get('ln', False):
        out = slim.layer_norm(out)

    # apply activation function
    if layer.get('act', False):
        out = ACTIVATION_FUNCTIONS[layer['act']](out)

    return out


# NETWORK BUILDER
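A hedged usage sketch for _build_layer: a fully connected layer followed by layer normalization and an activation. The 'relu' key is assumed to exist in the project's ACTIVATION_FUNCTIONS mapping:

# Hypothetical layer spec: 256-unit FC layer with layer norm and ReLU.
out = _build_layer(inputs, {'name': 'fc', 'size': 256, 'ln': True, 'act': 'relu'})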
Example #4
Source File: network.py From Relational-NEM with MIT License

def __call__(self, inputs, state, scope=None):
    if self._apply_to == 'input':
        with tf.variable_scope(scope or self._name):
            inputs = slim.layer_norm(inputs)
            return self._cell(inputs, state)
    elif self._apply_to == 'output':
        output, res_state = self._cell(inputs, state)
        with tf.variable_scope(scope or self._name):
            output = slim.layer_norm(output)
            return output, res_state
    elif self._apply_to == 'state':
        output, res_state = self._cell(inputs, state)
        with tf.variable_scope(scope or self._name):
            res_state = slim.layer_norm(res_state)
            return output, res_state
    else:
        raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))


# R-NEM CELL
Example #5
Source File: tf_modules.py From tensor2robot with Apache License 2.0

def argscope(is_training=None, normalizer_fn=slim.layer_norm):
  """Default TF argscope used for convnet-based grasping models.

  Args:
    is_training: Whether this argscope is for training or inference.
    normalizer_fn: Which conv/fc normalizer to use.

  Returns:
    Dictionary of argument overrides.
  """
  with slim.arg_scope([slim.batch_norm, slim.dropout],
                      is_training=is_training):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn):
      with slim.arg_scope(
          [slim.conv2d, slim.max_pool2d], stride=2, padding='VALID') as scope:
        return scope
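Because the function returns the captured arg scope, it can be re-entered later via slim.arg_scope. A minimal sketch, assuming an images tensor defined elsewhere:

# Re-apply the captured defaults (layer_norm as normalizer, stride=2, etc.).
with slim.arg_scope(argscope(is_training=True)):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')
    net = slim.fully_connected(slim.flatten(net), 64, scope='fc1')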
Example #6
Source File: nem.py From auto_yolo with MIT License

def _call(self, inputs, output_size, is_training):
    inputs = self._subcall(inputs, output_size, is_training)

    if self._spec.get('ln', False):
        inputs = slim.layer_norm(inputs)

    act = self._spec.get('act', False)
    if act:
        activation = ACTIVATION_FUNCTIONS[act]
        return activation(inputs)

    return inputs
Example #7
Source File: resnet_v2_layernorm.py From TwinGAN with Apache License 2.0

def resnet_arg_scope(weight_decay=0.0001,
                     activation_fn=tf.nn.relu,
                     use_layer_norm=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    activation_fn: The activation function which is used in ResNet.
    use_layer_norm: Whether or not to use layer normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.layer_norm if use_layer_norm else None,
      normalizer_params=None):
    # The following implies padding='SAME' for pool1, which makes feature
    # alignment easier for dense prediction tasks. This is also used in
    # https://github.com/facebook/fb.resnet.torch. However the accompanying
    # code of 'Deep Residual Learning for Image Recognition' uses
    # padding='VALID' for pool1. You can switch to that choice by setting
    # slim.arg_scope([slim.max_pool2d], padding='VALID').
    with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
      return arg_sc
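A hedged sketch of applying this arg scope around a ResNet builder. The builder name below is hypothetical; the actual layer-normalized builders live elsewhere in resnet_v2_layernorm.py:

with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4, use_layer_norm=True)):
    # resnet_v2_50_layernorm is a hypothetical name used only for illustration.
    net, end_points = resnet_v2_50_layernorm(images, num_classes=1001)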
Example #8
Source File: freeze_model.py From deep_sort with GNU General Public License v3.0

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #9
Source File: freeze_model.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #10
Source File: freeze_model.py From deep_sort_yolov3 with GNU General Public License v3.0

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #11
Source File: freeze_model.py From deep_sort_yolov3 with MIT License

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #12
Source File: utils.py From AttGAN-Tensorflow with MIT License

def get_norm_layer(norm, training, updates_collections=None):
    if norm == 'none':
        return lambda x: x
    elif norm == 'batch_norm':
        return functools.partial(slim.batch_norm, scale=True,
                                 is_training=training,
                                 updates_collections=updates_collections)
    elif norm == 'instance_norm':
        return slim.instance_norm
    elif norm == 'layer_norm':
        return slim.layer_norm
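A sketch of how the returned callable plugs into a slim layer as its normalizer_fn; the tensor and layer parameters are illustrative:

norm_fn = get_norm_layer('layer_norm', training=True)
net = slim.conv2d(images, 64, [4, 4], stride=2,
                  normalizer_fn=norm_fn, activation_fn=tf.nn.relu)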
Example #13
Source File: freeze_model.py From WorkControl with Apache License 2.0

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #14
Source File: freeze_model.py From WorkControl with Apache License 2.0

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #15
Source File: freeze_model.py From multi-object-tracking with GNU General Public License v3.0

def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example #16
Source File: resnet_v2_layernorm.py From TwinGAN with Apache License 2.0

def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    preact = slim.layer_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope='shortcut')

    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           normalizer_fn=None, activation_fn=None,
                           scope='conv3')

    output = shortcut + residual

    return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                            output)
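A hedged sketch of building one such preactivation unit; the depth values and the images tensor are illustrative, and stride=2 downsamples as described in the docstring:

net = bottleneck(images, depth=256, depth_bottleneck=64, stride=2,
                 scope='unit_1')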
Example #17
Source File: nets.py From stereo-magnification with Apache License 2.0

def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False):
  """Network definition for multiplane image (MPI) inference.

  Args:
    inputs: stack of input images [batch, height, width, input_channels]
    num_outputs: number of output channels
    ngf: number of features for the first conv layer
    vscope: variable scope
    reuse_weights: whether to reuse weights (for weight sharing)

  Returns:
    pred: network output at the same spatial resolution as the inputs.
  """
  with tf.variable_scope(vscope, reuse=reuse_weights):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm):
      cnv1_1 = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1)
      cnv1_2 = slim.conv2d(cnv1_1, ngf * 2, [3, 3], scope='conv1_2', stride=2)

      cnv2_1 = slim.conv2d(cnv1_2, ngf * 2, [3, 3], scope='conv2_1', stride=1)
      cnv2_2 = slim.conv2d(cnv2_1, ngf * 4, [3, 3], scope='conv2_2', stride=2)

      cnv3_1 = slim.conv2d(cnv2_2, ngf * 4, [3, 3], scope='conv3_1', stride=1)
      cnv3_2 = slim.conv2d(cnv3_1, ngf * 4, [3, 3], scope='conv3_2', stride=1)
      cnv3_3 = slim.conv2d(cnv3_2, ngf * 8, [3, 3], scope='conv3_3', stride=2)

      cnv4_1 = slim.conv2d(
          cnv3_3, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2)
      cnv4_2 = slim.conv2d(
          cnv4_1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2)
      cnv4_3 = slim.conv2d(
          cnv4_2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2)

      # Adding skips
      skip = tf.concat([cnv4_3, cnv3_3], axis=3)
      cnv6_1 = slim.conv2d_transpose(
          skip, ngf * 4, [4, 4], scope='conv6_1', stride=2)
      cnv6_2 = slim.conv2d(cnv6_1, ngf * 4, [3, 3], scope='conv6_2', stride=1)
      cnv6_3 = slim.conv2d(cnv6_2, ngf * 4, [3, 3], scope='conv6_3', stride=1)

      skip = tf.concat([cnv6_3, cnv2_2], axis=3)
      cnv7_1 = slim.conv2d_transpose(
          skip, ngf * 2, [4, 4], scope='conv7_1', stride=2)
      cnv7_2 = slim.conv2d(cnv7_1, ngf * 2, [3, 3], scope='conv7_2', stride=1)

      skip = tf.concat([cnv7_2, cnv1_2], axis=3)
      cnv8_1 = slim.conv2d_transpose(
          skip, ngf, [4, 4], scope='conv8_1', stride=2)
      cnv8_2 = slim.conv2d(cnv8_1, ngf, [3, 3], scope='conv8_2', stride=1)

      feat = cnv8_2
      pred = slim.conv2d(
          feat, num_outputs, [1, 1], stride=1, activation_fn=tf.nn.tanh,
          normalizer_fn=None, scope='color_pred')

      return pred
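A sketch of invoking the network; the input shape and channel counts are illustrative, not the project's actual configuration:

# Hypothetical input: 32 RGB plane-sweep images stacked along channels.
inputs = tf.placeholder(tf.float32, shape=[1, 256, 256, 3 * 32])
pred = mpi_net(inputs, num_outputs=4 * 32, ngf=64, vscope='mpi_net')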
Example #18
Source File: network.py From Relational-NEM with MIT License

def build_network(K, input, recurrent, output):
    with tf.name_scope('inner_RNN'):
        # build recurrent
        for i, layer in enumerate(recurrent):
            if layer['name'] == 'rnn':
                cell = tf.contrib.rnn.BasicRNNCell(
                    layer['size'], activation=ACTIVATION_FUNCTIONS['linear'])
                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='state')
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='output')
            elif layer['name'] == 'lstm':
                cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
                    layer['size'], layer_norm=layer.get('ln', False))
                if layer.get('act'):
                    print("WARNING: activation function arg for LSTM Cell is "
                          "ignored. Default (tanh) is used instead.")
            elif layer['name'] == 'r_nem':
                cell = R_NEM(encoder=layer['encoder'], core=layer['core'],
                             context=layer['context'],
                             attention=layer['attention'],
                             actions=layer.get('actions', None),
                             size=layer['size'], K=K)
                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormR{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='state')
                cell = ActivationFunctionWrapper(cell, activation=layer['act'], apply_to='output')
            else:
                raise ValueError('Unknown recurrent name "{}"'.format(layer['name']))

        # build input
        for i, layer in reversed(list(enumerate(input))):
            if layer['name'] == 'reshape':
                cell = ReshapeWrapper(cell, layer['shape'], apply_to='input')
            else:
                cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='input')
                cell = LayerNormWrapper(cell, apply_to='input', name='LayerNormI{}'.format(i)) if layer.get('ln') else cell
                cell = InputWrapper(cell, layer, name="InputWrapper{}".format(i))

        # build output
        for i, layer in enumerate(output):
            if layer['name'] == 'reshape':
                cell = ReshapeWrapper(cell, layer['shape'])
            else:
                n_out = layer.get('n_out', 1)
                cell = OutputWrapper(cell, layer, n_out=n_out, name="OutputWrapper{}".format(i))
                cell = LayerNormWrapper(cell, apply_to='output', name='LayerNormO{}'.format(i)) if layer.get('ln') else cell
                cell = ActivationFunctionWrapper(cell, layer['act'], apply_to='output')

        return cell