Python tensorflow.python.ops.nn.batch_normalization() Examples
The following are 13 code examples of tensorflow.python.ops.nn.batch_normalization(), collected from open-source projects. Each example is listed with its source file, originating project, and license. Several projects vendor identical copies of the same code, so a duplicated example refers back to its first occurrence rather than repeating the code.
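tf.nn.batch_normalization(x, mean, variance, offset, scale, variance_epsilon) computes scale * (x - mean) / sqrt(variance + variance_epsilon) + offset, broadcasting mean, variance, offset, and scale against x. A minimal sketch of a direct call, before looking at the project examples below (the tensor values are made up for illustration; the TF 1.x API used throughout these examples is assumed):

import tensorflow as tf

# Hypothetical data: a batch of four examples with three features each.
x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.],
                 [7., 8., 9.],
                 [10., 11., 12.]])

# Per-feature mean and variance, reduced over the batch axis.
mean, variance = tf.nn.moments(x, axes=[0])

# offset (beta) and scale (gamma) would normally be trained variables;
# constants keep the sketch short.
offset = tf.zeros([3])
scale = tf.ones([3])

# y = scale * (x - mean) / sqrt(variance + 1e-3) + offset
y = tf.nn.batch_normalization(x, mean, variance, offset, scale, 1e-3)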
Example #1
Source File: network_units.py From DOTA_models with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
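Despite the batch_normalization call, what this method computes is layer normalization: the moments are taken per example over all non-batch axes, so each input in the batch is normalized by its own statistics. The method depends on class state (self._component supplies the beta and gamma variables), so here is a minimal standalone sketch of the same idea with hypothetical names, assuming the TF 1.x API used above:

import tensorflow as tf

def layer_norm(inputs, beta, gamma, variance_epsilon=1e-12):
  """Layer normalization over all axes except the batch axis."""
  axes = list(range(1, inputs.get_shape().ndims))
  # Per-example moments: keep_dims=True gives mean/variance of shape
  # [batch, 1, ..., 1], which broadcast back against inputs.
  mean, variance = tf.nn.moments(inputs, axes, keep_dims=True)
  return tf.nn.batch_normalization(
      inputs, mean, variance, beta, gamma, variance_epsilon)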
Example #2
Source File: backend.py From lambda-packs with MIT License
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then applies batch_normalization on batch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple of length 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # Need broadcasting.
    target_shape = []
    for axis in range(ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(array_ops.shape(x)[axis])
    target_shape = array_ops.stack(target_shape)

    broadcast_mean = array_ops.reshape(mean, target_shape)
    broadcast_var = array_ops.reshape(var, target_shape)
    if gamma is None:
      broadcast_gamma = None
    else:
      broadcast_gamma = array_ops.reshape(gamma, target_shape)
    if beta is None:
      broadcast_beta = None
    else:
      broadcast_beta = array_ops.reshape(beta, target_shape)
    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                    broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
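The else branch handles layouts where the preserved axis is not the last one, e.g. channels-first (NCHW) data. A sketch of that case with made-up shapes, doing the reshape by hand with plain tf ops rather than the backend's array_ops helpers (TF 1.x API assumed):

import tensorflow as tf

# Hypothetical NCHW feature map: batch=2, channels=3, height=4, width=4.
x = tf.random_normal([2, 3, 4, 4])
reduction_axes = [0, 2, 3]  # normalize over everything except channels

mean, var = tf.nn.moments(x, reduction_axes)  # both have shape [3]

# sorted([0, 2, 3]) != [0, 1, 2], i.e. the kept axis is not the last one,
# so mean/var cannot broadcast against x directly. Reshape everything to
# [1, 3, 1, 1], as the else branch above does with array_ops.reshape.
target_shape = [1, 3, 1, 1]
normed = tf.nn.batch_normalization(
    x,
    tf.reshape(mean, target_shape),
    tf.reshape(var, target_shape),
    tf.reshape(tf.zeros([3]), target_shape),  # beta
    tf.reshape(tf.ones([3]), target_shape),   # gamma
    1e-3)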
Example #3
Source File: backend.py From lambda-packs with MIT License
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`

  Arguments:
      x: Input tensor or variable.
      mean: Mean of batch.
      var: Variance of batch.
      beta: Tensor with which to center the input.
      gamma: Tensor by which to scale the input.
      epsilon: Fuzz factor.

  Returns:
      A tensor.
  """
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
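Since this wrapper passes straight through to nn.batch_normalization, the docstring formula can be checked against plain tensor arithmetic. A small sketch with made-up values (TF 1.x API assumed):

import tensorflow as tf

x = tf.constant([[0., 10.], [2., 20.]])
mean, var = tf.nn.moments(x, axes=[0])  # per-column moments
beta = tf.zeros([2])
gamma = tf.ones([2])
epsilon = 1e-3

y = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)

# The same computation written out, matching the docstring formula:
y_manual = (x - mean) / tf.sqrt(var + epsilon) * gamma + beta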
Example #4
Source File: network_units.py From yolo_v2 with Apache License 2.0
(Identical to the normalize() method in Example #1.)
Example #5
Source File: network_units.py From Gun-Detector with Apache License 2.0
(Identical to the normalize() method in Example #1.)
Example #6
Source File: network_units.py From hands-detection with MIT License
(Identical to the normalize() method in Example #1.)
Example #7
Source File: network_units.py From object_detection_kitti with Apache License 2.0
(Identical to the normalize() method in Example #1.)
Example #8
Source File: network_units.py From object_detection_with_tensorflow with MIT License
(Identical to the normalize() method in Example #1.)
Example #9
Source File: network_units.py From HumanRecognition with MIT License
(Identical to the normalize() method in Example #1.)
Example #10
Source File: network_units.py From g-tensorflow-models with Apache License 2.0
(Identical to the normalize() method in Example #1.)
Example #11
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
(Identical to normalize_batch_in_training() in Example #2.)
Example #12
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
(Identical to batch_normalization() in Example #3.)
Example #13
Source File: network_units.py From multilabel-image-classification-tensorflow with MIT License
(Identical to the normalize() method in Example #1.)