Python tensorflow.python.ops.nn.moments() Examples
The following are 14 code examples of tensorflow.python.ops.nn.moments(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, and you may also want to check out all available functions/classes of the module tensorflow.python.ops.nn.
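As a quick orientation before the examples: nn.moments computes the mean and variance of a tensor along the given axes. A minimal sketch, assuming the TensorFlow 1.x graph API:

    # Minimal sketch of nn.moments (TensorFlow 1.x graph API assumed).
    import tensorflow as tf
    from tensorflow.python.ops import nn

    x = tf.constant([[1.0, 2.0],
                     [3.0, 4.0]])
    # Per-column moments: reduce over the batch axis (axis 0).
    mean, variance = nn.moments(x, axes=[0])

    with tf.Session() as sess:
        print(sess.run([mean, variance]))  # [2., 3.] and [1., 1.]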
Example #1
Source File: network_units.py From DOTA_models with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
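The method above depends on DRAGNN component internals (self._component, self._name). As a standalone sketch of the same layer-norm recipe, with hand-made beta and gamma standing in for the component's variables (TF 1.x API assumed):

    import tensorflow as tf
    from tensorflow.python.ops import nn

    inputs = tf.random_normal([8, 16])  # [batch, hidden]
    beta = tf.zeros([16])               # learned offset in the real code
    gamma = tf.ones([16])               # learned scale in the real code
    # Moments over every non-batch axis, kept for broadcasting.
    mean, variance = nn.moments(inputs, [1], keep_dims=True)
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma, 1e-12)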
Example #2
Source File: backend.py From lambda-packs with MIT License
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # need broadcasting
    target_shape = []
    for axis in range(ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(array_ops.shape(x)[axis])
    target_shape = array_ops.stack(target_shape)

    broadcast_mean = array_ops.reshape(mean, target_shape)
    broadcast_var = array_ops.reshape(var, target_shape)
    if gamma is None:
      broadcast_gamma = None
    else:
      broadcast_gamma = array_ops.reshape(gamma, target_shape)
    if beta is None:
      broadcast_beta = None
    else:
      broadcast_beta = array_ops.reshape(beta, target_shape)
    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                    broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
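A hedged usage sketch (my own example, not from the source, and assuming the surrounding Keras backend module for ndim and array_ops): for NHWC activations, reducing over axes [0, 1, 2] takes the fast path above, since the per-channel mean and variance broadcast naturally against the last axis:

    import tensorflow as tf

    x = tf.random_normal([32, 28, 28, 64])  # NHWC activations
    gamma = tf.ones([64])
    beta = tf.zeros([64])
    normed, mean, var = normalize_batch_in_training(x, gamma, beta,
                                                    reduction_axes=[0, 1, 2])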
Example #3
Source File: network_units.py From yolo_v2 with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #4
Source File: network_units.py From Gun-Detector with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #5
Source File: network_units.py From hands-detection with MIT License
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #6
Source File: network_units.py From object_detection_kitti with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #7
Source File: network_units.py From object_detection_with_tensorflow with MIT License
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #8
Source File: network_units.py From HumanRecognition with MIT License
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #9
Source File: network_units.py From g-tensorflow-models with Apache License 2.0
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #10
Source File: temporal_convolutional_network.py From nlp-architect with Apache License 2.0
def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution"""
    from tensorflow.python.ops.nn import moments
    from tensorflow.python.ops.math_ops import sqrt

    with variable_scope.variable_scope("data_dep_init"):
        # Generate data dependent init values
        activation = self.layer.activation
        self.layer.activation = None
        x_init = self.layer.call(inputs)
        m_init, v_init = moments(x_init, self.norm_axes)
        scale_init = 1.0 / sqrt(v_init + 1e-10)

    # Assign data dependent init values
    self.layer.g = self.layer.g * scale_init
    self.layer.bias = -1 * m_init * scale_init
    self.layer.activation = activation
    self.initialized = True

# pylint: disable=signature-differs
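This follows the data-dependent initialization used in weight normalization (Salimans and Kingma, 2016): the pre-activation moments set the initial scale and bias so that outputs start roughly zero-mean and unit-variance. A standalone sketch of just the statistics, outside the class (TF 1.x API assumed):

    import tensorflow as tf
    from tensorflow.python.ops import nn

    x_init = tf.random_normal([128, 64])           # pre-activations for one batch
    m_init, v_init = nn.moments(x_init, axes=[0])  # per-unit mean and variance
    scale_init = 1.0 / tf.sqrt(v_init + 1e-10)     # rescale to unit variance
    bias_init = -m_init * scale_init               # shift to zero mean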
Example #11
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers, axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # need broadcasting
    target_shape = []
    for axis in range(ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(array_ops.shape(x)[axis])
    target_shape = array_ops.stack(target_shape)

    broadcast_mean = array_ops.reshape(mean, target_shape)
    broadcast_var = array_ops.reshape(var, target_shape)
    if gamma is None:
      broadcast_gamma = None
    else:
      broadcast_gamma = array_ops.reshape(gamma, target_shape)
    if beta is None:
      broadcast_beta = None
    else:
      broadcast_beta = array_ops.reshape(beta, target_shape)
    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                    broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
Example #12
Source File: network_units.py From multilabel-image-classification-tensorflow with MIT License
def normalize(self, inputs):
  """Apply normalization to input.

  The shape must match the declared shape in the constructor.
  [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

  Args:
    inputs: Input tensor

  Returns:
    Normalized version of input tensor.

  Raises:
    ValueError: if inputs has undefined rank.
  """
  inputs_shape = inputs.get_shape()
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError('Inputs %s has undefined rank.' % inputs.name)
  axis = range(1, inputs_rank)

  beta = self._component.get_variable('beta_%s' % self._name)
  gamma = self._component.get_variable('gamma_%s' % self._name)

  with tf.variable_scope('layer_norm_%s' % self._name):
    # Calculate the moments on the last axis (layer activations).
    mean, variance = nn.moments(inputs, axis, keep_dims=True)

    # Compute layer normalization using the batch_normalization function.
    variance_epsilon = 1E-12
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     variance_epsilon)
    outputs.set_shape(inputs_shape)
    return outputs
Example #13
Source File: normalization.py From lambda-packs with MIT License
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm."""
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0, and decay=1 meaning no updates.
  r = _smart_select(training, lambda: r, lambda: array_ops.ones_like(r))
  d = _smart_select(training, lambda: d, lambda: array_ops.zeros_like(d))
  decay = _smart_select(training, lambda: self.renorm_momentum, lambda: 1.)

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving value.
    # Make sure the weight is not updated until before r and d computation.
    value = array_ops.identity(value)
    with ops.control_dependencies([value]):
      weight_value = array_ops.constant(1., dtype=weight.dtype)
    new_var = moving_averages.assign_moving_average(
        var, value, decay, zero_debias=False)
    new_weight = moving_averages.assign_moving_average(
        weight, weight_value, decay, zero_debias=False)
    return new_var / new_weight

  with ops.colocate_with(self.moving_mean):
    new_mean = _update_renorm_variable(self.renorm_mean,
                                       self.renorm_mean_weight, mean)
  with ops.colocate_with(self.moving_variance):
    new_stddev = _update_renorm_variable(self.renorm_stddev,
                                         self.renorm_stddev_weight, stddev)
    # Make sqrt(moving_variance + epsilon) = new_stddev.
    new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)
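In batch renormalization (Ioffe, 2017), r and d map the batch-normalized output onto the moving-average statistics: x_hat = ((x - mu_B) / sigma_B) * r + d, with r = sigma_B / sigma and d = (mu_B - mu) / sigma. A toy numeric check of that identity (my own example, plain Python; clipping and the moving-average mixing above are omitted):

    batch_mean, batch_std = 0.5, 2.0     # this batch's moments
    moving_mean, moving_std = 1.0, 1.0   # pre-update moving averages
    r = batch_std / moving_std           # 2.0: widen to match the batch spread
    d = (batch_mean - moving_mean) / moving_std  # -0.5: shift toward the batch mean

    x = 2.5
    x_hat = ((x - batch_mean) / batch_std) * r + d  # 1.5
    # With the correction, batch-stat normalization matches moving-stat normalization.
    assert x_hat == (x - moving_mean) / moving_std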
Example #14
Source File: normalization.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm."""
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0, and decay=1 meaning no updates.
  r = _smart_select(training, lambda: r, lambda: array_ops.ones_like(r))
  d = _smart_select(training, lambda: d, lambda: array_ops.zeros_like(d))
  decay = _smart_select(training, lambda: self.renorm_momentum, lambda: 1.)

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving value.
    # Make sure the weight is not updated until before r and d computation.
    value = array_ops.identity(value)
    with ops.control_dependencies([value]):
      weight_value = array_ops.constant(1., dtype=weight.dtype)
    new_var = moving_averages.assign_moving_average(
        var, value, decay, zero_debias=False)
    new_weight = moving_averages.assign_moving_average(
        weight, weight_value, decay, zero_debias=False)
    return new_var / new_weight

  with ops.colocate_with(self.moving_mean):
    new_mean = _update_renorm_variable(self.renorm_mean,
                                       self.renorm_mean_weight, mean)
  with ops.colocate_with(self.moving_variance):
    new_stddev = _update_renorm_variable(self.renorm_stddev,
                                         self.renorm_stddev_weight, stddev)
    # Make sqrt(moving_variance + epsilon) = new_stddev.
    new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)