Python tensorflow.python.ops.nn.fused_batch_norm() Examples
The following is a code example of tensorflow.python.ops.nn.fused_batch_norm(), drawn from an open-source project; the original source file and its license are credited above the code. You may also want to look at the other functions and classes available in the module tensorflow.python.ops.nn.
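Before the project example, here is a minimal, self-contained sketch of calling fused_batch_norm directly in training mode. It is not taken from the project below; the input shape, epsilon, and the constant gamma/beta values are illustrative assumptions, and tensorflow.python.ops.nn is an internal TensorFlow module whose API can change between releases.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn  # internal module, as used in the example below

# Illustrative NHWC input: batch=2, height=4, width=4, channels=3.
x = tf.constant(np.random.rand(2, 4, 4, 3), dtype=tf.float32)
gamma = tf.ones([3])  # per-channel scale
beta = tf.zeros([3])  # per-channel offset

# In training mode, the batch mean and variance are computed inside the op
# and returned alongside the normalized output.
y, batch_mean, batch_var = nn.fused_batch_norm(
    x, gamma, beta, epsilon=1e-3, is_training=True, data_format='NHWC')

In the project code below, these returned batch statistics are what feed the moving-average updates.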
Example #1
Source File: normalization.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
# Imports used by this excerpt (defined at the top of the surrounding module):
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn


def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  # TODO(reedwm): Add support for fp16 inputs.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)

  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    one_minus_decay = _smart_select(training,
                                    lambda: self._one_minus_decay,
                                    lambda: 0.)
  else:
    one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              one_minus_decay)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, one_minus_decay)
    if context.in_graph_mode():
      # Note that in Eager mode, the updates are already executed when running
      # assign_moving_averages. So we do not need to put them into collections.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)

  return output
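As a companion to the project example, here is a minimal inference-path sketch. It is illustrative rather than taken from the project: it reuses x, gamma, and beta from the sketch above and supplies hypothetical moving statistics, mirroring the _fused_batch_norm_inference closure, in which precomputed statistics are passed in and is_training=False so that no batch statistics are computed.

# Hypothetical moving statistics; in the method above these would be the
# moving_mean/moving_variance variables updated by _assign_moving_average.
moving_mean = tf.zeros([3])
moving_variance = tf.ones([3])

# With is_training=False, the supplied statistics are used for normalization;
# the remaining two outputs are ignored here.
y_inference, _, _ = nn.fused_batch_norm(
    x, gamma, beta,
    mean=moving_mean, variance=moving_variance,
    epsilon=1e-3, is_training=False, data_format='NHWC')

One design note from the code above: in training mode the fused kernel returns a variance with Bessel's correction (division by n - 1), so the method rescales it by (n - 1) / n, with n = size(inputs) / size(variance), to match the population variance reported by non-fused batch norm.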