Python tensorflow.contrib.layers.python.layers.layers.layer_norm() Examples
The following are 11 code examples of tensorflow.contrib.layers.python.layers.layers.layer_norm().
You may also want to check out all available functions and classes of the module tensorflow.contrib.layers.python.layers.layers.
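Before the examples, here is a minimal, hedged sketch of calling layer_norm directly on a 2-D tensor. It assumes TensorFlow 1.x (where tf.contrib is still available); the scope name "layer_norm_demo" and the feature size of 256 are illustrative and not taken from the examples below.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

x = tf.placeholder(tf.float32, [None, 256])
# With the defaults (center=True, scale=True), trainable beta and gamma
# variables are created and the activations are normalized over the
# non-batch dimensions.
y = layers.layer_norm(x, center=True, scale=True, scope="layer_norm_demo")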
Example #1
Source File: rnn_cell.py From lambda-packs with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None,
             reuse=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)

  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
  self._reuse = reuse
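As a usage sketch (not part of the original source), a cell like the one defined above is constructed and unrolled like any other RNNCell in TensorFlow 1.x, where it is exported as tf.contrib.rnn.LayerNormBasicLSTMCell; the shapes and hyperparameters below are illustrative.

import tensorflow as tf

# [batch, time, features]; sizes are placeholders chosen for illustration.
inputs = tf.placeholder(tf.float32, [None, 20, 64])
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
    num_units=128, forget_bias=1.0, layer_norm=True,
    norm_gain=1.0, norm_shift=0.0, dropout_keep_prob=0.9)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)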
Example #2
Source File: rnn_cell.py From lambda-packs with MIT License
def _norm(self, inp, scope):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(self._g)
  beta_init = init_ops.constant_initializer(self._b)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized
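The helper above relies on a reuse trick: it first creates "gamma" and "beta" in the target scope with the cell's own initial values, then calls layer_norm with reuse=True so the op picks up those variables instead of creating its own with the default initializers. A standalone sketch of the same pattern, assuming TensorFlow 1.x and an illustrative scope name "input_norm":

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

inp = tf.placeholder(tf.float32, [None, 128])
norm_gain, norm_shift = 1.0, 0.0  # plays the role of self._g / self._b above

with tf.variable_scope("input_norm"):
  # Pre-create the variables that layer_norm will look for.
  tf.get_variable("gamma", shape=[128],
                  initializer=tf.constant_initializer(norm_gain))
  tf.get_variable("beta", shape=[128],
                  initializer=tf.constant_initializer(norm_shift))
# reuse=True makes layer_norm reuse the pre-created gamma/beta.
normalized = layers.layer_norm(inp, reuse=True, scope="input_norm")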
Example #3
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
  """
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
Example #4
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def _norm(self, inp, scope):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(self._g)
  beta_init = init_ops.constant_initializer(self._b)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized
Example #5
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def _norm(g, b, inp, scope):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(g)
  beta_init = init_ops.constant_initializer(b)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized
Example #6
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None,
             reuse=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)

  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._norm_gain = norm_gain
  self._norm_shift = norm_shift
  self._reuse = reuse
Example #7
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def _norm(self, inp, scope, dtype=dtypes.float32):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(self._norm_gain)
  beta_init = init_ops.constant_initializer(self._norm_shift)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
    vs.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized
Example #8
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
  """
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
Example #9
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def _norm(self, inp, scope):
  with vs.variable_scope(scope) as scope:
    shape = inp.get_shape()[-1:]
    gamma_init = init_ops.constant_initializer(self._g)
    beta_init = init_ops.constant_initializer(self._b)
    gamma = vs.get_variable("gamma", shape=shape, initializer=gamma_init)  # pylint: disable=unused-variable
    beta = vs.get_variable("beta", shape=shape, initializer=beta_init)  # pylint: disable=unused-variable
    normalized = layers.layer_norm(inp, reuse=True, scope=scope)
    return normalized
Example #10
Source File: rnn_cell.py From keras-lambda with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
  """
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
Example #11
Source File: rnn_cell.py From keras-lambda with MIT License
def _norm(self, inp, scope):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(self._g)
  beta_init = init_ops.constant_initializer(self._b)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized