Python tensorflow.python.ops.nn_ops.dropout() Examples
The following are 30 code examples of tensorflow.python.ops.nn_ops.dropout(), drawn from open-source projects. Each example notes its original source file and project. You may also want to check out all available functions and classes of the module tensorflow.python.ops.nn_ops.
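Before the examples, a minimal sketch of calling the function directly, assuming a TF 1.x graph-mode environment (the tensor values here are illustrative):

import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.ones([4, 4])
# Each element is kept with probability keep_prob and scaled by 1 / keep_prob,
# so the expected sum of the tensor is unchanged.
y = nn_ops.dropout(x, keep_prob=0.5, seed=1)

with tf.Session() as sess:
    print(sess.run(y))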
Example #1
Source File: rnn_cell.py From lambda-packs with MIT License
def call(self, inputs, state):
    """LSTM cell with layer normalization and recurrent dropout."""
    c, h = state
    args = array_ops.concat([inputs, h], 1)
    concat = self._linear(args)
    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

    if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

    g = self._activation(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
        new_c = self._norm(new_c, "state")
    new_h = self._activation(new_c) * math_ops.sigmoid(o)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
    return new_h, new_state
Example #2
Source File: rnn_cell_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    def _should_dropout(p):
        return (not isinstance(p, float)) or p < 1

    if _should_dropout(self._input_keep_prob):
        inputs = self._dropout(inputs, "input",
                               self._recurrent_input_noise,
                               self._input_keep_prob)
    output, new_state = self._cell(inputs, state, scope)
    if _should_dropout(self._state_keep_prob):
        # Identify which subsets of the state to perform dropout on and
        # which ones to keep.
        shallow_filtered_substructure = nest.get_traverse_shallow_structure(
            self._dropout_state_filter, new_state)
        new_state = self._dropout(new_state, "state",
                                  self._recurrent_state_noise,
                                  self._state_keep_prob,
                                  shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
        output = self._dropout(output, "output",
                               self._recurrent_output_noise,
                               self._output_keep_prob)
    return output, new_state
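For context, a hedged usage sketch of the public wrapper this method implements (tf.nn.rnn_cell.DropoutWrapper in TF 1.x); the sizes are illustrative:

import tensorflow as tf

batch_size, steps, dim = 8, 10, 32
cell = tf.nn.rnn_cell.BasicLSTMCell(64)
cell = tf.nn.rnn_cell.DropoutWrapper(
    cell,
    input_keep_prob=0.9,   # dropout on the cell's inputs
    output_keep_prob=0.5,  # dropout on the cell's outputs
    state_keep_prob=0.9,   # dropout on the recurrent state, filtered as above
    seed=42)
inputs = tf.placeholder(tf.float32, [batch_size, steps, dim])
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)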
Example #3
Source File: rnn_cell.py From qrn with MIT License
def pre(self, inputs, scope=None):
    """Preprocess inputs to be used by the cell. Assumes [N, J, *] [x, u]"""
    is_train = self._is_train
    keep_prob = self._keep_prob
    gate_size = self._gate_size
    with tf.variable_scope(scope or "pre"):
        # Note: this project targets the legacy TF 0.x API, where tf.split
        # and tf.concat take the axis as their first argument.
        x, u, _, _ = tf.split(2, 4, tf.slice(inputs, [0, 0, gate_size], [-1, -1, -1]))  # [N, J, d]
        a_raw = linear([x * u], gate_size, True, scope='a_raw',
                       var_on_cpu=self._var_on_cpu,
                       wd=self._wd, initializer=self._initializer)
        a = tf.sigmoid(a_raw - self._forget_bias, name='a')
        if keep_prob < 1.0:
            x = tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob), lambda: x)
            u = tf.cond(is_train, lambda: tf.nn.dropout(u, keep_prob), lambda: u)
        v_t = tf.nn.tanh(linear([x, u], self._num_units, True,
                                var_on_cpu=self._var_on_cpu, wd=self._wd,
                                scope='v_raw'), name='v')
        new_inputs = tf.concat(2, [a, x, u, v_t])  # [N, J, 3*d + 1]
        return new_inputs
Example #4
Source File: rnn_dropout.py From GtS with MIT License
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    def _should_dropout(p):
        return (not isinstance(p, float)) or p < 1

    if _should_dropout(self._input_keep_prob):
        inputs = self._dropout(inputs, "input",
                               self._recurrent_input_noise,
                               self._input_keep_prob)
    output, new_state = self._cell(inputs, state, scope=scope)
    if _should_dropout(self._state_keep_prob):
        # Identify which subsets of the state to perform dropout on and
        # which ones to keep.
        # shallow_filtered_substructure = nest.get_traverse_shallow_structure(
        #     self._dropout_state_filter, new_state)
        shallow_filtered_substructure = self._dropout_state_filter(new_state)  # TODO: GK hack
        new_state = self._dropout(new_state, "state",
                                  self._recurrent_state_noise,
                                  self._state_keep_prob,
                                  shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
        output = self._dropout(output, "output",
                               self._recurrent_output_noise,
                               self._output_keep_prob)
    return output, new_state
Example #5
Source File: zoneout.py From OpenSeq2Seq with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    if isinstance(self.state_size, tuple) != isinstance(self._zoneout_prob, tuple):
        raise TypeError("Subdivided states need subdivided zoneouts.")
    if (isinstance(self.state_size, tuple)
            and len(tuple(self.state_size)) != len(tuple(self._zoneout_prob))):
        raise ValueError("State and zoneout need equally many parts.")
    output, new_state = self._cell(inputs, state, scope)
    if isinstance(self.state_size, tuple):
        if self._is_training:
            new_state = tuple(
                (1 - state_part_zoneout_prob) * dropout(
                    new_state_part - state_part,
                    (1 - state_part_zoneout_prob),
                    seed=self._seed) + state_part
                for new_state_part, state_part, state_part_zoneout_prob
                in zip(new_state, state, self._zoneout_prob))
        else:
            new_state = tuple(
                state_part_zoneout_prob * state_part
                + (1 - state_part_zoneout_prob) * new_state_part
                for new_state_part, state_part, state_part_zoneout_prob
                in zip(new_state, state, self._zoneout_prob))
        new_state = rnn_cell_impl.LSTMStateTuple(new_state[0], new_state[1])
    else:
        raise ValueError("Only states that are tuples are supported")
    return output, new_state
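The dropout call in the training branch is a trick for sampling the zoneout mask: dropout scales kept units by 1/(1 - p), the leading (1 - state_part_zoneout_prob) factor cancels that scaling, and adding state_part back means each unit either keeps its old value (with probability p) or takes the new one. A small NumPy sketch of the equivalent update (probabilities and shapes are illustrative):

import numpy as np

np.random.seed(0)
p = 0.1                           # zoneout probability for this state part
state_part = np.ones(4)           # previous state
new_state_part = np.full(4, 2.0)  # candidate new state

# Training: each unit is "zoned out" (keeps its old value) with probability p.
keep_mask = (np.random.uniform(size=4) < 1 - p).astype(float)
train_state = keep_mask * new_state_part + (1 - keep_mask) * state_part

# Inference: deterministic interpolation with the same expectation.
eval_state = p * state_part + (1 - p) * new_state_part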
Example #6
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""
    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell
        # pylint: disable=unused-variables
        c, h = state
        args = array_ops.concat(1, [inputs, h])
        concat = self._linear(args)
        i, j, f, o = array_ops.split(1, 4, concat)

        if self._layer_norm:
            i = self._norm(i, "input")
            j = self._norm(j, "transform")
            f = self._norm(f, "forget")
            o = self._norm(o, "output")

        g = self._activation(j)
        if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
            g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

        new_c = (c * math_ops.sigmoid(f + self._forget_bias)
                 + math_ops.sigmoid(i) * g)
        if self._layer_norm:
            new_c = self._norm(new_c, "state")
        new_h = self._activation(new_c) * math_ops.sigmoid(o)

        new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
        return new_h, new_state
Example #7
Source File: rnn_cell_extended.py From rgn with MIT License
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared zoneouts."""
    # Compute output and new state as before.
    output, new_state = self._cell(inputs, state, scope)

    # If either hidden-state or memory-cell zoneout is applied, split the
    # state and process each part.
    if self._has_hidden_state_zoneout or self._has_memory_cell_zoneout:
        # Split state.
        c_old, m_old = state
        c_new, m_new = new_state

        # Apply zoneout to the memory cell and the hidden state.
        c_and_m = []
        for s_old, s_new, p, has_zoneout in [
                (c_old, c_new, self._memory_cell_keep_prob, self._has_memory_cell_zoneout),
                (m_old, m_new, self._hidden_state_keep_prob, self._has_hidden_state_zoneout)]:
            if has_zoneout:
                if self._is_training:
                    # This should just use random ops instead; see the dropout code for how.
                    mask = nn_ops.dropout(array_ops.ones_like(s_new), p, seed=self._seed) * p
                    s = ((1. - mask) * s_old) + (mask * s_new)
                else:
                    s = ((1. - p) * s_old) + (p * s_new)
            else:
                s = s_new
            c_and_m.append(s)

        # Package the final results.
        new_state = LSTMStateTuple(*c_and_m)
        output = new_state.h

    return output, new_state
Example #8
Source File: rnn_cell.py From ecm with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    if (not isinstance(self._input_keep_prob, float) or
            self._input_keep_prob < 1):
        inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
            self._output_keep_prob < 1):
        output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
Example #9
Source File: mod_core_rnn_cell_impl.py From RGAN with MIT License
def _variational_recurrent_dropout_value(self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # Uniform in [keep_prob, 1.0 + keep_prob).
    random_tensor = keep_prob + noise
    # 0. if in [keep_prob, 1.0) and 1. if in [1.0, 1.0 + keep_prob).
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
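A NumPy sketch of what this mask construction computes (values illustrative): adding keep_prob to uniform [0, 1) noise and flooring yields a Bernoulli(keep_prob) mask, and dividing by keep_prob rescales the kept units. The noise tensor is sampled once and reused across time steps, which is what makes the dropout "variational":

import numpy as np

np.random.seed(0)
keep_prob = 0.8
noise = np.random.uniform(size=5)        # uniform in [0, 1), pre-calculated
random_tensor = keep_prob + noise        # uniform in [keep_prob, 1 + keep_prob)
binary_tensor = np.floor(random_tensor)  # 1. with probability keep_prob, else 0.
value = np.ones(5)
ret = value / keep_prob * binary_tensor  # kept units scaled by 1 / keep_prob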
Example #10
Source File: mod_core_rnn_cell_impl.py From RGAN with MIT License
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):
    """Decides whether to perform standard dropout or recurrent dropout."""
    if not self._variational_recurrent:
        def dropout(i, v):
            return nn_ops.dropout(
                v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
        return _enumerated_map_structure(dropout, values)
    else:
        def dropout(i, v, n):
            return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        return _enumerated_map_structure(dropout, values, recurrent_noise)
Example #11
Source File: rnn_ops.py From video_prediction with MIT License
def call(self, inputs, state):
    """2D Convolutional LSTM cell with (optional) normalization and recurrent dropout."""
    c, h = state
    tile_concat = isinstance(inputs, (list, tuple))
    if tile_concat:
        inputs, inputs_non_spatial = inputs
    args = array_ops.concat([inputs, h], -1)
    concat = self._conv2d(args)
    if tile_concat:
        concat = concat + self._dense(inputs_non_spatial)[:, None, None, :]

    if self._normalizer_fn and not self._separate_norms:
        concat = self._norm(concat, "input_transform_forget_output")
    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=-1)
    if self._normalizer_fn and self._separate_norms:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

    g = self._activation_fn(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._normalizer_fn:
        new_c = self._norm(new_c, "state")
    new_h = self._activation_fn(new_c) * math_ops.sigmoid(o)

    if self._skip_connection:
        new_h = array_ops.concat([new_h, inputs], axis=-1)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
    return new_h, new_state
Example #12
Source File: rnn_dropout.py From GtS with MIT License
def _default_dropout_state_filter_visitor(substate):
    if isinstance(substate, LSTMStateTuple):
        # Do not perform dropout on the memory state.
        return LSTMStateTuple(c=False, h=True)
    elif isinstance(substate, tensor_array_ops.TensorArray):
        return False
    return True
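This visitor shape is what DropoutWrapper's dropout_state_filter_visitor argument expects (present in TF 1.4+). A hedged sketch of a custom filter that exempts the entire LSTM state, both c and h, from state dropout:

import tensorflow as tf

def no_state_dropout(substate):
    if isinstance(substate, tf.nn.rnn_cell.LSTMStateTuple):
        return tf.nn.rnn_cell.LSTMStateTuple(c=False, h=False)
    return True

cell = tf.nn.rnn_cell.DropoutWrapper(
    tf.nn.rnn_cell.BasicLSTMCell(32),
    state_keep_prob=0.9,
    dropout_state_filter_visitor=no_state_dropout)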
Example #13
Source File: rnn_dropout.py From GtS with MIT License
def _variational_recurrent_dropout_value(self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # Uniform in [keep_prob, 1.0 + keep_prob).
    random_tensor = keep_prob + noise
    # 0. if in [keep_prob, 1.0) and 1. if in [1.0, 1.0 + keep_prob).
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
Example #14
Source File: rnn_dropout.py From GtS with MIT License
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
             shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout."""
    if shallow_filtered_substructure is None:
        # Put something so we traverse the entire structure; inside the
        # dropout function we check to see if leafs of this are bool or not.
        shallow_filtered_substructure = values

    if not self._variational_recurrent:
        def dropout(i, do_dropout, v):
            if not isinstance(do_dropout, bool) or do_dropout:
                return nn_ops.dropout(
                    v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values])
    else:
        def dropout(i, do_dropout, v, n):
            if not isinstance(do_dropout, bool) or do_dropout:
                return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values, recurrent_noise])
Example #15
Source File: rnn_cell.py From ecm with Apache License 2.0
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0, seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
        raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
            not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
        raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                         % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
            not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
        raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                         % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed
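Because the wrapper only skips dropout when keep_prob is a Python float equal to 1, a common pattern (sketched here, assuming TF 1.x) is to feed keep_prob as a tensor so one graph serves both training and evaluation:

import tensorflow as tf

keep_prob = tf.placeholder_with_default(1.0, shape=[])
cell = tf.nn.rnn_cell.DropoutWrapper(
    tf.nn.rnn_cell.GRUCell(128),
    input_keep_prob=keep_prob,
    output_keep_prob=keep_prob)
# Training step: sess.run(train_op, feed_dict={keep_prob: 0.8})
# Evaluation:    run without feeding; keep_prob defaults to 1.0 (dropout is a no-op).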
Example #16
Source File: rnn_cell_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _default_dropout_state_filter_visitor(substate):
    if isinstance(substate, LSTMStateTuple):
        # Do not perform dropout on the memory state.
        return LSTMStateTuple(c=False, h=True)
    elif isinstance(substate, tensor_array_ops.TensorArray):
        return False
    return True
Example #17
Source File: rnn_cell_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _variational_recurrent_dropout_value(self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # Uniform in [keep_prob, 1.0 + keep_prob).
    random_tensor = keep_prob + noise
    # 0. if in [keep_prob, 1.0) and 1. if in [1.0, 1.0 + keep_prob).
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
Example #18
Source File: rnn_cell_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
             shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout."""
    if shallow_filtered_substructure is None:
        # Put something so we traverse the entire structure; inside the
        # dropout function we check to see if leafs of this are bool or not.
        shallow_filtered_substructure = values

    if not self._variational_recurrent:
        def dropout(i, do_dropout, v):
            if not isinstance(do_dropout, bool) or do_dropout:
                return nn_ops.dropout(
                    v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values])
    else:
        def dropout(i, do_dropout, v, n):
            if not isinstance(do_dropout, bool) or do_dropout:
                return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values, recurrent_noise])
Example #19
Source File: rnn_cell.py From keras-lambda with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
    """Initializes the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
    """
    if input_size is not None:
        logging.warn("%s: The input_size parameter is deprecated.", self)

    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._g = norm_gain
    self._b = norm_shift
Example #20
Source File: rnn_cell.py From keras-lambda with MIT License
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""
    with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
        c, h = state
        args = array_ops.concat([inputs, h], 1)
        concat = self._linear(args)
        i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

        if self._layer_norm:
            i = self._norm(i, "input")
            j = self._norm(j, "transform")
            f = self._norm(f, "forget")
            o = self._norm(o, "output")

        g = self._activation(j)
        if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
            g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

        new_c = (c * math_ops.sigmoid(f + self._forget_bias)
                 + math_ops.sigmoid(i) * g)
        if self._layer_norm:
            new_c = self._norm(new_c, "state")
        new_h = self._activation(new_c) * math_ops.sigmoid(o)

        new_state = core_rnn_cell.LSTMStateTuple(new_c, new_h)
        return new_h, new_state
Example #21
Source File: core_rnn_cell_impl.py From keras-lambda with MIT License
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0, seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
        raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
            not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
        raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                         % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
            not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
        raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                         % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed
Example #22
Source File: core_rnn_cell_impl.py From keras-lambda with MIT License
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    if (not isinstance(self._input_keep_prob, float) or
            self._input_keep_prob < 1):
        inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
            self._output_keep_prob < 1):
        output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
Example #23
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    if (not isinstance(self._input_keep_prob, float) or
            self._input_keep_prob < 1):
        inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
            self._output_keep_prob < 1):
        output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
Example #24
Source File: rnn_cell_impl.py From lambda-packs with MIT License
def _variational_recurrent_dropout_value(self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # Uniform in [keep_prob, 1.0 + keep_prob).
    random_tensor = keep_prob + noise
    # 0. if in [keep_prob, 1.0) and 1. if in [1.0, 1.0 + keep_prob).
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
Example #25
Source File: rnn_cell_impl.py From lambda-packs with MIT License
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):
    """Decides whether to perform standard dropout or recurrent dropout."""
    if not self._variational_recurrent:
        def dropout(i, v):
            return nn_ops.dropout(
                v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
        return _enumerated_map_structure(dropout, values)
    else:
        def dropout(i, v, n):
            return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        return _enumerated_map_structure(dropout, values, recurrent_noise)
Example #26
Source File: rnn_cell.py From lambda-packs with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None,
             reuse=None):
    """Initializes the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already
        has the given variables, an error is raised.
    """
    super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)

    if input_size is not None:
        logging.warn("%s: The input_size parameter is deprecated.", self)

    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._g = norm_gain
    self._b = norm_shift
    self._reuse = reuse
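A brief usage sketch of the contrib cell this constructor belongs to (tf.contrib.rnn.LayerNormBasicLSTMCell in TF 1.x); the unit count is illustrative:

import tensorflow as tf

cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
    num_units=128,
    layer_norm=True,
    dropout_keep_prob=0.9)  # recurrent dropout on the candidate activations g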
Example #27
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             activation=math_ops.tanh, layer_norm=True,
             norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
    """Initializes the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
    """
    if input_size is not None:
        logging.warn("%s: The input_size parameter is deprecated.", self)

    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._g = norm_gain
    self._b = norm_shift
Example #28
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""
    with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
        c, h = state
        args = array_ops.concat([inputs, h], 1)
        concat = self._linear(args)
        i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

        if self._layer_norm:
            i = self._norm(i, "input")
            j = self._norm(j, "transform")
            f = self._norm(f, "forget")
            o = self._norm(o, "output")

        g = self._activation(j)
        if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
            g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

        new_c = (c * math_ops.sigmoid(f + self._forget_bias)
                 + math_ops.sigmoid(i) * g)
        if self._layer_norm:
            new_c = self._norm(new_c, "state")
        new_h = self._activation(new_c) * math_ops.sigmoid(o)

        new_state = core_rnn_cell.LSTMStateTuple(new_c, new_h)
        return new_h, new_state
Example #29
Source File: core_rnn_cell_impl.py From auto-alt-text-lambda-api with MIT License
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0, seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
        raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
            not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
        raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                         % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
            not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
        raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                         % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed
Example #30
Source File: core_rnn_cell_impl.py From auto-alt-text-lambda-api with MIT License
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    if (not isinstance(self._input_keep_prob, float) or
            self._input_keep_prob < 1):
        inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
            self._output_keep_prob < 1):
        output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state