Python tensorflow.python.ops.rnn_cell._linear() Examples
The following are 9 code examples of tensorflow.python.ops.rnn_cell._linear(), collected from open-source projects. Each example lists the source file and project it was taken from, so you can refer back to the original code for context. You may also want to look at the other functions and classes available in the tensorflow.python.ops.rnn_cell module.
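Before the examples, a minimal usage sketch may help. The snippet below assumes a pre-1.0 TensorFlow release in which the private helper tensorflow.python.ops.rnn_cell._linear still exists; the tensor names and sizes are invented for illustration, and the argument order (args, output_size, bias) follows the calls in the examples that follow.

import tensorflow as tf
from tensorflow.python.ops.rnn_cell import _linear

# _linear concatenates its input tensors along the feature axis and applies a
# single learned affine map W[x; h] + b, creating the weight (and optional
# bias) variables in the current variable scope. Shapes here are hypothetical.
x = tf.placeholder(tf.float32, [None, 4])   # batch of input features
h = tf.placeholder(tf.float32, [None, 8])   # e.g. a previous RNN state

with tf.variable_scope("projection"):
    y = _linear([x, h], 16, True)           # result shape: [batch, 16]

Because _linear creates its variables with get_variable, calling it a second time in the same variable scope without enabling reuse raises a ValueError; Example #1 below exercises exactly that behavior.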
Example #1
Source File: rnn_cell_test.py From deep_image_model with Apache License 2.0
def testLinear(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
      x = tf.zeros([1, 2])
      l = linear([x], 2, False)
      sess.run([tf.global_variables_initializer()])
      res = sess.run([l], {x.name: np.array([[1., 2.]])})
      self.assertAllClose(res[0], [[3.0, 3.0]])

      # Checks prevent you from accidentally creating a shared function.
      with self.assertRaises(ValueError):
        l1 = linear([x], 2, False)

      # But you can create a new one in a new scope and share the variables.
      with tf.variable_scope("l1") as new_scope:
        l1 = linear([x], 2, False)
      with tf.variable_scope(new_scope, reuse=True):
        linear([l1], 2, False)
      self.assertEqual(len(tf.trainable_variables()), 2)
Example #2
Source File: rnn_cell_test.py From deep_image_model with Apache License 2.0
def basic_rnn_cell(inputs, state, num_units, scope=None):
  if state is None:
    if inputs is not None:
      batch_size = inputs.get_shape()[0]
      dtype = inputs.dtype
    else:
      batch_size = 0
      dtype = tf.float32
    init_output = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_state = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_output.set_shape([batch_size, num_units])
    init_state.set_shape([batch_size, num_units])
    return init_output, init_state
  else:
    with tf.variable_scope(scope, "BasicRNNCell", [inputs, state]):
      output = tf.tanh(linear([inputs, state], num_units, True))
    return output, output
Example #3
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("Attention"):
    k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("AttnV", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    # Project the query with _linear, then score it against every attention state.
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    # Attention-weighted sum of the stored states, then drop the oldest state
    # from the attention window.
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #4
Source File: nn.py From convai-bot-1337 with GNU General Public License v3.0
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False,
           wd=0.0, input_keep_prob=1.0, is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train,
                             lambda: tf.nn.dropout(arg, input_keep_prob),
                             lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start,
                       scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
Example #5
Source File: nn.py From adversarial-squad with MIT License
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False,
           wd=0.0, input_keep_prob=1.0, is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train,
                             lambda: tf.nn.dropout(arg, input_keep_prob),
                             lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start,
                       scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
Example #6
Source File: nn.py From bi-att-flow with Apache License 2.0
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False,
           wd=0.0, input_keep_prob=1.0, is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train,
                             lambda: tf.nn.dropout(arg, input_keep_prob),
                             lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start,
                       scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        add_wd(wd)
    return out
Example #7
Source File: nvdm.py From variational-text-tensorflow with MIT License
def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder"):
        self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim,
                             bias=True, scope="l1")
        self.l1 = tf.nn.relu(self.l1_lin)

        self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
        self.l2 = tf.nn.relu(self.l2_lin)

        self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
        self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True,
                                   scope="log_sigma_sq")

        self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
        self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))

        self.h = tf.add(self.mu, tf.mul(self.sigma, self.eps))

        _ = tf.histogram_summary("mu", self.mu)
        _ = tf.histogram_summary("sigma", self.sigma)
        _ = tf.histogram_summary("h", self.h)
        _ = tf.histogram_summary("mu + sigma", self.mu + self.sigma)
Example #8
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def __call__(self, inputs, state, scope=None):
  """Long short-term memory cell with attention (LSTMA)."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(1, nest.flatten(new_state))
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("AttnOutputProjection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(1, [new_attn_states,
                                           array_ops.expand_dims(output, 1)])
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(1, list(new_state))
    return output, new_state
Example #9
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def _linear(self, args, scope="linear"):
  # Projects `args` to the 4 * num_units pre-activations of an LSTM cell.
  out_size = 4 * self._num_units
  proj_size = args.get_shape()[-1]
  with vs.variable_scope(scope) as scope:
    weights = vs.get_variable("weights", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      # When layer normalization is enabled it applies its own shift, so the
      # bias is only added in the non-normalized case.
      bias = vs.get_variable("b", [out_size])
      out += bias
    return out