Python tensorflow.python.ops.rnn_cell.BasicLSTMCell() Examples

The following are 15 code examples of tensorflow.python.ops.rnn_cell.BasicLSTMCell(), drawn from open-source projects. Each example notes its source file, the project it comes from, and that project's license. You may also want to check out all available functions/classes of the module tensorflow.python.ops.rnn_cell.
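For orientation, the sketch below shows the construction-and-run pattern these examples share. The shapes, the tf.nn.dynamic_rnn call, and the concrete numbers are illustrative assumptions for a TF 1.x-era environment, not taken from any particular example below.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell  # TF 1.x-era internal module path used by these examples

num_units = 128                                       # assumed LSTM size
inputs = tf.placeholder(tf.float32, [None, 20, 50])   # [batch, time, depth], assumed shapes
cell = rnn_cell.BasicLSTMCell(num_units, forget_bias=1.0, state_is_tuple=True)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [batch, time, num_units]; final_state: LSTMStateTuple(c, h)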
Example #1
Source File: test_tf_qrnn_work.py    From tensorflow_end2end_speech_recognition with MIT License
def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        # batch_size x sentence_length x word_length -> sentence_length x batch_size x word_length
        _X = tf.transpose(X, [1, 0, 2])
        _X = tf.reshape(_X, [-1, int(shape[2])])  # (batch_size x sentence_length) x word_length
        seq = tf.split(0, int(shape[1]), _X)  # sentence_length x (batch_size x word_length)

        with tf.name_scope("LSTM"):
            lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
            outputs, states = rnn.rnn(lstm_cell, seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output 
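A hypothetical call to this method; the instance name and all shapes and sizes below are assumptions used only for illustration.

X = tf.placeholder(tf.float32, [32, 40, 100])                 # batch_size x sentence_length x word_length
logits = qrnn_test.baseline_forward(X, size=128, n_class=10)  # [batch_size, n_class]; qrnn_test is an assumed instance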
Example #2
Source File: lw.py    From iwcs2017-answer-selection with Apache License 2.0
def initialize_weights(self):
        cell_size = self.lw_cell_size
        self.dense_weighting_Q = weight_variable('dense_weighting_Q', [cell_size + cell_size, 1])
        self.dense_weighting_A = weight_variable('dense_weighting_A', [cell_size + cell_size, 1])

        with tf.variable_scope('lstm_cell_weighting_Q_fw'):
            self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_Q_bw'):
            self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_A_fw'):
            self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_A_bw'):
            self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True) 
Example #3
Source File: lstm_weighting.py    From acl2017-non-factoid-qa with Apache License 2.0
def initialize_weights(self):
        cell_size = self.lstm_pooling_cell_size
        self.mul_Q = weight_variable('mul_Q', [cell_size * 2, cell_size * 2])
        self.reduction_Q = weight_variable('reduction_Q', [cell_size * 2, 1])
        self.mul_A = weight_variable('mul_A', [cell_size * 2, cell_size * 2])
        self.reduction_A = weight_variable('reduction_A', [cell_size * 2, 1])

        with tf.variable_scope('lstm_cell_weighting_Q_fw'):
            self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_Q_bw'):
            self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_A_fw'):
            self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

        with tf.variable_scope('lstm_cell_weighting_A_bw'):
            self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True) 
Example #4
Source File: lstm1d.py    From lambda-packs with MIT License
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using unrolling and the TensorFlow
  LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)

  """
  with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    output_u = []
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm_cell(inputs_u[i], state)
      output_u += [output]
    if reverse:
      output_u = list(reversed(output_u))
    outputs = array_ops.stack(output_u)
    return outputs 
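A hypothetical call, following the shapes documented in the docstring (the concrete numbers are assumptions):

inputs = tf.placeholder(tf.float32, [20, 8, 64])      # (length, batch_size, ninput)
outputs = ndlstm_base_unrolled(inputs, noutput=32)    # (20, 8, 32)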
Example #5
Source File: lstm1d.py    From lambda-packs with MIT License
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs 
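The dynamic variant takes the same arguments as the unrolled one; the difference is that the time loop runs inside the graph via dynamic_rnn instead of being unrolled in Python. A hypothetical call (shapes assumed):

inputs = tf.placeholder(tf.float32, [20, 8, 64])      # (length, batch_size, ninput)
outputs = ndlstm_base_dynamic(inputs, noutput=32)     # (20, 8, 32)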
Example #6
Source File: lstm1d.py    From lambda-packs with MIT License
def sequence_to_final(inputs, noutput, scope=None, name=None, reverse=False):
  """Run an LSTM across all steps and returns only the final state.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: size of output vector
    scope: optional scope name
    name: optional name for output tensor
    reverse: run in reverse

  Returns:
    Batch of size (batch_size, noutput).
  """
  with variable_scope.variable_scope(scope, "SequenceToFinal", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm.state_size])
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm(inputs_u[i], state)
    outputs = array_ops.reshape(output, [batch_size, noutput], name=name)
    return outputs 
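A hypothetical call, following the docstring shapes (numbers are assumptions); the result is one vector per batch element, taken from the last time step:

inputs = tf.placeholder(tf.float32, [20, 8, 64])               # (length, batch_size, depth)
final = sequence_to_final(inputs, noutput=32, name="final")    # (8, 32)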
Example #7
Source File: lstm.py    From iwcs2017-answer-selection with Apache License 2.0
def initialize_weights(self):
        """Global initialization of weights for the representation layer

        """
        with tf.variable_scope('lstm_cell_fw'):
            self.lstm_cell_forward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
        with tf.variable_scope('lstm_cell_bw'):
            self.lstm_cell_backward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True) 
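These forward/backward cells are consumed elsewhere in the model; the sketch below shows one typical wiring. The names, shapes, and the TF 1.x-style bidirectional_dynamic_rnn and concat calls are assumptions, not the project's actual code.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

cell_size = 128
with tf.variable_scope('lstm_cell_fw'):
    cell_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
with tf.variable_scope('lstm_cell_bw'):
    cell_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)

inputs = tf.placeholder(tf.float32, [None, 40, 300])    # [batch, time, embedding_dim], assumed
(out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, inputs, dtype=tf.float32)
outputs = tf.concat([out_fw, out_bw], axis=2)            # [batch, time, 2 * cell_size]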
Example #8
Source File: TimeSeriesPredictor.py    From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def LSTM_Model():
        """
        :param x: inputs of size [T, batch_size, input_size]
        :param W: matrix of fully-connected output layer weights
        :param b: vector of fully-connected output layer biases
        """
        cell = rnn_cell.BasicLSTMCell(hidden_dim)
        outputs, states = rnn.dynamic_rnn(cell, x, dtype=tf.float32)
        num_examples = tf.shape(x)[0]
        W_repeated = tf.tile(tf.expand_dims(W_out, 0), [num_examples, 1, 1])
        out = tf.matmul(outputs, W_repeated) + b_out
        out = tf.squeeze(out)
        return out 
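The snippet above refers to several names (hidden_dim, x, W_out, b_out) defined elsewhere in the file. A guess at those surrounding definitions; the shapes and values here are assumptions, not the book's code:

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

hidden_dim = 100                                      # LSTM state size (assumed)
seq_size = 5                                          # time steps per example (assumed)
x = tf.placeholder(tf.float32, [None, seq_size, 1])   # input sequences
W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')
b_out = tf.Variable(tf.random_normal([1]), name='b_out')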
Example #9
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self, num_units, forget_bias=1):
    super(Grid1BasicLSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0, tied=False,
        cell_fn=lambda n, i: rnn_cell.BasicLSTMCell(
            num_units=n,
            forget_bias=forget_bias, input_size=i,
            state_is_tuple=False)) 
Example #10
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               forget_bias=1):
    super(Grid2BasicLSTMCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.BasicLSTMCell(
            num_units=n, forget_bias=forget_bias, input_size=i,
            state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn) 
Example #11
Source File: predict.py    From image-classification-rnn with Apache License 2.0
def rnn_model(x, weights, biases):
	"""Build a rnn model for image"""
	x = tf.transpose(x, [1, 0, 2])
	x = tf.reshape(x, [-1, n_input])
	x = tf.split(0, n_steps, x)

	lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
	outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
	return tf.matmul(outputs[-1], weights) + biases 
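rnn_model() relies on module-level globals (n_input, n_steps, n_hidden) and on the pre-1.0 tf.split(axis, num, value) and rnn.rnn APIs. A hypothetical setup and call, with MNIST-style values assumed for illustration:

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

n_input = 28     # pixels per image row (assumed, MNIST-style)
n_steps = 28     # image rows fed as time steps (assumed)
n_hidden = 128   # LSTM units (assumed)
n_classes = 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
weights = tf.Variable(tf.random_normal([n_hidden, n_classes]))
biases = tf.Variable(tf.random_normal([n_classes]))
logits = rnn_model(x, weights, biases)   # [batch, n_classes]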
Example #12
Source File: train.py    From image-classification-rnn with Apache License 2.0
def rnn_model(x, weights, biases):
	"""RNN (LSTM or GRU) model for image"""
	x = tf.transpose(x, [1, 0, 2])
	x = tf.reshape(x, [-1, n_input])
	x = tf.split(0, n_steps, x)

	lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
	outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
	return tf.matmul(outputs[-1], weights) + biases 
Example #13
Source File: bilstm.py    From acl2017-non-factoid-qa with Apache License 2.0
def initialize_weights(self):
        """Global initialization of weights for the representation layer

        """
        with tf.variable_scope('lstm_cell_fw'):
            self.lstm_cell_forward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
        with tf.variable_scope('lstm_cell_bw'):
            self.lstm_cell_backward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True) 
Example #14
Source File: LSTM_model.py    From AssociativeRetrieval with Apache License 2.0
def build_graph(self):
    config = self.config
    self.reader = utils.DataReader(seq_len=config.seq_length, batch_size=config.batch_size, data_filename=config.data_filename)

    self.cell = rnn_cell.BasicLSTMCell(config.rnn_size, state_is_tuple=True)

    self.input_data = tf.placeholder(tf.int32, [None, config.input_length])
    self.targets = tf.placeholder(tf.int32, [None, 1])
    self.initial_state = self.cell.zero_state(tf.shape(self.targets)[0], tf.float32)

    with tf.variable_scope("input_embedding"):
      embedding = tf.get_variable("embedding", [config.vocab_size, config.rnn_size])
      inputs = tf.split(1, config.input_length, tf.nn.embedding_lookup(embedding, self.input_data))
      inputs = [tf.squeeze(input, [1]) for input in inputs]

    with tf.variable_scope("send_to_rnn"):
      state = self.initial_state
      output = None

      for i, input in enumerate(inputs):
        if i > 0:
          tf.get_variable_scope().reuse_variables()
        output, state = self.cell(input, state)

    with tf.variable_scope("softmax"):
      softmax_w = tf.get_variable("softmax_w", [config.rnn_size, config.vocab_size])
      softmax_b = tf.get_variable("softmax_b", [config.vocab_size])
      self.logits = tf.matmul(output, softmax_w) + softmax_b
      self.probs = tf.nn.softmax(self.logits)
      self.output = tf.cast(tf.reshape(tf.arg_max(self.probs, 1), [-1, 1]), tf.int32)
      self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.output, self.targets), tf.float32))

    loss = seq2seq.sequence_loss_by_example([self.logits],
                                            [tf.reshape(self.targets, [-1])],
                                            [tf.ones([config.batch_size])],
                                            config.vocab_size)

    self.cost = tf.reduce_mean(loss)
    self.final_state = state

    # self.lr = tf.Variable(0.001, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                      config.grad_clip)
    optimizer = tf.train.AdamOptimizer()#self.lr)
    self.train_op = optimizer.apply_gradients(zip(grads, tvars))

    self.summary_accuracy = tf.scalar_summary('accuracy', self.accuracy)
    tf.scalar_summary('cost', self.cost)
    self.summary_all = tf.merge_all_summaries() 
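A hypothetical training step for the graph built above, assuming model is an instance of the surrounding class after build_graph() has run and config is its configuration object; the batch here is random data rather than the project's utils.DataReader output:

import numpy as np
import tensorflow as tf

sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_batch = np.random.randint(0, config.vocab_size, size=(config.batch_size, config.input_length)).astype(np.int32)
y_batch = np.random.randint(0, config.vocab_size, size=(config.batch_size, 1)).astype(np.int32)
feed = {model.input_data: x_batch, model.targets: y_batch}
_, cost, acc = sess.run([model.train_op, model.cost, model.accuracy], feed_dict=feed)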
Example #15
Source File: lstm_train.py    From ChatBotCourse with MIT License
def create_model(max_word_id, is_test=False):
    GO_VALUE = max_word_id + 1
    network = tflearn.input_data(shape=[None, max_seq_len + max_seq_len], dtype=tf.int32, name="XY")
    encoder_inputs = tf.slice(network, [0, 0], [-1, max_seq_len], name="enc_in")
    encoder_inputs = tf.unpack(encoder_inputs, axis=1)
    decoder_inputs = tf.slice(network, [0, max_seq_len], [-1, max_seq_len], name="dec_in")
    decoder_inputs = tf.unpack(decoder_inputs, axis=1)
    go_input = tf.mul(tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE)
    decoder_inputs = [go_input] + decoder_inputs[: max_seq_len - 1]
    num_encoder_symbols = max_word_id + 1  # word ids start from 0
    num_decoder_symbols = max_word_id + 2  # includes the GO symbol

    cell = rnn_cell.BasicLSTMCell(16*max_seq_len, state_is_tuple=True)

    model_outputs, states = seq2seq.embedding_rnn_seq2seq(
            encoder_inputs,
            decoder_inputs,
            cell,
            num_encoder_symbols=num_encoder_symbols,
            num_decoder_symbols=num_decoder_symbols,
            embedding_size=max_word_id,
            feed_previous=is_test)

    network = tf.pack(model_outputs, axis=1)

    targetY = tf.placeholder(shape=[None, max_seq_len], dtype=tf.float32, name="Y")

    network = tflearn.regression(
            network,
            placeholder=targetY,
            optimizer='adam',
            learning_rate=learning_rate,
            loss=sequence_loss,
            metric=accuracy,
            name="Y")

    print "begin create DNN model"
    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=None)
    print "create DNN model finish"
    return model