Python tensorflow.python.ops.rnn_cell.LSTMCell() Examples

The following are 17 code examples of tensorflow.python.ops.rnn_cell.LSTMCell(), drawn from open-source projects. Each example is preceded by its source file, project, and license. Note that this module path dates from early TensorFlow; in TensorFlow 1.x the same cell is exposed as tf.nn.rnn_cell.LSTMCell (and via tf.contrib.rnn).
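Before the examples, a minimal usage sketch may help orient readers. It assumes the TensorFlow 1.x-style API (tf.nn.rnn_cell.LSTMCell and tf.nn.dynamic_rnn); the shapes and names are illustrative, not taken from any of the projects below.

import tensorflow as tf

# Toy batch: 4 sequences, 10 time steps, 8 features per step (illustrative).
inputs = tf.placeholder(tf.float32, shape=[4, 10, 8])

# Build a single LSTM cell and unroll it over the time dimension.
cell = tf.nn.rnn_cell.LSTMCell(num_units=16)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [4, 10, 16]; state: LSTMStateTuple with c and h each of shape [4, 16]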
Example #1
Source File: nns.py    From Dense_BiLSTM with MIT License
def __init__(self, num_layers, num_units, scope='stacked_bi_rnn'):
        self.num_layers = num_layers
        self.num_units = num_units
        self.cells_fw = [LSTMCell(self.num_units) for _ in range(self.num_layers)]
        self.cells_bw = [LSTMCell(self.num_units) for _ in range(self.num_layers)]
        self.scope = scope 
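One plausible way to consume these per-layer cell lists is tf.contrib.rnn.stack_bidirectional_dynamic_rnn; a hedged sketch of that call, with inputs and seq_len as illustrative placeholders:

# Sketch: wire the per-layer cell lists into a stacked bi-directional RNN.
# `inputs` is [batch, time, depth]; `seq_len` holds the true sequence lengths.
outputs, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
    self.cells_fw, self.cells_bw, inputs,
    sequence_length=seq_len, dtype=tf.float32, scope=self.scope)
# outputs: [batch, time, 2 * num_units] (forward and backward concatenated)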
Example #2
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self, num_units, use_peepholes=False, forget_bias=1.0):
    super(Grid1LSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, use_peepholes=use_peepholes,
            forget_bias=forget_bias, state_is_tuple=False)) 
Example #3
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid2LSTMCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn) 
Example #4
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid3LSTMCell, self).__init__(
        num_units=num_units, num_dims=3,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn) 
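Examples #2-#4 repeat the same pattern for 1-, 2-, and 3-dimensional grids: each passes a cell_fn lambda so every grid dimension gets its own LSTMCell. Constructing one is then a one-liner; a sketch, assuming the surrounding grid_rnn_cell module is on the import path:

# Sketch: a 2-D grid LSTM cell with peephole connections enabled.
cell = Grid2LSTMCell(num_units=64, use_peepholes=True, forget_bias=1.0)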
Example #5
Source File: rnn.py    From rnnprop with MIT License
def _build_pre(self):
        self.dimA = 20
        # Build one cell object per layer: reusing a single LSTMCell instance
        # inside MultiRNNCell is rejected by TensorFlow >= 1.1.
        self.cellA = MultiRNNCell([LSTMCell(self.dimA) for _ in range(2)])
        self.b1 = 0.95
        self.b2 = 0.95
        self.lr = 0.1
        self.eps = 1e-8 
Example #6
Source File: deepmind.py    From rnnprop with MIT License
def _build_pre(self):
        self.dimH = 20
        # One cell object per layer (see the note in Example #5).
        self.cellH = MultiRNNCell([LSTMCell(self.dimH) for _ in range(2)])
        self.lr = 0.1 
Example #7
Source File: seq2seq_model.py    From AmusingPythonCodes with MIT License
def _create_rnn_cell(self):
        cell = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else LSTMCell(self.cfg.num_units)
        if self.cfg.use_dropout:
            cell = DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        if self.cfg.use_residual:
            cell = ResidualWrapper(cell)
        return cell 
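A single-cell factory like this is typically mapped over a layer count to build a stacked cell. A hedged sketch, assuming MultiRNNCell is imported and cfg carries a num_layers entry:

# Sketch: stack independent copies produced by the factory above.
cells = MultiRNNCell([self._create_rnn_cell() for _ in range(self.cfg.num_layers)])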
Example #8
Source File: lstm_ops.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_units,
               forget_bias=1.0,
               use_peephole=False,
               use_compatible_names=False):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates.
      use_peephole: Whether to use peephole connections or not.
      use_compatible_names: If True, use the same variable naming as
        rnn_cell.LSTMCell.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._use_peephole = use_peephole
    if use_compatible_names:
      self._names = {
          "W": "W_0",
          "b": "B",
          "wci": "W_I_diag",
          "wco": "W_O_diag",
          "wcf": "W_F_diag",
          "scope": "LSTMCell"
      }
    else:
      self._names = {
          "W": "W",
          "b": "b",
          "wci": "wci",
          "wco": "wco",
          "wcf": "wcf",
          "scope": "LSTMBlockCell"
      } 
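The point of use_compatible_names, per the docstring, is checkpoint interchangeability: with it enabled, variables get the same names rnn_cell.LSTMCell would produce. A hedged sketch (the checkpoint path is a placeholder):

# Sketch: because the variable names match, a checkpoint written by a graph
# built with rnn_cell.LSTMCell can be restored into one built with this cell.
saver = tf.train.Saver()
saver.restore(sess, "/path/to/lstm_checkpoint")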
Example #9
Source File: rnns.py    From AmusingPythonCodes with MIT License
def __init__(self, num_units, cell_type='lstm', scope='bi_rnn'):
        self.cell_fw = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.cell_bw = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.scope = scope 
Example #10
Source File: rnns.py    From AmusingPythonCodes with MIT License
def __init__(self, num_units, memory, pmemory, cell_type='lstm'):
        super(AttentionCell, self).__init__()
        self._cell = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.num_units = num_units
        self.memory = memory
        self.pmemory = pmemory
        self.mem_units = memory.get_shape().as_list()[-1] 
Example #11
Source File: base_model.py    From neural_sequence_labeling with MIT License
def _create_single_rnn_cell(self, num_units):
        cell = GRUCell(num_units) if self.cfg["cell_type"] == "gru" else LSTMCell(num_units)
        return cell 
Example #12
Source File: multi_attention_model.py    From neural_sequence_labeling with MIT License
def _create_single_rnn_cell(self, num_units):
        cell = GRUCell(num_units) if self.cfg["cell_type"] == "gru" else LSTMCell(num_units)
        if self.cfg["use_dropout"]:
            cell = DropoutWrapper(cell, output_keep_prob=self.rnn_keep_prob)
        if self.cfg["use_residual"]:
            cell = ResidualWrapper(cell)
        return cell 
Example #13
Source File: nns.py    From neural_sequence_labeling with MIT License
def __init__(self, num_units, cell_type='lstm', scope=None):
        self.cell_fw = GRUCell(num_units) if cell_type == 'gru' else LSTMCell(num_units)
        self.cell_bw = GRUCell(num_units) if cell_type == 'gru' else LSTMCell(num_units)
        self.scope = scope or "bi_rnn" 
Example #14
Source File: nns.py    From neural_sequence_labeling with MIT License
def __init__(self, num_units, memory, pmemory, cell_type='lstm'):
        super(AttentionCell, self).__init__()
        self._cell = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.num_units = num_units
        self.memory = memory
        self.pmemory = pmemory
        self.mem_units = memory.get_shape().as_list()[-1] 
Example #15
Source File: nns.py    From Dense_BiLSTM with MIT License
def __init__(self, num_units, scope='bi_rnn'):
        self.num_units = num_units
        self.cell_fw = LSTMCell(self.num_units)
        self.cell_bw = LSTMCell(self.num_units)
        self.scope = scope 
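A cell pair like this is usually driven by tf.nn.bidirectional_dynamic_rnn; a hedged sketch, with inputs and seq_len as illustrative placeholders:

# Sketch: run the forward and backward cells over a [batch, time, depth] input.
with tf.variable_scope(self.scope):
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        self.cell_fw, self.cell_bw, inputs,
        sequence_length=seq_len, dtype=tf.float32)
    outputs = tf.concat([out_fw, out_bw], axis=-1)  # [batch, time, 2 * num_units]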
Example #16
Source File: seq2seq_model.py    From AmusingPythonCodes with MIT License
def _build_model(self):
        with tf.variable_scope("embeddings"):
            self.source_embs = tf.get_variable(name="source_embs", shape=[self.cfg.source_vocab_size, self.cfg.emb_dim],
                                               dtype=tf.float32, trainable=True)
            self.target_embs = tf.get_variable(name="embeddings", shape=[self.cfg.vocab_size, self.cfg.emb_dim],
                                               dtype=tf.float32, trainable=True)
            source_emb = tf.nn.embedding_lookup(self.source_embs, self.enc_source)
            target_emb = tf.nn.embedding_lookup(self.target_embs, self.dec_target_in)
            print("source embedding shape: {}".format(source_emb.get_shape().as_list()))
            print("target input embedding shape: {}".format(target_emb.get_shape().as_list()))

        with tf.variable_scope("encoder"):
            if self.cfg.use_bi_rnn:
                with tf.variable_scope("bi-directional_rnn"):
                    cell_fw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                        LSTMCell(self.cfg.num_units)
                    cell_bw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                        LSTMCell(self.cfg.num_units)
                    bi_outputs, _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, source_emb, dtype=tf.float32,
                                                              sequence_length=self.enc_seq_len)
                    source_emb = tf.concat(bi_outputs, axis=-1)
                    print("bi-directional rnn output shape: {}".format(source_emb.get_shape().as_list()))
            input_project = tf.layers.Dense(units=self.cfg.num_units, dtype=tf.float32, name="input_projection")
            source_emb = input_project(source_emb)
            print("encoder input projection shape: {}".format(source_emb.get_shape().as_list()))
            enc_cells = self._create_encoder_cell()
            self.enc_outputs, self.enc_states = dynamic_rnn(enc_cells, source_emb, sequence_length=self.enc_seq_len,
                                                            dtype=tf.float32)
            print("encoder output shape: {}".format(self.enc_outputs.get_shape().as_list()))

        with tf.variable_scope("decoder"):
            self.max_dec_seq_len = tf.reduce_max(self.dec_seq_len, name="max_dec_seq_len")
            self.dec_cells, self.dec_init_states = self._create_decoder_cell()
            # define input and output projection layer
            input_project = tf.layers.Dense(units=self.cfg.num_units, name="input_projection")
            self.dense_layer = tf.layers.Dense(units=self.cfg.vocab_size, name="output_projection")
            if self.mode == "train":  # either "train" or "decode"
                # for training
                target_emb = input_project(target_emb)
                train_helper = TrainingHelper(target_emb, sequence_length=self.dec_seq_len, name="train_helper")
                train_decoder = BasicDecoder(self.dec_cells, helper=train_helper, output_layer=self.dense_layer,
                                             initial_state=self.dec_init_states)
                self.dec_output, _, _ = dynamic_decode(train_decoder, impute_finished=True,
                                                       maximum_iterations=self.max_dec_seq_len)
                print("decoder output shape: {} (vocab size)".format(self.dec_output.rnn_output.get_shape().as_list()))

                # for decode
                start_token = tf.ones(shape=[self.batch_size, ], dtype=tf.int32) * self.cfg.target_dict[GO]
                end_token = self.cfg.target_dict[EOS]

                def inputs_project(inputs):
                    return input_project(tf.nn.embedding_lookup(self.target_embs, inputs))

                dec_helper = GreedyEmbeddingHelper(embedding=inputs_project, start_tokens=start_token,
                                                   end_token=end_token)
                infer_decoder = BasicDecoder(self.dec_cells, helper=dec_helper, initial_state=self.dec_init_states,
                                             output_layer=self.dense_layer)
                infer_dec_output, _, _ = dynamic_decode(infer_decoder, maximum_iterations=self.cfg.maximum_iterations)
                self.dec_predicts = infer_dec_output.sample_id 
Example #17
Source File: grid_rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_units,
               num_dims=1,
               input_dims=None,
               output_dims=None,
               priority_dims=None,
               non_recurrent_dims=None,
               tied=False,
               cell_fn=None,
               non_recurrent_fn=None):
    """Initialize the parameters of a Grid RNN cell

    Args:
      num_units: int, The number of units in all dimensions of this GridRNN cell
      num_dims: int, Number of dimensions of this grid.
      input_dims: int or list, List of dimensions which will receive input data.
      output_dims: int or list, List of dimensions from which the output will be
        recorded.
      priority_dims: int or list, List of dimensions to be considered as
        priority dimensions.
              If None, no dimension is prioritized.
      non_recurrent_dims: int or list, List of dimensions that are not
        recurrent.
              The transfer function for non-recurrent dimensions is specified
                via `non_recurrent_fn`,
              which is default to be `tensorflow.nn.relu`.
      tied: bool, Whether to share the weights among the dimensions of this
        GridRNN cell.
              If there are non-recurrent dimensions in the grid, weights are
                shared between each
              group of recurrent and non-recurrent dimensions.
      cell_fn: function, a function which returns the recurrent cell object. Has
        to be in the following signature:
              def cell_func(num_units, input_size):
                # ...

              and returns an object of type `RNNCell`. If None, LSTMCell with
                default parameters will be used.
      non_recurrent_fn: a tensorflow Op that will be the transfer function of
        the non-recurrent dimensions
    """
    if num_dims < 1:
      raise ValueError('dims must be >= 1: {}'.format(num_dims))

    self._config = _parse_rnn_config(num_dims, input_dims, output_dims,
                                     priority_dims, non_recurrent_dims,
                                     non_recurrent_fn or nn.relu, tied,
                                     num_units)

    cell_input_size = (self._config.num_dims - 1) * num_units
    if cell_fn is None:
      self._cell = rnn_cell.LSTMCell(
          num_units=num_units, input_size=cell_input_size, state_is_tuple=False)
    else:
      self._cell = cell_fn(num_units, cell_input_size)
      if not isinstance(self._cell, rnn_cell.RNNCell):
        raise ValueError('cell_fn must return an object of type RNNCell')
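Per the docstring, a custom cell_fn must accept (num_units, input_size) and return an RNNCell. A hedged sketch of such a factory, assuming the base class shown here is named GridRNNCell as in tf.contrib.grid_rnn (the peephole choice is illustrative):

# Sketch: a peephole-LSTM factory matching the documented cell_fn signature.
def peephole_lstm_fn(num_units, input_size):
    return rnn_cell.LSTMCell(num_units=num_units, input_size=input_size,
                             use_peepholes=True, state_is_tuple=False)

grid_cell = GridRNNCell(num_units=128, num_dims=2, input_dims=0,
                        output_dims=0, cell_fn=peephole_lstm_fn)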