Python tensorflow.contrib.rnn.MultiRNNCell() Examples
The following are 29 code examples of tensorflow.contrib.rnn.MultiRNNCell(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.rnn.
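Before the project-specific examples, here is a minimal sketch of the pattern they all share: stack several cells with MultiRNNCell, then unroll the stack with a tf.nn RNN op. This is a hedged illustration (TensorFlow 1.x, where tf.contrib exists); the placeholder shapes and n_hidden size are assumptions, not taken from any project below.

import tensorflow as tf
from tensorflow.contrib import rnn

n_hidden = 64
inputs = tf.placeholder(tf.float32, [None, 20, 32])  # (batch, time, features)

# Build one fresh cell per layer; reusing a single cell object across
# layers can trigger variable-sharing errors in later TF 1.x releases.
stacked = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden) for _ in range(2)])
outputs, final_state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)
# outputs: (batch, time, n_hidden); final_state: one LSTMStateTuple per layer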
Example #1
Source File: word_rnn.py From tensorflow-nlp-examples with MIT License
def RNN(x, weights, biases):

    # reshape to [1, n_input]
    x = tf.reshape(x, [-1, n_input])

    # Generate a n_input-element sequence of inputs
    # (e.g. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x, n_input, 1)

    # 2-layer LSTM, each layer has n_hidden units.
    # Average Accuracy = 95.20% at 50k iter
    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),
                                 rnn.BasicLSTMCell(n_hidden)])

    # 1-layer LSTM with n_hidden units but with lower accuracy.
    # Average Accuracy = 90.60% at 50k iter
    # Uncomment the line below to test it, but comment out the 2-layer
    # rnn.MultiRNNCell above
    # rnn_cell = rnn.BasicLSTMCell(n_hidden)

    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # there are n_input outputs but we only want the last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Example #2
Source File: rnn.py From chemopt with MIT License
def __init__(self, nlayers, num_units, input_size=None, use_peepholes=False,
             cell_clip=None, initializer=None, num_proj=None, proj_clip=None,
             num_unit_shards=1, num_proj_shards=1, forget_bias=1.0,
             state_is_tuple=True, activation=tanh):
    super(MultiInputLSTM, self).__init__(
        num_units, input_size=None, use_peepholes=False, cell_clip=None,
        initializer=None, num_proj=None, proj_clip=None, num_unit_shards=1,
        num_proj_shards=1, forget_bias=1.0, state_is_tuple=True,
        activation=tanh)
    self.cell = super(MultiInputLSTM, self).__call__
    if nlayers > 1:
        self.cell = MultiRNNCell([self.cell] * nlayers)
    self.nlayers = nlayers
Example #3
Source File: stacked_lstm.py From leaf with BSD 2-Clause "Simplified" License
def create_model(self):
    features = tf.placeholder(tf.int32, [None, self.seq_len])
    embedding = tf.get_variable("embedding", [self.num_classes, 8])
    x = tf.nn.embedding_lookup(embedding, features)
    labels = tf.placeholder(tf.int32, [None, self.num_classes])

    stacked_lstm = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self.n_hidden) for _ in range(2)])
    outputs, _ = tf.nn.dynamic_rnn(stacked_lstm, x, dtype=tf.float32)
    pred = tf.layers.dense(inputs=outputs[:, -1, :], units=self.num_classes)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels))
    train_op = self.optimizer.minimize(
        loss=loss, global_step=tf.train.get_global_step())
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
    eval_metric_ops = tf.count_nonzero(correct_pred)

    return features, labels, train_op, eval_metric_ops, loss
Example #4
Source File: _rnn.py From DeepChatModels with MIT License
def __init__(self, state_size, num_layers, dropout_prob, base_cell):
    """Define the cell by composing/wrapping with tf.contrib.rnn functions.

    Args:
        state_size: number of units in the cell.
        num_layers: how many cells to include in the MultiRNNCell.
        dropout_prob: probability of a node being dropped.
        base_cell: (str) name of underlying cell to use (e.g. 'GRUCell')
    """
    self._state_size = state_size
    self._num_layers = num_layers
    self._dropout_prob = dropout_prob
    self._base_cell = base_cell

    def single_cell():
        """Convert cell name (str) to class, and create it."""
        return getattr(tf.contrib.rnn, base_cell)(num_units=state_size)

    if num_layers == 1:
        self._cell = single_cell()
    else:
        self._cell = MultiRNNCell(
            [single_cell() for _ in range(num_layers)])
Example #5
Source File: _rnn.py From DeepChatModels with MIT License
def __init__(self, state_size, embed_size, dropout_prob, num_layers,
             base_cell="GRUCell", state_wrapper=None):
    """
    Args:
        state_size: number of units in the underlying rnn cell.
        embed_size: dimension size of word-embedding space.
        dropout_prob: probability of a node being dropped.
        num_layers: how many cells to include in the MultiRNNCell.
        base_cell: (str) name of underlying cell to use (e.g. 'GRUCell')
        state_wrapper: allow states to store their wrapper class. See the
            wrapper method docstring below for more info.
    """
    self.state_size = state_size
    self.embed_size = embed_size
    self.num_layers = num_layers
    self.dropout_prob = dropout_prob
    self.base_cell = base_cell
    self._wrapper = state_wrapper
Example #6
Source File: stacked_lstm.py From leaf with BSD 2-Clause "Simplified" License
def create_model(self):
    features = tf.placeholder(tf.int32, [None, self.seq_len])
    embedding = tf.get_variable(
        'embedding', [self.vocab_size + 1, self.n_hidden], dtype=tf.float32)
    x = tf.cast(tf.nn.embedding_lookup(embedding, features), tf.float32)
    labels = tf.placeholder(tf.float32, [None, self.num_classes])

    stacked_lstm = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self.n_hidden) for _ in range(2)])
    outputs, _ = tf.nn.dynamic_rnn(stacked_lstm, x, dtype=tf.float32)
    fc1 = tf.layers.dense(inputs=outputs[:, -1, :], units=128)
    pred = tf.layers.dense(inputs=fc1, units=self.num_classes)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels))
    train_op = self.optimizer.minimize(
        loss=loss, global_step=tf.train.get_global_step())
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
    eval_metric_ops = tf.count_nonzero(correct_pred)

    return features, labels, train_op, eval_metric_ops, loss
Example #7
Source File: model.py From Machine-Learning-Study-Notes with Apache License 2.0
def build_lstm(self):
    def build_cell():
        cell = rnn.BasicLSTMCell(self._hidden_size, forget_bias=1.0,
                                 state_is_tuple=True)
        cell = rnn.DropoutWrapper(cell, output_keep_prob=self._keep_prob)
        return cell

    mul_cell = rnn.MultiRNNCell([build_cell() for _ in range(self._num_layer)],
                                state_is_tuple=True)
    self._init_state = mul_cell.zero_state(self._num_seq, dtype=tf.float32)
    outputs, self._final_state = tf.nn.dynamic_rnn(
        mul_cell, self._inputs, initial_state=self._init_state)
    outputs = tf.reshape(outputs, [-1, self._hidden_size])
    W = tf.Variable(tf.truncated_normal(
        [self._hidden_size, self._corpus.word_num], stddev=0.1, dtype=tf.float32))
    bias = tf.Variable(tf.zeros([1, self._corpus.word_num], dtype=tf.float32),
                       dtype=tf.float32)
    self._prediction = tf.nn.softmax(tf.matmul(outputs, W) + bias)
Example #8
Source File: Model.py From Deep-Lyrics with MIT License
def build(self, input_number, sequence_length, layers_number, units_number,
          output_number):
    self.x = tf.placeholder("float", [None, sequence_length, input_number])
    self.y = tf.placeholder("float", [None, output_number])
    self.sequence_length = sequence_length

    self.weights = {
        'out': tf.Variable(tf.random_normal([units_number, output_number]))
    }
    self.biases = {
        'out': tf.Variable(tf.random_normal([output_number]))
    }

    x = tf.transpose(self.x, [1, 0, 2])
    x = tf.reshape(x, [-1, input_number])
    x = tf.split(x, sequence_length, 0)

    lstm_layers = []
    for i in range(0, layers_number):
        lstm_layer = rnn.BasicLSTMCell(units_number)
        lstm_layers.append(lstm_layer)
    deep_lstm = rnn.MultiRNNCell(lstm_layers)

    self.outputs, states = rnn.static_rnn(deep_lstm, x, dtype=tf.float32)

    print("Build model with input_number: {}, sequence_length: {}, "
          "layers_number: {}, units_number: {}, output_number: {}".format(
              input_number, sequence_length, layers_number,
              units_number, output_number))

    self.save(input_number, sequence_length, layers_number, units_number,
              output_number)
Example #9
Source File: rnn.py From anticipating-activities with MIT License
def __build(self):
    w_fc_in = self.__weight_variable([self.nClasses + 1, 128], 'w_fc_in')
    b_fc_in = self.__bias_variable([128], 'b_fc_in')
    w_fc_o = self.__weight_variable([self.rnn_size, 128], 'w_fc_o')
    b_fc_o = self.__bias_variable([128], 'b_fc_o')
    w_output_action = self.__weight_variable([128, self.nClasses], 'w_fc_in')
    b_output_action = self.__bias_variable([self.nClasses], 'b_fc_in')
    w_output_len = self.__weight_variable([128, 2], 'w_fc_in')
    b_output_len = self.__bias_variable([2], 'b_fc_in')

    x = tf.reshape(self.input_seq, [-1, self.nClasses + 1])
    h1 = tf.nn.relu(tf.matmul(x, w_fc_in) + b_fc_in)
    h1 = tf.reshape(h1, [-1, self.max_seq_sz, 128])

    # rnn
    h1 = tf.unstack(h1, axis=1)
    def get_cell():
        return rnn.GRUCell(self.rnn_size)
    gru_cell = rnn.MultiRNNCell([get_cell() for _ in range(self.num_layers)])
    outputs, states = rnn.static_rnn(gru_cell, h1, dtype=tf.float32)

    # fc_o
    h2 = tf.nn.relu(tf.matmul(outputs[-1], w_fc_o) + b_fc_o)

    # output
    output_label = tf.matmul(h2, w_output_action) + b_output_action
    output_len = tf.nn.relu(tf.matmul(h2, w_output_len) + b_output_len)

    self.prediction = tf.concat([output_label, output_len], 1)
    self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,
                                max_to_keep=100)
Example #10
Source File: lstm_crf_layer.py From KBQA-BERT with MIT License
def blstm_layer(self, embedding_chars):
    """
    :return:
    """
    with tf.variable_scope('rnn_layer'):
        cell_fw, cell_bw = self._bi_dir_rnn()
        if self.num_layers > 1:
            cell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers,
                                       state_is_tuple=True)
            cell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers,
                                       state_is_tuple=True)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                     embedding_chars,
                                                     dtype=tf.float32)
        outputs = tf.concat(outputs, axis=2)
    return outputs
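Note that rnn.MultiRNNCell([cell_fw] * self.num_layers) places the same cell object at every layer, which later TensorFlow 1.x releases reject with a variable-reuse error. A hedged sketch of the usual fix, assuming self._bi_dir_rnn() constructs fresh cells on each call (an assumption, not part of the original file):

# Hypothetical variant: one fresh forward/backward cell pair per layer,
# instead of reusing a single instance across all layers.
if self.num_layers > 1:
    pairs = [self._bi_dir_rnn() for _ in range(self.num_layers)]
    cell_fw = rnn.MultiRNNCell([fw for fw, _ in pairs], state_is_tuple=True)
    cell_bw = rnn.MultiRNNCell([bw for _, bw in pairs], state_is_tuple=True)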
Example #11
Source File: predictor_builder.py From aster with MIT License
def _build_language_model_rnn_cell(config):
    if not isinstance(config, predictor_pb2.LanguageModelRnnCell):
        raise ValueError('config not of type predictor_pb2.LanguageModelRnnCell')
    rnn_cell_list = [
        rnn_cell_builder.build(rnn_cell_config)
        for rnn_cell_config in config.rnn_cell
    ]
    lm_rnn_cell = rnn.MultiRNNCell(rnn_cell_list)
    return lm_rnn_cell
Example #12
Source File: tacotron_v2.py From tacotron2 with BSD 3-Clause "New" or "Revised" License
def __init__(self, out_units, attention_cell: AttentionRNN, is_training,
             zoneout_factor_cell=0.0, zoneout_factor_output=0.0,
             lstm_impl=LSTMImpl.LSTMCell, trainable=True, name=None,
             dtype=None, **kwargs):
    super(DecoderRNNV2, self).__init__(name=name, trainable=trainable, **kwargs)

    self._cell = MultiRNNCell([
        attention_cell,
        ZoneoutLSTMCell(out_units, is_training, zoneout_factor_cell,
                        zoneout_factor_output, lstm_impl=lstm_impl, dtype=dtype),
        ZoneoutLSTMCell(out_units, is_training, zoneout_factor_cell,
                        zoneout_factor_output, lstm_impl=lstm_impl, dtype=dtype),
    ], state_is_tuple=True)
Example #13
Source File: lstm_utils.py From synvae with MIT License
def rnn_cell(rnn_cell_size, dropout_keep_prob, residual, is_training=True):
    """Builds an LSTMBlockCell based on the given parameters."""
    dropout_keep_prob = dropout_keep_prob if is_training else 1.0
    cells = []
    for i in range(len(rnn_cell_size)):
        cell = rnn.LSTMBlockCell(rnn_cell_size[i])
        if residual:
            cell = rnn.ResidualWrapper(cell)
            if i == 0 or rnn_cell_size[i] != rnn_cell_size[i - 1]:
                cell = rnn.InputProjectionWrapper(cell, rnn_cell_size[i])
        cell = rnn.DropoutWrapper(
            cell, input_keep_prob=dropout_keep_prob)
        cells.append(cell)
    return rnn.MultiRNNCell(cells)
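A hypothetical call, for orientation (the layer sizes and keep probability are illustrative, not from the original file):

# Two stacked residual LSTMBlockCells of 256 units, with input dropout;
# the first layer gets an input projection to match the residual add.
cell = rnn_cell([256, 256], dropout_keep_prob=0.9, residual=True)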
Example #14
Source File: rnn_cell_util.py From tanda with MIT License
def _build_cell(self, m, n_stack=1, wrappers=[]):
    if n_stack == 1:
        cell = self.c(m)
    else:
        cell = rnn.MultiRNNCell([self.c(m) for _ in range(n_stack)])
    # Apply wrappers; use functools.partial to bind other arguments
    for wrapper in wrappers:
        cell = wrapper(cell)
    return cell
Example #15
Source File: base_controller.py From auptimizer with GNU General Public License v3.0
def build_cell(units, cell_type='lstm', num_layers=1):
    if num_layers > 1:
        cell = rnn.MultiRNNCell([
            build_cell(units, cell_type, 1) for _ in range(num_layers)
        ])
    else:
        if cell_type == "lstm":
            cell = rnn.LSTMCell(units)
        elif cell_type == "gru":
            cell = rnn.GRUCell(units)
        else:
            raise ValueError('Do not support %s' % cell_type)
    return cell
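A hypothetical usage sketch (units, type, and depth are illustrative): the recursion builds each layer with num_layers=1 and stacks the results.

# Three stacked GRUCells of 128 units each; batch size of 1 is assumed.
controller_cell = build_cell(128, cell_type='gru', num_layers=3)
state = controller_cell.zero_state(batch_size=1, dtype=tf.float32)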
Example #16
Source File: lstm_crf_layer.py From pynlp with MIT License
def blstm_layer(self, embedding_chars):
    """
    :return:
    """
    with tf.variable_scope('rnn_layer'):
        cell_fw, cell_bw = self._bi_dir_rnn()
        if self.num_layers > 1:
            cell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers,
                                       state_is_tuple=True)
            cell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers,
                                       state_is_tuple=True)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                     embedding_chars,
                                                     dtype=tf.float32)
        outputs = tf.concat(outputs, axis=2)
    return outputs
Example #17
Source File: ck_model.py From cutkum with MIT License
def _inference(self):
    logging.info('...create inference')
    fw_state_tuple = self.unstack_fw_states(self.fw_state)

    fw_cells = list()
    for i in range(0, self.num_layers):
        if (self.cell_type == 'lstm'):
            cell = rnn.LSTMCell(num_units=self.cell_sizes[i],
                                state_is_tuple=True)
        elif (self.cell_type == 'gru'):
            # change to GRU
            cell = rnn.GRUCell(num_units=self.cell_sizes[i])
        else:
            cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])
        cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        fw_cells.append(cell)
    self.fw_cells = rnn.MultiRNNCell(fw_cells, state_is_tuple=True)

    rnn_outputs, states = tf.nn.dynamic_rnn(
        self.fw_cells, self.inputs,
        initial_state=fw_state_tuple,
        sequence_length=self.seq_lengths,
        dtype=tf.float32, time_major=True)

    # project output from rnn output size to OUTPUT_SIZE. Sometimes it is
    # worth adding an extra layer here.
    self.projection = lambda x: layers.linear(
        x, num_outputs=self.label_classes, activation_fn=tf.nn.sigmoid)
    self.logits = tf.map_fn(self.projection, rnn_outputs, name="logits")
    self.probs = tf.nn.softmax(self.logits, name="probs")
    self.states = states
    tf.add_to_collection('probs', self.probs)
Example #18
Source File: rnn_base.py From auDeep with GNU General Public License v3.0
def _create_cells(self) -> List[MultiRNNCell]:
    """
    Creates the multilayer-RNN cells required by the architecture of this RNN.

    Returns
    -------
    list of MultiRNNCell
        A list of MultiRNNCells containing one entry if the RNN is
        unidirectional, and two identical entries if the RNN is bidirectional
    """
    cells = [[self._create_rnn_cell() for _ in range(self.num_layers)]
             for _ in range(2 if self.bidirectional else 1)]

    return [MultiRNNCell(x) for x in cells]
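As a hedged sketch of how such a helper might be consumed (the tensors inputs and lengths below are assumptions, not code from auDeep):

# One MultiRNNCell drives a unidirectional RNN; two drive a bidirectional one.
cells = self._create_cells()
if self.bidirectional:
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        cells[0], cells[1], inputs, sequence_length=lengths, dtype=tf.float32)
else:
    outputs, _ = tf.nn.dynamic_rnn(
        cells[0], inputs, sequence_length=lengths, dtype=tf.float32)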
Example #19
Source File: abstract_recurrent_estimator.py From icecaps with MIT License
def build_deep_cell(self, cell_list=None, name=None, return_raw_list=False):
    if name is None:
        name = "cell"
    if cell_list is None:
        cell_list = []
        for i in range(self.hparams.depth):
            cell = self.build_cell(name=name + "_" + str(i))
            cell = DropoutWrapper(cell, output_keep_prob=self.keep_prob)
            cell_list.append(cell)
    if return_raw_list:
        return cell_list
    if len(cell_list) == 1:
        return cell_list[0]
    return MultiRNNCell(cell_list, state_is_tuple=False)
Example #20
Source File: layers.py From R-net with MIT License
def bidirectional_GRU(inputs, inputs_len, cell=None, cell_fn=tf.contrib.rnn.GRUCell,
                      units=Params.attn_size, layers=1, scope="Bidirectional_GRU",
                      output=0, is_training=True, reuse=None):
    '''
    Bidirectional recurrent neural network with GRU cells.

    Args:
        inputs:     rnn input of shape (batch_size, timestep, dim)
        inputs_len: rnn input_len of shape (batch_size, )
        cell:       rnn cell of type RNN_Cell.
        output:     if 0, output returns rnn output for every timestep,
                    if 1, output returns concatenated state of backward
                    and forward rnn.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        if cell is not None:
            (cell_fw, cell_bw) = cell
        else:
            shapes = inputs.get_shape().as_list()
            if len(shapes) > 3:
                inputs = tf.reshape(inputs, (shapes[0] * shapes[1], shapes[2], -1))
                inputs_len = tf.reshape(inputs_len, (shapes[0] * shapes[1],))

            # if no cells are provided, use standard GRU cell implementation
            if layers > 1:
                cell_fw = MultiRNNCell([apply_dropout(
                    cell_fn(units), size=inputs.shape[-1] if i == 0 else units,
                    is_training=is_training) for i in range(layers)])
                cell_bw = MultiRNNCell([apply_dropout(
                    cell_fn(units), size=inputs.shape[-1] if i == 0 else units,
                    is_training=is_training) for i in range(layers)])
            else:
                cell_fw, cell_bw = [apply_dropout(
                    cell_fn(units), size=inputs.shape[-1],
                    is_training=is_training) for _ in range(2)]

        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, sequence_length=inputs_len, dtype=tf.float32)
        if output == 0:
            return tf.concat(outputs, 2)
        elif output == 1:
            return tf.reshape(tf.concat(states, 1),
                              (Params.batch_size, shapes[1], 2 * units))
Example #21
Source File: rnn.py From chemopt with MIT License
def __init__(self, cell, kwargs, nlayers=1, reuse=False):
    self.cell = cell(**kwargs, name="lstm")
    self.nlayers = nlayers
    self.rnncell = self.cell
    if nlayers > 1:
        if reuse:
            self.rnncell = MultiRNNCell([self.cell] * nlayers)
        else:
            self.rnncell = MultiRNNCell(
                [cell(**kwargs, name='lstm_{}'.format(i)) for i in range(nlayers)])
Example #22
Source File: dynamic_rnn_estimator.py From keras-lambda with MIT License
def _to_rnn_cell(cell_or_type, num_units, num_layers):
    """Constructs and returns an `RNNCell`.

    Args:
        cell_or_type: Either a string identifying the `RNNCell` type, a
            subclass of `RNNCell` or an instance of an `RNNCell`.
        num_units: The number of units in the `RNNCell`.
        num_layers: The number of layers in the RNN.

    Returns:
        An initialized `RNNCell`.

    Raises:
        ValueError: `cell_or_type` is an invalid `RNNCell` name.
        TypeError: `cell_or_type` is not a string or a subclass of `RNNCell`.
    """
    if isinstance(cell_or_type, contrib_rnn.RNNCell):
        return cell_or_type
    if isinstance(cell_or_type, str):
        cell_or_type = _CELL_TYPES.get(cell_or_type)
        if cell_or_type is None:
            raise ValueError('The supported cell types are {}; got {}'.format(
                list(_CELL_TYPES.keys()), cell_or_type))
    if not issubclass(cell_or_type, contrib_rnn.RNNCell):
        raise TypeError(
            'cell_or_type must be a subclass of RNNCell or one of {}.'.format(
                list(_CELL_TYPES.keys())))
    cell = cell_or_type(num_units=num_units)
    if num_layers > 1:
        cell = contrib_rnn.MultiRNNCell(
            [cell] * num_layers, state_is_tuple=True)
    return cell
Example #23
Source File: base.py From RecommenderSystems with MIT License
def __call__(self, inputs, mask):
    '''
    inputs: the embeddings of a batch of sequences. (batch_size, seq_length, emb_size)
    mask:   mask for incomplete sequences. (batch_size, seq_length, 1)
    '''
    cells = []
    for _ in range(self.layers):
        cell = rnn.BasicLSTMCell(self.hidden_units,
                                 activation=self.hidden_activation)
        cell = rnn.DropoutWrapper(cell, output_keep_prob=1. - self.dropout)
        cells.append(cell)
    self.cell = cell = rnn.MultiRNNCell(cells)

    zero_state = cell.zero_state(tf.shape(inputs)[0], dtype=tf.float32)
    sequence_length = tf.count_nonzero(tf.squeeze(mask, [-1]), -1)
    outputs, state = tf.nn.dynamic_rnn(cell, inputs,
                                       sequence_length=sequence_length,
                                       initial_state=zero_state)
    return outputs
Example #24
Source File: cell.py From glas with Apache License 2.0
def cell(self):
    """ Return the cell """
    with tf.variable_scope(self.variable_scope, reuse=self.reuse):
        cell = rnn.LSTMCell(self.num_units, reuse=self.reuse)
        if self.num_layers > 1:
            cell = rnn.MultiRNNCell([cell] * self.num_layers)
        return cell
Example #25
Source File: rnn_common.py From lambda-packs with MIT License
def construct_rnn_cell(num_units, cell_type='basic_rnn',
                       dropout_keep_probabilities=None):
    """Constructs cells, applies dropout and assembles a `MultiRNNCell`.

    The cell type chosen by DynamicRNNEstimator.__init__() is the same as
    returned by this function when called with the same arguments.

    Args:
        num_units: A single `int` or a list/tuple of `int`s. The size of the
            `RNNCell`s.
        cell_type: A string identifying the `RNNCell` type or a subclass of
            `RNNCell`.
        dropout_keep_probabilities: a list of dropout probabilities or `None`.
            If a list is given, it must have length `len(num_units) + 1`.

    Returns:
        An initialized `RNNCell`.
    """
    if not isinstance(num_units, (list, tuple)):
        num_units = (num_units,)

    cells = [_get_single_cell(cell_type, n) for n in num_units]
    if dropout_keep_probabilities:
        cells = apply_dropout(cells, dropout_keep_probabilities)
    if len(cells) == 1:
        return cells[0]
    return contrib_rnn.MultiRNNCell(cells)
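A hypothetical call (sizes and keep probabilities are illustrative): note that the dropout list needs one more entry than there are cells, bracketing the stack's input and output.

# Two layers of 64 and 32 units; three keep-probs bracket the two cells.
cell = construct_rnn_cell(num_units=[64, 32], cell_type='lstm',
                          dropout_keep_probabilities=[1.0, 0.8, 1.0])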
Example #26
Source File: dynamic_rnn_estimator.py From auto-alt-text-lambda-api with MIT License
def _to_rnn_cell(cell_or_type, num_units, num_layers):
    """Constructs and returns an `RNNCell`.

    Args:
        cell_or_type: Either a string identifying the `RNNCell` type, a
            subclass of `RNNCell` or an instance of an `RNNCell`.
        num_units: The number of units in the `RNNCell`.
        num_layers: The number of layers in the RNN.

    Returns:
        An initialized `RNNCell`.

    Raises:
        ValueError: `cell_or_type` is an invalid `RNNCell` name.
        TypeError: `cell_or_type` is not a string or a subclass of `RNNCell`.
    """
    if isinstance(cell_or_type, contrib_rnn.RNNCell):
        return cell_or_type
    if isinstance(cell_or_type, str):
        cell_or_type = _CELL_TYPES.get(cell_or_type)
        if cell_or_type is None:
            raise ValueError('The supported cell types are {}; got {}'.format(
                list(_CELL_TYPES.keys()), cell_or_type))
    if not issubclass(cell_or_type, contrib_rnn.RNNCell):
        raise TypeError(
            'cell_or_type must be a subclass of RNNCell or one of {}.'.format(
                list(_CELL_TYPES.keys())))
    cell = cell_or_type(num_units=num_units)
    if num_layers > 1:
        cell = contrib_rnn.MultiRNNCell(
            [cell] * num_layers, state_is_tuple=True)
    return cell
Example #27
Source File: __init__.py From ADTLib with BSD 2-Clause "Simplified" License
def cell_create(self, scope_name):
    with tf.variable_scope(scope_name):
        if self.cell_type == 'tanh':
            cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i])
                                      for i in range(self.n_layers)],
                                     state_is_tuple=True)
        elif self.cell_type == 'LSTM':
            cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i])
                                      for i in range(self.n_layers)],
                                     state_is_tuple=True)
        elif self.cell_type == 'GRU':
            cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i])
                                      for i in range(self.n_layers)],
                                     state_is_tuple=True)
        elif self.cell_type == 'LSTMP':
            cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i])
                                      for i in range(self.n_layers)],
                                     state_is_tuple=True)
        cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph,
                                   output_keep_prob=self.dropout_ph)
    return cells
Example #28
Source File: seq2seq.py From seq2seq-couplet with GNU Affero General Public License v3.0
def getLayeredCell(layer_size, num_units, input_keep_prob,
                   output_keep_prob=1.0):
    return rnn.MultiRNNCell([rnn.DropoutWrapper(rnn.BasicLSTMCell(num_units),
                                                input_keep_prob,
                                                output_keep_prob)
                             for i in range(layer_size)])
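A hedged usage sketch (layer count, unit size, and keep probabilities are assumptions): a helper like this would typically build the encoder and decoder cells of the seq2seq model.

# Hypothetical calls; arguments are illustrative, not from the original file.
encoder_cell = getLayeredCell(2, 256, input_keep_prob=0.8)
decoder_cell = getLayeredCell(2, 256, input_keep_prob=0.8, output_keep_prob=0.8)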
Example #29
Source File: test_ac_models.py From sonic_contest with MIT License
def test_multi_rnn():
    """
    Test a stacked LSTM with nested tuple state.
    """
    def make_cell():
        return MultiRNNCell([LSTMCell(16), LSTMCell(32)])
    run_ac_test(partial(RNNCellAC, make_cell=make_cell))