Python tflearn.lstm() Examples
The following are 16 code examples of tflearn.lstm(), drawn from open-source projects. The original project, source file, and license are noted above each example.
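Most of the examples share one pattern: feed a 3-D tensor of shape [batch, timesteps, features] through input_data, one or more tflearn.lstm() layers, a fully_connected softmax, and a regression layer, then wrap the graph in tflearn.DNN. Here is a minimal sketch of that pattern; the shapes and sizes (10 timesteps, 8 features, 64 units, 2 classes) are arbitrary placeholders, not taken from any example below.

import tflearn

# Input: (batch, timesteps, features); 10 and 8 are placeholder sizes
net = tflearn.input_data(shape=[None, 10, 8])
# 64 LSTM units; tflearn's dropout argument is a keep probability
net = tflearn.lstm(net, 64, dropout=0.8)
# Classify the last LSTM output into 2 placeholder classes
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)

Several examples below stack LSTM layers instead; they pass return_seq=True to every tflearn.lstm() except the last, so intermediate layers emit the full output sequence rather than only the final step.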
Example #1
Source File: models.py From pygta5 with GNU General Public License v3.0
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model
Example #2
Source File: my_seq2seq_v2.py From ChatBotCourse with MIT License
def model(self, feed_previous=False):
    # Build encoder_inputs, and decoder_inputs with a GO head, from the XY input
    input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim],
                                    dtype=tf.float32, name="XY")
    encoder_inputs = tf.slice(input_data, [0, 0, 0],
                              [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
    decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0],
                                  [-1, self.max_seq_len-1, self.word_vec_dim],
                                  name="dec_in_tmp")
    go_inputs = tf.ones_like(decoder_inputs_tmp)
    go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    # NOTE: tf.concat(dim, values) and tf.pack are pre-1.0 TensorFlow APIs
    # (renamed to tf.concat(values, axis) and tf.stack in TensorFlow 1.0)
    decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")

    # Encoder: hand encoder_inputs to the encoder; it returns an output (the first
    # value of the predicted sequence) and a state (passed on to the decoder)
    (encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim,
                                                   return_state=True,
                                                   scope='encoder_lstm')
    encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)

    # Decoder: at prediction time the previous timestep's output is the next
    # timestep's input; start with the encoder's last output as the first input
    if feed_previous:
        first_dec_input = go_inputs
    else:
        first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim,
                                         initial_state=states, return_seq=False,
                                         reuse=False, scope='decoder_lstm')
    decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
    decoder_output_sequence_list = [decoder_output_tensor]

    # Then feed the decoder's output back in as the input for the next timestep
    for i in range(self.max_seq_len-1):
        if feed_previous:
            next_dec_input = decoder_output_sequence_single
        else:
            next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0],
                                      [-1, 1, self.word_vec_dim])
        decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim,
                                             return_seq=False, reuse=True,
                                             scope='decoder_lstm')
        decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
        decoder_output_sequence_list.append(decoder_output_tensor)

    decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
    real_output_sequence = tf.concat(1, [encoder_output_sequence,
                                         decoder_output_sequence])

    net = tflearn.regression(real_output_sequence, optimizer='sgd',
                             learning_rate=0.1, loss='mean_square')
    model = tflearn.DNN(net)
    return model
Example #3
Source File: one_lstm_sequence_generate.py From ChatBotCourse with MIT License
def main():
    load_vectors("./vectors.bin")
    init_seq()
    xlist = []
    ylist = []
    test_X = None
    #for i in range(len(seq)-100):
    for i in range(1000):
        sequence = seq[i:i+20]
        xlist.append(sequence)
        ylist.append(seq[i+20])
        if test_X is None:
            test_X = np.array(sequence)
            (match_word, max_cos) = vector2word(seq[i+20])
            print("right answer=", match_word, max_cos)

    X = np.array(xlist)
    Y = np.array(ylist)
    net = tflearn.input_data([None, 20, 200])
    net = tflearn.lstm(net, 200)
    net = tflearn.fully_connected(net, 200, activation='linear')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='mean_square')
    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=1000, batch_size=1, snapshot_epoch=False,
              show_metric=True)
    model.save("model")

    predict = model.predict([test_X])
    #print(predict)
    #for v in test_X:
    #    print(vector2word(v))
    (match_word, max_cos) = vector2word(predict[0])
    print("predict=", match_word, max_cos)
Example #4
Source File: my_seq2seq.py From ChatBotCourse with MIT License
def model(self, feed_previous=False):
    # Build encoder_inputs, and decoder_inputs with a GO head, from the XY input
    input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim],
                                    dtype=tf.float32, name="XY")
    encoder_inputs = tf.slice(input_data, [0, 0, 0],
                              [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
    decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0],
                                  [-1, self.max_seq_len-1, self.word_vec_dim],
                                  name="dec_in_tmp")
    go_inputs = tf.ones_like(decoder_inputs_tmp)
    go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    # NOTE: tf.concat(dim, values) and tf.pack are pre-1.0 TensorFlow APIs
    # (renamed to tf.concat(values, axis) and tf.stack in TensorFlow 1.0)
    decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")

    # Encoder: hand encoder_inputs to the encoder; it returns an output (the first
    # value of the predicted sequence) and a state (passed on to the decoder)
    (encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim,
                                                   return_state=True,
                                                   scope='encoder_lstm')
    encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)

    # Decoder: at prediction time the previous timestep's output is the next
    # timestep's input; start with the encoder's last output as the first input
    if feed_previous:
        first_dec_input = go_inputs
    else:
        first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim,
                                         initial_state=states, return_seq=False,
                                         reuse=False, scope='decoder_lstm')
    decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
    decoder_output_sequence_list = [decoder_output_tensor]

    # Then feed the decoder's output back in as the input for the next timestep
    for i in range(self.max_seq_len-1):
        if feed_previous:
            next_dec_input = decoder_output_sequence_single
        else:
            next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0],
                                      [-1, 1, self.word_vec_dim])
        decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim,
                                             return_seq=False, reuse=True,
                                             scope='decoder_lstm')
        decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
        decoder_output_sequence_list.append(decoder_output_tensor)

    decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
    real_output_sequence = tf.concat(1, [encoder_output_sequence,
                                         decoder_output_sequence])

    net = tflearn.regression(real_output_sequence, optimizer='sgd',
                             learning_rate=0.1, loss='mean_square')
    model = tflearn.DNN(net)
    return model
Example #5
Source File: test_models.py From FRU with MIT License
def test_sequencegenerator(self):

    with tf.Graph().as_default():
        text = "123456789101234567891012345678910123456789101234567891012345678910"
        maxlen = 5

        X, Y, char_idx = \
            tflearn.data_utils.string_to_semi_redundant_sequences(
                text, seq_maxlen=maxlen, redun_step=3)

        g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(10, temperature=.5, seq_seed="12345")
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

        # Testing save method
        m.save("test_seqgen.tflearn")
        self.assertTrue(os.path.exists("test_seqgen.tflearn.index"))

        # Testing load method
        m.load("test_seqgen.tflearn")
        res = m.generate(10, temperature=.5, seq_seed="12345")
        # TODO: Fix test
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Example #6
Source File: test_layers.py From FRU with MIT License
def test_recurrent_layers(self):

    X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
    Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.embedding(g, input_dim=12, output_dim=4)
        g = tflearn.lstm(g, 6)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
        self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)
Example #7
Source File: sentiment.py From TaobaoAnalysis with MIT License
def _create_model(self):
    reset_default_graph()
    net = input_data([None, SEQUENCE_LEN])
    net = embedding(net, input_dim=len(self._vocab.vocabulary_),
                    output_dim=WORD_FEATURE_DIM)
    net = lstm(net, DOC_FEATURE_DIM, dropout=0.8)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy')
    return DNN(net)
Example #8
Source File: rnn_utils.py From sign-language-gesture-recognition with MIT License
def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
Example #9
Source File: rnn_utils.py From continuous-online-video-classification-blog with MIT License
def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Example #10
Source File: rnn_utils.py From continuous-online-video-classification-blog with MIT License
def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Example #11
Source File: rnn_utils.py From continuous-online-video-classification-blog with MIT License
def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
Example #12
Source File: rnn_utils.py From continuous-online-video-classification-blog with MIT License
def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
Example #13
Source File: rnn_utils.py From sign-language-gesture-recognition with MIT License
def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Example #14
Source File: rnn_utils.py From sign-language-gesture-recognition with MIT License
def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Example #15
Source File: rnn_utils.py From sign-language-gesture-recognition with MIT License
def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
Example #16
Source File: test_models.py From FRU with MIT License
def test_sequencegenerator_words(self):

    with tf.Graph().as_default():
        text = ["hello", "world"] * 100
        word_idx = {"hello": 0, "world": 1}
        maxlen = 2

        vec = [x for x in map(word_idx.get, text) if x is not None]

        sequences = []
        next_words = []
        for i in range(0, len(vec) - maxlen, 3):
            sequences.append(vec[i: i + maxlen])
            next_words.append(vec[i + maxlen])

        X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=np.bool)
        Y = np.zeros((len(sequences), len(word_idx)), dtype=np.bool)
        for i, seq in enumerate(sequences):
            for t, idx in enumerate(seq):
                X[i, t, idx] = True
            Y[i, next_words[i]] = True

        g = tflearn.input_data(shape=[None, maxlen, len(word_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(word_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=word_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")

        # Testing save method
        m.save("test_seqgen_word.tflearn")
        self.assertTrue(os.path.exists("test_seqgen_word.tflearn.index"))

        # Testing load method
        m.load("test_seqgen_word.tflearn")
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "Reloaded SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")