Python tflearn.dropout() Examples
The following are 14 code examples of tflearn.dropout(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the tflearn module, or try the search function.
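tflearn.dropout(incoming, keep_prob) takes a keep probability, not a drop rate: each unit is retained with probability keep_prob during training (so 0.8 keeps 80% of activations), and tflearn ties the op to its training-mode flag so it is disabled at prediction time. Before the project examples below, here is a minimal, self-contained sketch of typical usage; the layer sizes, the 0.8 keep probability, and the MNIST-style input shape are illustrative assumptions, not taken from any of the projects:

import tflearn

# Minimal usage sketch for tflearn.dropout(); all sizes here are
# illustrative assumptions (e.g. 784 = a flattened 28x28 input).
net = tflearn.input_data(shape=[None, 784], name='input')
net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.dropout(net, 0.8)   # keep each unit with probability 0.8
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(net)          # dropout active in fit(), disabled in predict()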
Example #1
Source File: a3c.py From QARC with BSD 3-Clause "New" or "Revised" License
def create_critic_network(self):
    with tf.variable_scope('critic'):
        inputs = tflearn.input_data(
            shape=[None, self.s_dim[0], self.s_dim[1]])
        _input = tf.expand_dims(inputs, -1)
        merge_net = tflearn.conv_2d(
            _input, FEATURE_NUM, KERNEL, activation='relu')
        merge_net = tflearn.conv_2d(
            merge_net, FEATURE_NUM, KERNEL, activation='relu')
        avg_net = tflearn.global_avg_pool(merge_net)
        # dense_net_0 = tflearn.fully_connected(
        #     merge_net, 64, activation='relu')
        # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
        out = tflearn.fully_connected(avg_net, 1, activation='linear')
        return inputs, out
Example #2
Source File: a3c.py From QARC with BSD 3-Clause "New" or "Revised" License
def create_actor_network(self):
    with tf.variable_scope('actor'):
        inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
        split_array = []
        for i in xrange(self.s_dim[0] - 1):
            split = tflearn.conv_1d(inputs[:, i:i + 1, :], FEATURE_NUM, KERNEL,
                                    activation='relu')
            flattern = tflearn.flatten(split)
            split_array.append(flattern)
        dense_net = tflearn.fully_connected(inputs[:, -1:, :], FEATURE_NUM,
                                            activation='relu')
        split_array.append(dense_net)
        merge_net = tflearn.merge(split_array, 'concat')
        dense_net_0 = tflearn.fully_connected(merge_net, 64, activation='relu')
        # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
        out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax')
        return inputs, out
Example #3
Source File: a3c.py From QARC with BSD 3-Clause "New" or "Revised" License
def create_critic_network(self):
    with tf.variable_scope('critic'):
        inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
        split_array = []
        for i in xrange(self.s_dim[0] - 1):
            split = tflearn.conv_1d(inputs[:, i:i + 1, :], FEATURE_NUM, KERNEL,
                                    activation='relu')
            flattern = tflearn.flatten(split)
            split_array.append(flattern)
        dense_net = tflearn.fully_connected(inputs[:, -1:, :], FEATURE_NUM,
                                            activation='relu')
        split_array.append(dense_net)
        merge_net = tflearn.merge(split_array, 'concat')
        dense_net_0 = tflearn.fully_connected(merge_net, 64, activation='relu')
        # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
        out = tflearn.fully_connected(dense_net_0, 1, activation='linear')
        return inputs, out
Example #4
Source File: model.py From tensorflow2caffe with MIT License
def vgg_net_19(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    model = DNN(network, checkpoint_path='', max_checkpoints=1,
                tensorboard_verbose=2, tensorboard_dir='')
    return model

# Model of VGG-19 for testing the activations.
# Rename the output you want to test, connect it to the next layer, and change
# the output layer at the bottom (model = DNN(...)).
# Make sure to use the correct test function (depending on whether your output
# is a tensor or a vector).
Example #5
Source File: model.py From tensorflow2caffe with MIT License
def vgg_net_19_activations(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network1 = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network2 = conv_2d(network1, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network2, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    model = DNN(network1, checkpoint_path='', max_checkpoints=1,
                tensorboard_verbose=2, tensorboard_dir='')
    return model
Example #6
Source File: vqn.py From QARC with BSD 3-Clause "New" or "Revised" License
def vgg16(input, num_class):
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, num_class, activation='sigmoid',
                                scope='fc8', restore=False)
    return x
Example #7
Source File: vgg16.py From models with MIT License
def vgg16(placeholderX=None):
    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, 1000, activation='softmax', scope='fc8')
    return x
Example #8
Source File: weights_loading_scope.py From FRU with MIT License
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
Example #9
Source File: weights_loading_scope.py From FRU with MIT License
def make_core_network(network):
    dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001,
                                     name="dense1")
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001,
                                     name="dense2")
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax',
                                      name="softmax")
    return softmax
Example #10
Source File: vgg_network_finetuning.py From FRU with MIT License
def vgg16(input, num_class):
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, num_class, activation='softmax',
                                scope='fc8', restore=False)
    return x
Example #11
Source File: recommender_wide_and_deep.py From FRU with MIT License
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
    '''
    Model - deep, i.e. two-layer fully connected network model
    '''
    cc_input_var = {}
    cc_embed_var = {}
    flat_vars = []
    if self.verbose:
        print("--> deep model: %s categories, %d continuous"
              % (len(self.categorical_columns), n_inputs))
    for cc, cc_size in self.categorical_columns.items():
        cc_input_var[cc] = tflearn.input_data(shape=[None, 1],
                                              name="%s_in" % cc,
                                              dtype=tf.int32)
        # embedding layers only work on CPU! No GPU implementation in tensorflow, yet!
        cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(
            cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
        if self.verbose:
            print("    %s_embed = %s" % (cc, cc_embed_var[cc]))
        flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1],
                                    name="%s_squeeze" % cc))
    network = tf.concat([wide_inputs] + flat_vars, 1, name="deep_concat")
    for k in range(len(n_nodes)):
        network = tflearn.fully_connected(network, n_nodes[k],
                                          activation="relu",
                                          name="deep_fc%d" % (k+1))
        if use_dropout:
            network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
    if self.verbose:
        print("Deep model network before output %s" % network)
    network = tflearn.fully_connected(network, 1, activation="linear",
                                      name="deep_fc_output", bias=False)
    network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
    if self.verbose:
        print("Deep model network %s" % network)
    return network
Example #12
Source File: tflearn_wide_and_deep.py From tflearn_wide_and_deep with MIT License
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
    '''
    Model - deep, i.e. two-layer fully connected network model
    '''
    cc_input_var = {}
    cc_embed_var = {}
    flat_vars = []
    if self.verbose:
        print("--> deep model: %s categories, %d continuous"
              % (len(self.categorical_columns), n_inputs))
    for cc, cc_size in self.categorical_columns.items():
        cc_input_var[cc] = tflearn.input_data(shape=[None, 1],
                                              name="%s_in" % cc,
                                              dtype=tf.int32)
        # embedding layers only work on CPU! No GPU implementation in tensorflow, yet!
        cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(
            cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
        if self.verbose:
            print("    %s_embed = %s" % (cc, cc_embed_var[cc]))
        flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1],
                                    name="%s_squeeze" % cc))
    # Note: this variant uses the pre-1.0 TensorFlow argument order, with the
    # axis as the first argument to tf.concat (newer TF takes values first).
    network = tf.concat(1, [wide_inputs] + flat_vars, name="deep_concat")
    for k in range(len(n_nodes)):
        network = tflearn.fully_connected(network, n_nodes[k],
                                          activation="relu",
                                          name="deep_fc%d" % (k+1))
        if use_dropout:
            network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
    if self.verbose:
        print("Deep model network before output %s" % network)
    network = tflearn.fully_connected(network, 1, activation="linear",
                                      name="deep_fc_output", bias=False)
    network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
    if self.verbose:
        print("Deep model network %s" % network)
    return network
Example #13
Source File: test_layers.py From FRU with MIT License
def test_core_layers(self):
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    # Graph definition
    with tf.Graph().as_default():
        # Building a network with 2 optimizers
        g = tflearn.input_data(shape=[None, 2])

        # Nand operator definition
        g_nand = tflearn.fully_connected(g, 32, activation='linear')
        g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
        g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
        g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                    learning_rate=2.,
                                    loss='binary_crossentropy')
        # Or operator definition
        g_or = tflearn.fully_connected(g, 32, activation='linear')
        g_or = tflearn.fully_connected(g_or, 32, activation='linear')
        g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
        g_or = tflearn.regression(g_or, optimizer='sgd',
                                  learning_rate=2.,
                                  loss='binary_crossentropy')
        # XOR merging Nand and Or operators
        g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

        # Training
        m = tflearn.DNN(g_xor)
        m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

        # Testing
        self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
        self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
        self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
        self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

    # Bulk Tests
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None, 2])
        net = tflearn.flatten(net)
        net = tflearn.reshape(net, new_shape=[-1])
        net = tflearn.activation(net, 'relu')
        net = tflearn.dropout(net, 0.5)
        net = tflearn.single_unit(net)
Example #14
Source File: test_models.py From FRU with MIT License
def test_sequencegenerator_words(self):
    with tf.Graph().as_default():
        text = ["hello", "world"] * 100
        word_idx = {"hello": 0, "world": 1}
        maxlen = 2

        vec = [x for x in map(word_idx.get, text) if x is not None]

        sequences = []
        next_words = []
        for i in range(0, len(vec) - maxlen, 3):
            sequences.append(vec[i: i + maxlen])
            next_words.append(vec[i + maxlen])

        X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=np.bool)
        Y = np.zeros((len(sequences), len(word_idx)), dtype=np.bool)
        for i, seq in enumerate(sequences):
            for t, idx in enumerate(seq):
                X[i, t, idx] = True
            Y[i, next_words[i]] = True

        g = tflearn.input_data(shape=[None, maxlen, len(word_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(word_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam',
                               loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=word_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world",
                         "SequenceGenerator (word level) test failed! "
                         "Generated sequence: " + res_str +
                         " expected 'hello world'")

        # Testing save method
        m.save("test_seqgen_word.tflearn")
        self.assertTrue(os.path.exists("test_seqgen_word.tflearn.index"))

        # Testing load method
        m.load("test_seqgen_word.tflearn")
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world",
                         "Reloaded SequenceGenerator (word level) test failed! "
                         "Generated sequence: " + res_str +
                         " expected 'hello world'")