Python tflearn.max_pool_2d() Examples
The following are 14 code examples of tflearn.max_pool_2d(), drawn from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all available functions and classes of the tflearn module.
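Before the project examples, here is a minimal sketch of the layer itself. In tflearn, max_pool_2d(incoming, kernel_size, strides=None, padding='same', name='MaxPool2D') max-pools a 4-D NHWC tensor, with strides defaulting to kernel_size. The input shape below is an illustrative assumption, not taken from any project on this page.

# Minimal illustrative sketch (assumes tflearn on a TF 1.x graph);
# the 32x32x3 input shape is an arbitrary example.
import tensorflow as tf
import tflearn

with tf.Graph().as_default():
    net = tflearn.input_data(shape=[None, 32, 32, 3])  # batch of 32x32 RGB images
    # 2x2 pooling; strides default to kernel_size, halving each spatial dim.
    net = tflearn.max_pool_2d(net, 2)
    print(net.get_shape().as_list())  # [None, 16, 16, 3]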
Example #1
Source File: cnn.py From QARC with BSD 3-Clause "New" or "Revised" License
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL // 2, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.fully_connected(
        #     network, DENSE_SIZE, activation='relu')
        split_flat = tflearn.flatten(network)
        return split_flat
Example #2
Source File: test_layers.py From FRU with MIT License
def test_feed_dict_no_None(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4], name="X_in")
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)

        def do_fit():
            m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
        self.assertRaisesRegexp(
            Exception,
            "Feed dict asks for variable named 'non_existent' but no such variable is known to exist",
            do_fit)
Example #3
Source File: model.py From tensorflow2caffe with MIT License
def vgg_net_19(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)

    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')

    model = DNN(network, checkpoint_path='', max_checkpoints=1,
                tensorboard_verbose=2, tensorboard_dir='')
    return model

# Model of VGG-19 for testing the activations.
# Rename the output you want to test, connect it to the next layer, and change
# the output layer at the bottom (model = DNN(...)).
# Make sure to use the correct test function (depending on whether your output
# is a tensor or a vector).
Example #4
Source File: model.py From tensorflow2caffe with MIT License
def vgg_net_19_activations(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network1 = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network2 = conv_2d(network1, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network2, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)

    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')

    model = DNN(network1, checkpoint_path='', max_checkpoints=1,
                tensorboard_verbose=2, tensorboard_dir='')
    return model
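Note that the DNN here is built on network1, the output of conv1_1, rather than on the softmax head, so model.predict() returns that layer's activations directly. A minimal sketch of reading them follows; the 224x224 size, the zero image, and the weights-file name are illustrative assumptions, not from the original script.

# Illustrative only; in practice load converted weights before predicting.
import numpy as np

model = vgg_net_19_activations(224, 224)
# model.load('vgg19.tflearn')  # hypothetical weights file
img = np.zeros((1, 224, 224, 3), dtype=np.float32)
activations = model.predict(img)
print(np.asarray(activations).shape)  # (1, 224, 224, 64): conv1_1 feature maps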
Example #5
Source File: gray.py From QARC with BSD 3-Clause "New" or "Revised" License
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        cnn_network = tflearn.conv_2d(
            network, KERNEL * 2, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.max_pool_2d(network, 2)
        # network = tflearn.conv_2d(
        #     network, KERNEL * 4, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.max_pool_2d(network, 2)
        network = tflearn.global_avg_pool(cnn_network)
        split_flat = tflearn.flatten(network)
        # print(split_flat.get_shape().as_list())
        return split_flat, cnn_network
Example #6
Source File: qarc.py From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(self, x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(self.CNN_Core(tmp_network))
            else:
                _split_array.append(self.CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            # Integer division keeps the reshape dimension an int under Python 3.
            net = tf.reshape(
                merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ, 1])
            network = tflearn.conv_2d(
                net, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 3)
            network = tflearn.layers.normalization.batch_normalization(network)
            network = tflearn.conv_2d(
                network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 2)
            network = tflearn.layers.normalization.batch_normalization(network)
            cnn_result = tflearn.fully_connected(
                network, DENSE_SIZE, activation='relu')

        out = tflearn.fully_connected(
            cnn_result, OUTPUT_DIM, activation='sigmoid')
        return out
Example #7
Source File: vqn-new.py From QARC with BSD 3-Clause "New" or "Revised" License
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 2, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 2, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        split_flat = tflearn.flatten(network)
        return split_flat
Example #8
Source File: vqn.py From QARC with BSD 3-Clause "New" or "Revised" License
def vgg16(input, num_class):
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(
        x, num_class, activation='sigmoid', scope='fc8', restore=False)
    return x
Example #9
Source File: convert_VQPN.py From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(self, x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(self.CNN_Core(tmp_network))
            else:
                _split_array.append(self.CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            # Integer division keeps the reshape dimension an int under Python 3.
            net = tf.reshape(
                merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ, 1])
            network = tflearn.conv_2d(
                net, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 3)
            network = tflearn.layers.normalization.batch_normalization(network)
            network = tflearn.conv_2d(
                network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 2)
            network = tflearn.layers.normalization.batch_normalization(network)
            cnn_result = tflearn.fully_connected(
                network, DENSE_SIZE, activation='relu')

        out = tflearn.fully_connected(
            cnn_result, OUTPUT_DIM, activation='sigmoid')
        return out
Example #10
Source File: vgg16.py From models with MIT License
def vgg16(placeholderX=None):
    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, 1000, activation='softmax', scope='fc8')
    return x
Example #11
Source File: vgg_network_finetuning.py From FRU with MIT License
def vgg16(input, num_class):
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, num_class, activation='softmax',
                                scope='fc8', restore=False)
    return x
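The restore=False on fc8 is what makes this network finetunable: every layer except the new classification head can be restored from a pre-trained checkpoint. A minimal sketch of how such a model might be wired and loaded follows; the weights-file name, optimizer settings, and num_class=10 are illustrative assumptions, not taken from the FRU script.

# Hypothetical finetuning setup; file name and hyperparameters are assumptions.
import tflearn

num_class = 10
x = tflearn.input_data(shape=[None, 224, 224, 3], name='input')
softmax = vgg16(x, num_class)
net = tflearn.regression(softmax, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001, restore=False)

model = tflearn.DNN(net, tensorboard_verbose=0)
# weights_only=True restores layer weights but no training state, so the
# pieces created with restore=False (fc8 and the regression op) keep their
# fresh initialization.
model.load('vgg16.tflearn', weights_only=True)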
Example #12
Source File: test_layers.py From FRU with MIT License
def test_conv_layers(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2, activation='relu')
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
        # TODO: Fix test
        # self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

    # Bulk Tests
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
        g = tflearn.max_pool_2d(g, 2)
Example #13
Source File: texture_net.py From TensorFlowBook with Apache License 2.0
def generator(input_image):
    conv2d = tflearn.conv_2d
    batch_norm = tflearn.batch_normalization
    relu = tf.nn.relu

    ratios = [16, 8, 4, 2, 1]
    n_filter = 8
    net = []

    for i in range(len(ratios)):
        net.append(tflearn.max_pool_2d(input_image, ratios[i], ratios[i]))
        # block_i_0, block_i_1, block_i_2
        for block in range(3):
            ksize = 1 if (block + 1) % 3 == 0 else 3
            net[i] = relu(batch_norm(conv2d(net[i], n_filter, ksize)))
        if i != 0:
            # concat with net[i-1]
            upnet = batch_norm(net[i - 1])
            downnet = batch_norm(net[i])
            # tf.concat takes the tensor list first in TF >= 1.0
            net[i] = tf.concat([upnet, downnet], 3)
        # block_i_3, block_i_4, block_i_5
        for block in range(3, 6):
            ksize = 1 if (block + 1) % 3 == 0 else 3
            net[i] = conv2d(net[i], n_filter * (i + 1), ksize)
            net[i] = relu(batch_norm(net[i]))
        if i != len(ratios) - 1:
            # upsample for concat
            net[i] = tflearn.upsample_2d(net[i], 2)

    nn = len(ratios) - 1
    output = conv2d(net[nn], 3, 1)
    return output
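The generator builds a five-level pyramid by max-pooling the input at ratios 16 down to 1, runs conv blocks at each scale, and progressively upsamples and concatenates coarser levels into finer ones, so the output keeps the input's spatial size. A minimal sketch of driving it follows; the 256x256 input size is an illustrative assumption (it must be divisible by the largest ratio, 16).

# Illustrative usage; the input size is an assumption, not from the book's code.
import numpy as np
import tensorflow as tf

input_image = tf.placeholder(tf.float32, [None, 256, 256, 3], name='input_image')
styled = generator(input_image)  # same spatial size as the input, 3 channels

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(styled,
                   feed_dict={input_image: np.zeros((1, 256, 256, 3), np.float32)})
    print(out.shape)  # (1, 256, 256, 3)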
Example #14
Source File: vqn-cnn.py From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            # Integer division keeps the reshape dimension an int under Python 3.
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ, 1])
            network = tflearn.conv_2d(
                net, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 3)
            network = tflearn.layers.normalization.batch_normalization(network)
            network = tflearn.conv_2d(
                network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 2)
            network = tflearn.layers.normalization.batch_normalization(network)
            CNN_result = tflearn.fully_connected(
                network, DENSE_SIZE, activation='relu')
            # CNN_result = tflearn.fully_connected(CNN_result, OUTPUT_DIM, activation='sigmoid')

        # with tf.variable_scope('full-gru'):
        #     net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ])
        #     net = tflearn.gru(net, DENSE_SIZE, return_seq=True)
        #     out_gru = tflearn.gru(net, DENSE_SIZE, dropout=0.8)
        #     gru_result = tflearn.fully_connected(out_gru, DENSE_SIZE, activation='relu')
        #     gru_result = tflearn.fully_connected(gru_result, OUTPUT_DIM, activation='sigmoid')

        # The GRU branch above is disabled, so merging with gru_result would
        # raise a NameError; the merge stays commented out and only the CNN
        # branch feeds the output.
        # merge_net = tflearn.merge([gru_result, CNN_result], 'concat')

        out = tflearn.fully_connected(
            CNN_result, OUTPUT_DIM, activation='sigmoid')
        return out