Python tflearn.layers.conv.max_pool_2d() Examples
The following are 24 code examples of tflearn.layers.conv.max_pool_2d().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tflearn.layers.conv, or try the search function.
Example #1
Source File: inceptionVxOnFire.py From fire-detection-cnn with MIT License | 6 votes |
def reduction_block_b(reduction_input_b):
    """Reduction-B block (InceptionV4): downsample the spatial resolution.

    Three parallel branches are computed from ``reduction_input_b`` and
    concatenated along the channel axis (axis=3, NHWC layout).
    """
    # Branch 1: 1x1 conv, then strided 3x3 conv ('valid' padding halves H/W).
    reduction_b_1_1 = conv_2d(reduction_input_b,192,1,activation='relu',name='reduction_b_1_1')
    # NOTE(review): this conv passes no activation=, unlike every sibling conv
    # in the block — confirm against the reference InceptionV4 definition.
    reduction_b_1_3 = conv_2d(reduction_b_1_1,192,3,strides=2,padding='valid',name='reduction_b_1_3')
    # Branch 2: 1x1 reduce, 1x7 and 7x1 asymmetric convs, then strided 3x3.
    reduction_b_3_3_reduce = conv_2d(reduction_input_b, 256, filter_size=1, activation='relu', name='reduction_b_3_3_reduce')
    reduction_b_3_3_asym_1 = conv_2d(reduction_b_3_3_reduce, 256, filter_size=[1,7], activation='relu',name='reduction_b_3_3_asym_1')
    reduction_b_3_3_asym_2 = conv_2d(reduction_b_3_3_asym_1, 320, filter_size=[7,1], activation='relu',name='reduction_b_3_3_asym_2')
    reduction_b_3_3=conv_2d(reduction_b_3_3_asym_2,320,3,strides=2,activation='relu',padding='valid',name='reduction_b_3_3')
    # Branch 3: parameter-free downsampling via strided max-pool.
    reduction_b_pool = max_pool_2d(reduction_input_b,kernel_size=3,strides=2,padding='valid')
    # merge the reduction_b
    reduction_b_output = merge([reduction_b_1_3,reduction_b_3_3,reduction_b_pool],mode='concat',axis=3)
    return reduction_b_output

################################################################################
# InceptionV4 : definition of inception_block_c
Example #2
Source File: inceptionVxOnFire.py From fire-detection-cnn with MIT License | 6 votes |
def reduction_block_a(reduction_input_a):
    """Reduction-A block (InceptionV4): downsample via three parallel branches.

    The branches are concatenated along the channel axis (axis=3, NHWC).
    """
    # Branch 1: single strided 3x3 conv.
    branch_conv = conv_2d(reduction_input_a, 384, 3, strides=2, padding='valid',
                          activation='relu', name='reduction_a_conv1_1_1')
    # Branch 2: 1x1 reduce -> 3x3 -> strided 3x3.
    branch_tower = conv_2d(reduction_input_a, 192, 1, activation='relu',
                           name='reduction_a_conv2_1_1')
    branch_tower = conv_2d(branch_tower, 224, 3, activation='relu',
                           name='reduction_a_conv2_3_3')
    branch_tower = conv_2d(branch_tower, 256, 3, strides=2, padding='valid',
                           activation='relu', name='reduction_a_conv2_3_3_s2')
    # Branch 3: parameter-free strided max-pool.
    branch_pool = max_pool_2d(reduction_input_a, strides=2, padding='valid',
                              kernel_size=3, name='reduction_a_pool')
    # merge reduction_a
    return merge([branch_conv, branch_tower, branch_pool], mode='concat', axis=3)

################################################################################
# InceptionV4 : definition of inception_block_b
Example #3
Source File: em_model.py From Emotion-recognition-and-prediction with Apache License 2.0 | 6 votes |
def build_network(self):
    """Assemble the emotion-recognition CNN, wrap it in a DNN, load weights."""
    print("---> Starting Neural Network")
    # Input: 48x48 single-channel face crops.
    net = input_data(shape = [None, 48, 48, 1])
    # Two conv/pool stages, then a third conv layer with dropout.
    net = conv_2d(net, 64, 5, activation = 'relu')
    net = max_pool_2d(net, 3, strides = 2)
    net = conv_2d(net, 64, 5, activation = 'relu')
    net = max_pool_2d(net, 3, strides = 2)
    net = conv_2d(net, 128, 4, activation = 'relu')
    net = dropout(net, 0.3)
    # Classifier head: one hidden FC layer, then a per-class softmax.
    net = fully_connected(net, 3072, activation = 'relu')
    net = fully_connected(net, len(self.target_classes), activation = 'softmax')
    net = regression(net, optimizer = 'momentum', loss = 'categorical_crossentropy')
    self.network = net
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path = 'model_1_nimish',
        max_checkpoints = 1,
        tensorboard_verbose = 2
    )
    self.load_model()
Example #4
Source File: emotion_recognition.py From emotion-recognition-neural-networks with MIT License | 5 votes |
def build_network(self):
    """Build a smaller 'AlexNet'-style CNN for emotion recognition.

    Architecture mirrors
    https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    """
    print('[+] Building CNN')
    # Input: SIZE_FACE x SIZE_FACE single-channel face crops.
    net = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    # Two conv/pool stages, then a third conv layer with dropout.
    net = conv_2d(net, 64, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = conv_2d(net, 64, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = conv_2d(net, 128, 4, activation='relu')
    net = dropout(net, 0.3)
    # Classifier head: one hidden FC layer, per-emotion softmax.
    net = fully_connected(net, 3072, activation='relu')
    net = fully_connected(net, len(EMOTIONS), activation='softmax')
    net = regression(net,
                     optimizer='momentum',
                     loss='categorical_crossentropy')
    self.network = net
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
Example #5
Source File: single_layer_network.py From DeepOSM with MIT License | 5 votes |
def model_for_type(neural_net_type, tile_size, on_band_count):
    """Build a road/not-road classifier of the requested architecture.

    The neural_net_type can be: one_layer_relu, one_layer_relu_conv,
    two_layer_relu_conv.

    Args:
        neural_net_type: one of the architecture names above.
        tile_size: spatial size of the square input tiles.
        on_band_count: number of input channels (bands).

    Returns:
        A tflearn.DNN wrapping the compiled network.

    Raises:
        ValueError: if neural_net_type is not a known architecture name.
    """
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])
    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == 'one_layer_relu':
        network = tflearn.fully_connected(network, 64, activation='relu')
    elif neural_net_type == 'one_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        # BUG FIX: the old code only printed "ERROR: exiting ..." and fell
        # through, silently training a degenerate model on raw input.
        # Fail fast instead so the caller sees the misconfiguration.
        raise ValueError(
            "unknown layer type for neural net: {!r}".format(neural_net_type))

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9,
        lr_decay=0.0002, name='Momentum')

    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')

    return tflearn.DNN(net, tensorboard_verbose=0)
Example #6
Source File: test_validation_monitors.py From FRU with MIT License | 5 votes |
def test_vbs1(self):
    """Check that fit() propagates validation_batch_size and batch_size
    to the trainer's train op."""
    with tf.Graph().as_default():
        # Data loading and preprocessing
        import tflearn.datasets.mnist as mnist
        X, Y, testX, testY = mnist.load_data(one_hot=True)
        X = X.reshape([-1, 28, 28, 1])
        testX = testX.reshape([-1, 28, 28, 1])
        # Keep only a tiny slice of MNIST so the test runs fast.
        X = X[:20, :, :, :]
        Y = Y[:20, :]
        testX = testX[:10, :, :, :]
        testY = testY[:10, :]

        # Building convolutional network
        network = input_data(shape=[None, 28, 28, 1], name='input')
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        # Training: one epoch is enough to construct the train ops under test.
        model = tflearn.DNN(network, tensorboard_verbose=3)
        model.fit({'input': X}, {'target': Y}, n_epoch=1,
                  batch_size=10,
                  validation_set=({'input': testX}, {'target': testY}),
                  validation_batch_size=5,
                  snapshot_step=10, show_metric=True, run_id='convnet_mnist_vbs')
        # The values passed to fit() must surface on the first train op.
        self.assertEqual(model.train_ops[0].validation_batch_size, 5)
        self.assertEqual(model.train_ops[0].batch_size, 10)
Example #7
Source File: weights_loading_scope.py From FRU with MIT License | 5 votes |
def make_core_network(network):
    """Core MNIST convnet: two conv/pool/LRN stages and three FC layers.

    Reshapes the (flat) input tensor to 28x28x1 images and returns the
    10-way softmax output layer.
    """
    net = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    # Stage 1: conv -> pool -> local response normalization.
    net = conv_2d(net, 32, 3, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    # Stage 2: same shape of pipeline, twice the filters.
    net = conv_2d(net, 64, 3, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    # Classifier head with dropout between FC layers.
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    return fully_connected(net, 10, activation='softmax')
Example #8
Source File: alexnet.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr):
    """AlexNet-style CNN over single-channel frames, 3 output classes.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 3, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
Example #9
Source File: alexnet.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr):
    """AlexNet-style CNN over single-channel frames, 3 output classes.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 3, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #10
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr, output=3):
    """AlexNet-style CNN over single-channel frames.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #11
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two conv towers plus four FC layers.

    Args:
        width, height: spatial size of the single-channel input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #12
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr, output=3):
    """AlexNet-style CNN over single-channel frames.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #13
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two conv towers plus four FC layers.

    Args:
        width, height: spatial size of the single-channel input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #14
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    """Deep AlexNet-style CNN over 3-channel (color) frames.

    Args:
        width, height: spatial size of the input frames.
        frame_count: unused here — presumably kept for signature parity with
            the 3D/sequence variants; TODO confirm.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.
        model_name: unused here — NOTE(review): never passed to tflearn.DNN;
            confirm whether it should feed checkpoint_path.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 3], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, max_checkpoints=0,
                       tensorboard_verbose=0, tensorboard_dir='log')
Example #15
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr, output=3):
    """AlexNet-style CNN over single-channel frames.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #16
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two conv towers plus four FC layers.

    Args:
        width, height: spatial size of the single-channel input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #17
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr, output=3):
    """AlexNet-style CNN over single-channel frames.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #18
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two conv towers plus four FC layers.

    Args:
        width, height: spatial size of the single-channel input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
Example #19
Source File: models.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    """Deep AlexNet-style CNN over 3-channel (color) frames.

    Args:
        width, height: spatial size of the input frames.
        frame_count: unused here — presumably kept for signature parity with
            the 3D/sequence variants; TODO confirm.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.
        model_name: unused here — NOTE(review): never passed to tflearn.DNN;
            confirm whether it should feed checkpoint_path.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 3], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, max_checkpoints=0,
                       tensorboard_verbose=0, tensorboard_dir='log')
Example #20
Source File: alexnet.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two conv towers plus four FC layers.

    Args:
        width, height: spatial size of the single-channel input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First conv tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second conv tower (no LRN between the towers, as in the original).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head: four FC+dropout pairs, then softmax.
    for _ in range(4):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
Example #21
Source File: alexnet.py From pygta5 with GNU General Public License v3.0 | 5 votes |
def alexnet(width, height, lr, output=3):
    """AlexNet-style CNN over single-channel frames.

    Args:
        width, height: spatial size of the input frames.
        lr: learning rate for the Momentum optimizer.
        output: number of softmax output classes.

    Returns:
        A tflearn.DNN wrapping the compiled network.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked convs, then pool.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Classifier head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
Example #22
Source File: model.py From facial-expression-recognition-using-cnn with GNU General Public License v3.0 | 4 votes |
def build_modelB(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param,
                 learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
                 learning_rate_decay=HYPERPARAMS.learning_rate_decay,
                 decay_step=HYPERPARAMS.decay_step):
    """Build variant B of the facial-expression CNN as a TFLearn graph.

    Image branch: three conv blocks (64 -> 128 -> 256 filters, 3x3 kernels),
    each followed by 3x3/stride-2 max pooling, then FC 4096 -> FC 1024.
    Optionally fuses a second input branch (HOG features and/or landmarks)
    before the softmax output.

    Args:
        optimizer: 'momentum' or 'adam' (other values only print a warning).
        optimizer_param: momentum (for Momentum) or beta1 (for Adam).
        learning_rate, keep_prob, learning_rate_decay, decay_step:
            training hyperparameters, defaulting to HYPERPARAMS.

    Returns:
        The final TFLearn regression node (the assembled network).
    """
    img_net = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1],
                         name='input1')

    # Three conv blocks, each optionally batch-normalized, then pooled.
    for n_filters in (64, 128, 256):
        img_net = conv_2d(img_net, n_filters, 3, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_conv_layers:
            img_net = batch_normalization(img_net)
        img_net = max_pool_2d(img_net, 3, strides=2)

    img_net = dropout(img_net, keep_prob=keep_prob)
    img_net = fully_connected(img_net, 4096, activation=NETWORK.activation)
    img_net = dropout(img_net, keep_prob=keep_prob)
    img_net = fully_connected(img_net, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        img_net = batch_normalization(img_net)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        # Auxiliary input shape depends on which feature set is enabled.
        if NETWORK.use_hog_sliding_window_and_landmarks:
            lm_net = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            lm_net = input_data(shape=[None, 208], name='input2')
        else:
            lm_net = input_data(shape=[None, 68, 2], name='input2')
        lm_net = fully_connected(lm_net, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            lm_net = batch_normalization(lm_net)
        lm_net = fully_connected(lm_net, 128, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            lm_net = batch_normalization(lm_net)
        # Project the image branch to the same width and concatenate.
        img_net = fully_connected(img_net, 128, activation=NETWORK.activation)
        net = merge([img_net, lm_net], 'concat', axis=1)
    else:
        net = img_net

    net = fully_connected(net, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                             lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param,
                         beta2=learning_rate_decay)
    else:
        # Deliberate best-effort: warn and pass the raw value through.
        print( "Unknown optimizer: {}".format(optimizer))

    net = regression(net, optimizer=optimizer, loss=NETWORK.loss,
                     learning_rate=learning_rate, name='output')
    return net
Example #23
Source File: model.py From facial-expression-recognition-using-cnn with GNU General Public License v3.0 | 4 votes |
def build_modelA(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param,
                 learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
                 learning_rate_decay=HYPERPARAMS.learning_rate_decay,
                 decay_step=HYPERPARAMS.decay_step):
    """Build variant A of the facial-expression CNN as a TFLearn graph.

    Image branch: conv 64/5x5 + pool, conv 64/5x5 + pool, conv 128/4x4
    (no pool after the last conv), dropout, then FC 1024. Optionally fuses
    a second input branch (HOG features and/or landmarks) before the
    softmax output.

    Args:
        optimizer: 'momentum' or 'adam' (other values only print a warning).
        optimizer_param: momentum (for Momentum) or beta1 (for Adam).
        learning_rate, keep_prob, learning_rate_decay, decay_step:
            training hyperparameters, defaulting to HYPERPARAMS.

    Returns:
        The final TFLearn regression node (the assembled network).
    """
    img_net = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1],
                         name='input1')

    # Two conv+pool blocks with 5x5 kernels.
    for kernel_size in (5, 5):
        img_net = conv_2d(img_net, 64, kernel_size, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_conv_layers:
            img_net = batch_normalization(img_net)
        img_net = max_pool_2d(img_net, 3, strides=2)

    # Third conv block: 128 filters, 4x4 kernel, no pooling afterwards.
    img_net = conv_2d(img_net, 128, 4, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        img_net = batch_normalization(img_net)

    img_net = dropout(img_net, keep_prob=keep_prob)
    img_net = fully_connected(img_net, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        img_net = batch_normalization(img_net)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        # Auxiliary input shape depends on which feature set is enabled.
        if NETWORK.use_hog_sliding_window_and_landmarks:
            lm_net = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            lm_net = input_data(shape=[None, 208], name='input2')
        else:
            lm_net = input_data(shape=[None, 68, 2], name='input2')
        lm_net = fully_connected(lm_net, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            lm_net = batch_normalization(lm_net)
        lm_net = fully_connected(lm_net, 40, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            lm_net = batch_normalization(lm_net)
        # Project the image branch to the same width and concatenate.
        img_net = fully_connected(img_net, 40, activation=NETWORK.activation)
        net = merge([img_net, lm_net], 'concat', axis=1)
    else:
        net = img_net

    net = fully_connected(net, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                             lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param,
                         beta2=learning_rate_decay)
    else:
        # Deliberate best-effort: warn and pass the raw value through.
        print( "Unknown optimizer: {}".format(optimizer))

    net = regression(net, optimizer=optimizer, loss=NETWORK.loss,
                     learning_rate=learning_rate, name='output')
    return net
Example #24
Source File: inceptionVxOnFire.py From fire-detection-cnn with MIT License | 4 votes |
def construct_inceptionv4onfire(x, y, training=True, enable_batch_norm=True):
    """Construct the InceptionV4-on-Fire binary fire-detection network.

    Args:
        x: input image width; y: input image height (input shape is
           [None, y, x, 3] — rows first).
        training: when True, adds dropout and a regression training layer;
           when False, the network ends at the softmax.
        enable_batch_norm: apply batch normalization after the two stem
           max-pool layers.

    Returns:
        A ``tflearn.DNN`` model (checkpoints to 'inceptionv4onfire').
    """
    network = input_data(shape=[None, y, x, 3])

    # --- InceptionV4 stem ---
    conv1_3_3 = conv_2d(network, 32, 3, strides=2, activation='relu',
                        name='conv1_3_3_s2', padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3, 32, 3, activation='relu', name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3, 64, 3, activation='relu', name='conv3_3_3')

    # First stem split: stride-2 max pool vs. stride-2 conv, then concat.
    b_conv_1_pool = max_pool_2d(conv3_3_3, kernel_size=3, strides=2,
                                padding='valid', name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3, 96, 3, strides=2, padding='valid',
                            activation='relu', name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv, b_conv_1_pool], mode='concat', axis=3)

    # Second stem split: short 1x1->3x3 path vs. asymmetric 1x7/7x1 path.
    b_conv4_1_1 = conv_2d(b_conv_1, 64, 1, activation='relu', name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1, 96, 3, padding='valid',
                          activation='relu', name='conv5_3_3')
    b_conv4_1_1_reduce = conv_2d(b_conv_1, 64, 1, activation='relu',
                                 name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce, 64, [1, 7], activation='relu',
                          name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7, 64, [7, 1], activation='relu',
                          name='b_conv4_7_1')
    # NOTE(review): no activation kwarg here in the original — this conv
    # uses conv_2d's default activation, unlike its siblings.
    b_conv4_3_3_v = conv_2d(b_conv4_7_1, 96, 3, padding='valid',
                            name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3], mode='concat', axis=3)

    # Third stem split: stride-2 conv vs. stride-2 max pool, then concat.
    b_conv5_3_3 = conv_2d(b_conv_4, 192, 3, padding='valid',
                          activation='relu', name='b_conv5_3_3', strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4, kernel_size=3, padding='valid',
                              strides=2, name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3, b_pool5_3_3], mode='concat', axis=3)

    # --- InceptionV4 A/B/C modules ---
    net = b_conv_5
    net = inception_block_a(net)
    net = inception_block_b(net)
    net = inception_block_c(net)

    # --- Head: global average pooling + 2-way softmax ---
    pool5_7_7 = global_avg_pool(net)
    if (training):
        pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')

    if (training):
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network = loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)
    return model

################################################################################