Python lasagne.layers.DropoutLayer() Examples
The following are 25 code examples of lasagne.layers.DropoutLayer(). Each example notes its source file, project, and license, so you can trace it back to the original code. You may also want to check out the other functions and classes of the lasagne.layers module.
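Before the project examples, here is a minimal sketch of DropoutLayer's train/test behavior. It is not taken from any of the projects below, and the layer sizes are arbitrary placeholders. Dropout is active by default when an output expression is built; passing deterministic=True to lasagne.layers.get_output disables it, which is what you want for validation and prediction. With the default rescale=True, surviving activations are scaled by 1/(1-p) during training, so no extra scaling is needed at test time.

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer, get_output

x = T.matrix('x')
l_in = InputLayer(shape=(None, 100), input_var=x)  # placeholder feature count
l_hid = DenseLayer(l_in, num_units=50)
l_drop = DropoutLayer(l_hid, p=0.5)                # drop half the units

train_out = get_output(l_drop)                     # stochastic: mask resampled per call
eval_out = get_output(l_drop, deterministic=True)  # dropout becomes the identity

f_train = theano.function([x], train_out)
f_eval = theano.function([x], eval_out)

batch = np.ones((4, 100), dtype=theano.config.floatX)
print(f_train(batch))  # differs from call to call
print(f_eval(batch))   # identical on every call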
Example #1
Source File: nn_adagrad_log.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    # taper the hidden widths; integer division keeps num_units an int
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden // 4, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
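For context, a build_model like the one above is only half the story: the caller must build the dropout-enabled expression for training and the deterministic one for prediction. The sketch below shows that pattern; it is an illustration, not code from kaggle_otto, and the tensors X and y, the model instance, and the input_dim of 93 (the Otto feature count) are assumptions.

import theano
import theano.tensor as T
from lasagne.layers import get_output, get_all_params
from lasagne.objectives import categorical_crossentropy
from lasagne.updates import adagrad

X = T.matrix('X')
y = T.ivector('y')
l_out = model.build_model(input_dim=93)  # hypothetical model instance

train_prob = get_output(l_out, X)        # dropout active
loss = categorical_crossentropy(train_prob, y).mean()
params = get_all_params(l_out, trainable=True)
train_fn = theano.function([X, y], loss, updates=adagrad(loss, params, learning_rate=0.01))

eval_prob = get_output(l_out, X, deterministic=True)  # dropout disabled
predict_fn = theano.function([X], eval_prob)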
Example #2
Source File: bidnn.py, from the BiDNN project (GNU Affero General Public License v3.0)

def __create_toplogy__(self, input_var_first=None, input_var_second=None):
    # define network topology
    if (self.conf.rep % 2 != 0):
        raise ValueError("Representation size should be divisible by two as it's formed by combining two crossmodal translations", self.conf.rep)

    # input layers
    l_in_first = InputLayer(shape=(self.conf.batch_size, self.conf.mod1size), input_var=input_var_first)
    l_in_second = InputLayer(shape=(self.conf.batch_size, self.conf.mod2size), input_var=input_var_second)

    # first -> second
    l_hidden1_first = DenseLayer(l_in_first, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())  # enc1
    l_hidden2_first = DenseLayer(l_hidden1_first, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=GlorotUniform())  # enc2
    l_hidden2_first_d = DropoutLayer(l_hidden2_first, p=self.conf.dropout)
    l_hidden3_first = DenseLayer(l_hidden2_first_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())  # dec1
    l_out_first = DenseLayer(l_hidden3_first, num_units=self.conf.mod2size, nonlinearity=self.conf.act, W=GlorotUniform())  # dec2

    if self.conf.untied:
        # FREE
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())  # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=GlorotUniform())  # enc2
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())  # dec1
        l_out_second = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform())  # dec2
    else:
        # TIED middle
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())  # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second, num_units=self.conf.rep//2, nonlinearity=self.conf.act, W=l_hidden3_first.W.T)  # enc2
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=l_hidden2_first.W.T)  # dec1
        l_out_second = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform())  # dec2

    l_out = concat([l_out_first, l_out_second])

    return l_out, l_hidden2_first, l_hidden2_second
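The tied branch above works because Lasagne accepts a Theano expression for W: passing the transpose of another layer's weight matrix makes both layers read and update the same underlying storage. A minimal sketch of the mechanism, with toy sizes of my own choosing:

import lasagne
from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 8))
enc = DenseLayer(l_in, num_units=4)            # owns an (8, 4) weight matrix
dec = DenseLayer(enc, num_units=8, W=enc.W.T)  # transposed view of the same storage

# Only one weight matrix is trainable; dec contributes just its bias.
print(lasagne.layers.get_all_params(dec, trainable=True))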
Example #3
Source File: benchmark_lasagne.py, from the vgg-benchmarks project (MIT License)

def build_model(input_var):
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #4
Source File: nn_adagrad.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #5
Source File: bagging_nn_rmsprop.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #6
Source File: bagging_nn_nesterov.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #7
Source File: nn_rmsprop_features.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    # narrow-wide-narrow layout; integer division keeps num_units an int
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #8
Source File: nn_adagrad_pca.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #9
Source File: nn_adagrad_pca.py, from the kaggle_otto project (BSD 3-Clause "New" or "Revised" License)

def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    # bottleneck in the middle; integer division keeps num_units an int
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #10
Source File: model.py, from the BirdNET project (MIT License)

def classificationBranch(net, kernel_size):
    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                                        num_filters=int(FILTERS[-1] * RESNET_K),
                                        filter_size=kernel_size,
                                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)

    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                                        filter_size=1,
                                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)

    # Class Convolution
    branch = l.Conv2DLayer(branch,
                           num_filters=len(cfg.CLASSES),
                           filter_size=1,
                           nonlinearity=None)

    return branch
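Both DropoutLayer calls above omit p, so they fall back to the layer's default rate of 0.5. A quick check against your installed Lasagne version:

from lasagne.layers import InputLayer, DropoutLayer

layer = DropoutLayer(InputLayer((None, 10)))
print(layer.p)  # 0.5, the default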
Example #11
Source File: Deopen_regression.py, from the Deopen project (MIT License)

def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # dropout is commented out in this regression variant; compare the
    # classification version below, which keeps DropoutLayer(p=0.5)
    #layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network

#random search to initialize the weights
Example #12
Source File: Deopen_classification.py, from the Deopen project (MIT License)

def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network

#random search to initialize the weights
Example #13
Source File: lasagne_net.py, from the BirdCLEF-Baseline project (MIT License)

def build_pi_model():
    log.i('BUILDING RASPBERRY PI MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # Convolutional layer groups
    for i in range(len(cfg.FILTERS)):
        # 3x3 Convolution + Stride
        net = batch_norm(l.Conv2DLayer(net,
                                       num_filters=cfg.FILTERS[i],
                                       filter_size=cfg.KERNEL_SIZES[i],
                                       num_groups=cfg.NUM_OF_GROUPS[i],
                                       pad='same',
                                       stride=2,
                                       W=initialization(cfg.NONLINEARITY),
                                       nonlinearity=nonlinearity(cfg.NONLINEARITY)))

        log.i(('\tGROUP', i + 1, 'OUT SHAPE:', l.get_output_shape(net)))

    # Fully connected layers + dropout layers
    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))
    net = l.DropoutLayer(net, p=cfg.DROPOUT)

    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))
    net = l.DropoutLayer(net, p=cfg.DROPOUT)

    # Classification Layer (Softmax)
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('softmax'), W=initialization('softmax'))

    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net)))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net

################## BUILDING THE MODEL ###################
Example #14
Source File: res_net_blocks.py, from the dcase_task2 project (MIT License)

def residual_wide_block(l, increase_dim=False, projection=True, first=False, filters=16):
    """Create a residual learning building block with two stacked 3x3 conv layers as in paper"""
    if increase_dim:
        first_stride = (2, 2)
    else:
        first_stride = (1, 1)

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=filters, filter_size=(3, 3),
                                  stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

    dropout = DropoutLayer(conv_1, p=0.3)

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(dropout, num_filters=filters, filter_size=(3, 3), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    elif first:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(1, 1),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block
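A block like this is meant to be chained into groups. The sketch below is illustrative only; the group size and widths are not from the source file, and it assumes the same module-level names (ConvLayer, he_norm, and so on) are in scope:

from lasagne.layers import InputLayer

net = InputLayer((None, 3, 32, 32))
net = residual_wide_block(net, first=True, filters=16)
for _ in range(3):
    net = residual_wide_block(net, filters=16)                 # identity shortcuts
net = residual_wide_block(net, increase_dim=True, filters=32)  # downsample and widen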
Example #15
Source File: cifar10_nin.py, from the Recipes project (MIT License)

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'], num_filters=192, filter_size=5, pad=2, flip_filters=False)
    net['cccp1'] = ConvLayer(net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'], pool_size=3, stride=2, mode='max', ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'], num_filters=192, filter_size=5, pad=2, flip_filters=False)
    net['cccp3'] = ConvLayer(net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'], pool_size=3, stride=2, mode='average_exc_pad', ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'], num_filters=192, filter_size=3, pad=1, flip_filters=False)
    net['cccp5'] = ConvLayer(net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'], pool_size=8, mode='average_exc_pad', ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    return net
Example #16
Source File: AED_train.py, from the AcousticEventDetection project (MIT License)

def buildModel():
    print("BUILDING MODEL TYPE...")

    # default settings
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    # input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    # conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    print("\tFINAL POOL OUT SHAPE:", l.get_output_shape(net))

    # dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    # Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print("...DONE!")

    # model stats
    print("MODEL HAS", sum(hasattr(layer, 'W') for layer in l.get_all_layers(net)), "WEIGHTED LAYERS")
    print("MODEL HAS", l.count_params(net), "PARAMS")

    return net
Example #17
Source File: network.py, from the cnn_workshop project (Apache License 2.0)

def get_net():
    return NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('dropout3', layers.DropoutLayer),
            ('hidden4', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 96, 96),
        conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
        dropout1_p=0.1,
        conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
        dropout2_p=0.2,
        conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
        dropout3_p=0.3,
        hidden4_num_units=1000,
        dropout4_p=0.5,
        hidden5_num_units=1000,
        output_num_units=30,
        output_nonlinearity=None,

        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),

        regression=True,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=200),
        ],
        max_epochs=3000,
        verbose=1,
    )
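Because nolearn's NeuralNet owns the training loop, the dropout layers are handled automatically: they are active during fit and disabled when predict builds its deterministic output. A usage sketch with hypothetical random data shaped to match input_shape=(None, 1, 96, 96) and output_num_units=30:

import numpy as np

net = get_net()
X = np.random.rand(500, 1, 96, 96).astype(np.float32)  # hypothetical images
y = np.random.rand(500, 30).astype(np.float32)         # hypothetical regression targets
net.fit(X, y)           # dropout active while training
preds = net.predict(X)  # dropout disabled for prediction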
Example #18
Source File: vgg19.py, from the Recipes project (MIT License)

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_4'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_4'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_4'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #19
Source File: vgg16_lymph.py, from the u24_lymphocyte project (BSD 3-Clause "New" or "Revised" License)

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    output_layer = net['prob']
    return output_layer
Example #20
Source File: vgg16.py, from the u24_lymphocyte project (BSD 3-Clause "New" or "Revised" License)

def build_model():
    net = {}
    input_var = theano.tensor.tensor4('input_var')
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    output_layer = net['prob']
    return net, output_layer, input_var
Example #21
Source File: vgg16_full.py, from the u24_lymphocyte project (BSD 3-Clause "New" or "Revised" License)

def build_model():
    net = {}
    input_var = theano.tensor.tensor4('input_var')
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    output_layer = net['prob']
    return net, output_layer, input_var
Example #22
Source File: vgg16.py, from the Recipes project (MIT License)

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
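To run one of these VGG definitions with pretrained weights, the usual pattern is to unpickle a parameter file and push the values into the graph with set_all_param_values. A sketch, assuming a vgg16.pkl in the format distributed with the Lasagne Recipes model zoo (a dict with a 'param values' key):

import pickle
import lasagne

net = build_model()
with open('vgg16.pkl', 'rb') as f:
    params = pickle.load(f, encoding='latin-1')  # encoding needed on Python 3
lasagne.layers.set_all_param_values(net['prob'], params['param values'])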
Example #23
Source File: birdCLEF_train.py, from the BirdCLEF2017 project (MIT License)

def buildModel(mtype=1):
    print("BUILDING MODEL TYPE", mtype, "...")

    # default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    # specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    # input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    # conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    print("\tFINAL POOL OUT SHAPE:", l.get_output_shape(net))

    # dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    # Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print("...DONE!")

    # model stats
    print("MODEL HAS", sum(hasattr(layer, 'W') for layer in l.get_all_layers(net)), "WEIGHTED LAYERS")
    print("MODEL HAS", l.count_params(net), "PARAMS")

    return net
Example #24
Source File: neuralforest.py, from the ShallowNeuralDecisionForest project (MIT License)

def __init__(self, n_inputs, n_outputs, regression, multiclass=False,
             depth=5, n_estimators=20, n_hidden=128, learning_rate=0.01,
             num_epochs=500, pi_iters=20, sgd_iters=10, batch_size=1000,
             momentum=0.0, dropout=0.0, loss=None, update=adagrad):
    """
    Parameters
    ----------
    n_inputs : number of input features
    n_outputs : number of classes to predict (1 for regression);
        for 2 class classification n_outputs should be 2, not 1
    regression : True for regression, False for classification
    multiclass : not used
    depth : depth of each tree in the ensemble
    n_estimators : number of trees in the ensemble
    n_hidden : number of neurons in the hidden layer
    pi_iters : number of iterations for the iterative algorithm that updates pi
    sgd_iters : number of full iterations of sgd between two consecutive updates of pi
    loss : theano loss function. If None, squared error will be used for regression
        and cross entropy will be used for classification
    update : theano update function
    """
    self._depth = depth
    self._n_estimators = n_estimators
    self._n_hidden = n_hidden
    self._n_outputs = n_outputs
    self._loss = loss
    self._regression = regression
    self._multiclass = multiclass
    self._learning_rate = learning_rate
    self._num_epochs = num_epochs
    self._pi_iters = pi_iters
    self._sgd_iters = sgd_iters
    self._batch_size = batch_size
    self._momentum = momentum
    self._update = update

    self.t_input = T.matrix('input')
    self.t_label = T.matrix('output')

    self._cached_trainable_params = None
    self._cached_params = None

    self._n_net_out = n_estimators * ((1 << depth) - 1)

    self.l_input = InputLayer((None, n_inputs))
    self.l_dense1 = DenseLayer(self.l_input, self._n_hidden, nonlinearity=rectify)
    if dropout != 0:
        self.l_dense1 = DropoutLayer(self.l_dense1, p=dropout)
    if not __DEBUG_NO_FOREST__:
        self.l_dense2 = DenseLayer(self.l_dense1, self._n_net_out, nonlinearity=sigmoid)
        self.l_forest = NeuralForestLayer(self.l_dense2, self._depth, self._n_estimators,
                                          self._n_outputs, self._pi_iters)
    else:
        self.l_forest = DenseLayer(self.l_dense1, self._n_outputs, nonlinearity=softmax)
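The _n_net_out line sizes the layer that feeds the forest: counting depth as the number of decision levels, a complete binary tree has 2**depth - 1 internal decision nodes, and the network emits one sigmoid routing probability per node per tree. For the defaults:

depth, n_estimators = 5, 20
n_net_out = n_estimators * ((1 << depth) - 1)  # (1 << 5) - 1 = 31 nodes per tree
print(n_net_out)  # 620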
Example #25
Source File: vgg_cnn_s.py, from the Recipes project (MIT License)

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7, stride=2, flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3, ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2, ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3, ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net