Python lasagne.layers.Conv2DLayer() Examples

The following are 30 code examples of lasagne.layers.Conv2DLayer(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module lasagne.layers, or try the search function.
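Before the project examples, here is a minimal, self-contained sketch of the Conv2DLayer API (the shapes and names below are chosen for illustration and are not taken from any of the projects):

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, Conv2DLayer, get_output

X_var = T.tensor4('X')
# Input shape is (batch, channels, rows, cols); None allows a variable batch size
l_in = InputLayer(shape=(None, 3, 32, 32), input_var=X_var)
l_conv = Conv2DLayer(l_in, num_filters=16, filter_size=(3, 3), stride=1,
                     pad='same', nonlinearity=lasagne.nonlinearities.rectify)
print(l_conv.output_shape)  # (None, 16, 32, 32): 'same' padding keeps rows/cols

conv_fn = theano.function([X_var], get_output(l_conv))
out = conv_fn(np.random.rand(2, 3, 32, 32).astype(theano.config.floatX))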
Example #1
Source File: __init__.py    From aenet with BSD 3-Clause "New" or "Revised" License
def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # 'A' architecture, 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # (channel, time, frequency)
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)

        return net 
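A hedged usage sketch for the dictionary returned above (the batch layout follows the comment on the input layer; feat_shape is defined elsewhere in aenet):

# Hypothetical usage; `net` is the dict returned by build_model()
import theano
import lasagne

prob = lasagne.layers.get_output(net['prob'], deterministic=True)
predict_fn = theano.function([net['input'].input_var], prob)
# scores = predict_fn(batch)  # batch: (n, channel, time, frequency), float32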
Example #2
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_32(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Dense Layer (output)
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis4.output_shape)
    return dis4 
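With pad=1 and stride (2,2), each 4x4 convolution halves the spatial resolution (output = floor((32 + 2*1 - 4)/2) + 1 = 16), so the 32x32 input shrinks 32 -> 16 -> 8 -> 4 before the final dense layer; the 64- and 128-pixel discriminators below follow the same pattern with extra halving stages.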
Example #3
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_64(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 64, 64), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis4.output_shape)
    # Conv Layer
    dis5 = DenseLayer(dis4, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis5.output_shape)
    return dis5 
Example #4
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_128(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 128, 128), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis4.output_shape)
    # Conv Layer
    dis5 = batch_norm(Conv2DLayer(dis4, ndf*16, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv4:", dis5.output_shape)
    # Conv Layer
    dis6 = DenseLayer(dis5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis6.output_shape)
    return dis6 
Example #5
Source File: adda_network.py    From adda_mnist64 with MIT License
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network 
Example #6
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------ 
Example #7
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
def setup_generator(self, input, config):
        for k, v in config.items(): setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7,7), pad=(3,3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i'%i, self.last_layer(), next(units_iter), filter_size=(4,4), stride=(2,2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i'%(i+1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2'%i, self.last_layer(), u*4)
            self.network['upscale%i.1'%i] = SubpixelReshuffleLayer(self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(), 3, filter_size=(7,7), pad=(3,3), nonlinearity=None) 
Example #8
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_conv2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = L.Conv2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                           flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = conv2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #9
Source File: convolutional_neural_network.py    From kaggle-breast-cancer-prediction with MIT License
def CNN(n_epochs):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),  # Convolutional layer.  Params defined below
            ('pool1', layers.MaxPool2DLayer),  # Max-pooling downsamples the feature maps, reducing computation
            ('conv2', layers.Conv2DLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],

        input_shape=(None, 1, 6, 5),
        conv1_num_filters=8,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,

        pool1_pool_size=(2, 2),

        conv2_num_filters=12,
        conv2_filter_size=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.rectify,

        hidden3_num_units=1000,
        output_num_units=2,
        output_nonlinearity=lasagne.nonlinearities.softmax,

        update_learning_rate=0.0001,
        update_momentum=0.9,

        max_epochs=n_epochs,
        verbose=0,
    )
    return net1 
Example #10
Source File: FaceAlignment.py    From DeepAlignmentNetwork with MIT License
def createCNN(self):
        net = {}
        net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)       
        print("Input shape: {0}".format(net['input'].output_shape))

        #STAGE 1
        net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

        net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

        net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)
        
        net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)
                      
        net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
        net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

        net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
        net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

        if self.confidenceLayer:
            net['s1_confidence'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=2, W=GlorotUniform('relu'), nonlinearity=lasagne.nonlinearities.softmax)

        for i in range(1, self.nStages):
            self.addDANStage(i + 1, net)

        net['output'] = net['s' + str(self.nStages) + '_landmarks']
        if self.confidenceLayer:
            net['output'] = lasagne.layers.ConcatLayer([net['output'], net['s1_confidence']])

        return net 
Example #11
Source File: Deopen_classification.py    From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis = -1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis = -1)
    layer2_3 = SliceLayer(layer2_2, indices = slice(0,4), axis = -2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,num_filters = kernel1, filter_size = (4,test_size1))
    layer4 = Conv2DLayer(layer3,num_filters = kernel1, filter_size = (1,test_size1))
    layer5 = Conv2DLayer(layer4,num_filters = kernel1, filter_size = (1,test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size = (1,pool_size))
    layer7 = Conv2DLayer(layer6,num_filters = kernel2, filter_size = (1,test_size2))
    layer8 = Conv2DLayer(layer7,num_filters = kernel2, filter_size = (1,test_size2))
    layer9 = Conv2DLayer(layer8,num_filters = kernel2, filter_size = (1,test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size = (1,pool_size))
    layer11 = Conv2DLayer(layer10,num_filters = kernel3, filter_size = (1,test_size3))
    layer12 = Conv2DLayer(layer11,num_filters = kernel3, filter_size = (1,test_size3))
    layer13 = Conv2DLayer(layer12,num_filters = kernel3, filter_size = (1,test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size = (1,pool_size))
    layer14_d = DenseLayer(layer14, num_units= 256)
    layer3_2 = DenseLayer(layer2_f, num_units = 128)
    layer15 = ConcatLayer([layer14_d,layer3_2])
    layer16 = DropoutLayer(layer15,p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units= 2, nonlinearity=softmax)
    return network


#random search to initialize the weights 
Example #12
Source File: layers.py    From gogh-figure with GNU Affero General Public License v3.0
def style_conv_block(conv_in, num_styles, num_filters, filter_size, stride, nonlinearity=rectify, normalization=instance_norm):
	sc_network = ReflectLayer(conv_in, filter_size//2)
	sc_network = normalization(ConvLayer(sc_network, num_filters, filter_size, stride, nonlinearity=nonlinearity, W=Normal()), num_styles=num_styles)
	return sc_network 
Example #13
Source File: model.py    From gogh-figure with GNU Affero General Public License v3.0
def setup_loss_net(self):
		"""
		Create a network of convolution layers based on the VGG16 architecture from the paper:
		"Very Deep Convolutional Networks for Large-Scale Image Recognition"

		Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8
		License: see http://www.robots.ox.ac.uk/~vgg/research/very_deep/

		Based on code in the Lasagne Recipes repository: https://github.com/Lasagne/Recipes
		"""
		loss_net = self.network['loss_net']
		loss_net['input'] = InputLayer(shape=self.shape)
		loss_net['conv1_1'] = ConvLayer(loss_net['input'], 64, 3, pad=1, flip_filters=False)
		loss_net['conv1_2'] = ConvLayer(loss_net['conv1_1'], 64, 3, pad=1, flip_filters=False)
		loss_net['pool1'] = PoolLayer(loss_net['conv1_2'], 2)
		loss_net['conv2_1'] = ConvLayer(loss_net['pool1'], 128, 3, pad=1, flip_filters=False)
		loss_net['conv2_2'] = ConvLayer(loss_net['conv2_1'], 128, 3, pad=1, flip_filters=False)
		loss_net['pool2'] = PoolLayer(loss_net['conv2_2'], 2)
		loss_net['conv3_1'] = ConvLayer(loss_net['pool2'], 256, 3, pad=1, flip_filters=False)
		loss_net['conv3_2'] = ConvLayer(loss_net['conv3_1'], 256, 3, pad=1, flip_filters=False)
		loss_net['conv3_3'] = ConvLayer(loss_net['conv3_2'], 256, 3, pad=1, flip_filters=False)
		loss_net['pool3'] = PoolLayer(loss_net['conv3_3'], 2)
		loss_net['conv4_1'] = ConvLayer(loss_net['pool3'], 512, 3, pad=1, flip_filters=False)
		loss_net['conv4_2'] = ConvLayer(loss_net['conv4_1'], 512, 3, pad=1, flip_filters=False)
		loss_net['conv4_3'] = ConvLayer(loss_net['conv4_2'], 512, 3, pad=1, flip_filters=False)
		loss_net['pool4'] = PoolLayer(loss_net['conv4_3'], 2)
		loss_net['conv5_1'] = ConvLayer(loss_net['pool4'], 512, 3, pad=1, flip_filters=False)
		loss_net['conv5_2'] = ConvLayer(loss_net['conv5_1'], 512, 3, pad=1, flip_filters=False)
		loss_net['conv5_3'] = ConvLayer(loss_net['conv5_2'], 512, 3, pad=1, flip_filters=False) 
Example #14
Source File: models.py    From diagnose-heart with MIT License
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}

    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=7,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3,
            pad='full'))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(la, num_filters=1, filter_size=7,
                pad='full', nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
            nn.layers.get_output(ret['output'], deterministic=True) 
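A note on the decoder above: pad='full' grows each spatial dimension by filter_size - 1, the mirror image of the default 'valid' convolution, which shrinks it by the same amount; together with the Upscale2DLayer stages this is what lets the 'dec'/'ups' half approximately undo the downsampling of the 'conv'/'pool' half before the per-pixel sigmoid output.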
Example #15
Source File: deep_conv_classification_alt51_heatmap.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #16
Source File: Deopen_regression.py    From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis = -1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis = -1)
    layer2_3 = SliceLayer(layer2_2, indices = slice(0,4), axis = -2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,num_filters = kernel1, filter_size = (4,test_size1))
    layer4 = Conv2DLayer(layer3,num_filters = kernel1, filter_size = (1,test_size1))
    layer5 = Conv2DLayer(layer4,num_filters = kernel1, filter_size = (1,test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size = (1,pool_size))
    layer7 = Conv2DLayer(layer6,num_filters = kernel2, filter_size = (1,test_size2))
    layer8 = Conv2DLayer(layer7,num_filters = kernel2, filter_size = (1,test_size2))
    layer9 = Conv2DLayer(layer8,num_filters = kernel2, filter_size = (1,test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size = (1,pool_size))
    layer11 = Conv2DLayer(layer10,num_filters = kernel3, filter_size = (1,test_size3))
    layer12 = Conv2DLayer(layer11,num_filters = kernel3, filter_size = (1,test_size3))
    layer13 = Conv2DLayer(layer12,num_filters = kernel3, filter_size = (1,test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size = (1,pool_size))
    layer14_d = DenseLayer(layer14, num_units= 256)
    layer3_2 = DenseLayer(layer2_f, num_units = 128)
    layer15 = ConcatLayer([layer14_d,layer3_2])
    #layer16 = DropoutLayer(layer15,p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units= 1, nonlinearity=None)
    return network


#random search to initialize the weights 
Example #17
Source File: eeg_cnn_lib.py    From EEGLearn with GNU General Public License v2.0
def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):
    """
    Builds a VGG-style CNN: stacks of 3x3 convolution layers separated by maxpool
    layers, with the number of kernels in each stack twice that of the previous
    stack. (The fully-connected and softmax layers are appended by the caller.)

    :param input_var: theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                    value corresponding to the number of layers in each stack.
                    (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each.)
    :param n_filters_first: number of filters in the first layer
    :param imsize: Size of the image
    :param n_colors: Number of color channels (depth)
    :return: the last layer of the network and the list of convolution weights
    """
    weights = []        # Keeps the weights for all layers
    count = 0
    # If no initial weight is given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(None, n_colors, imsize, imsize),
                                        input_var=input_var)
    for i, s in enumerate(n_layers):
        for l in range(s):
            network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),
                          W=w_init[count], pad='same')
            count += 1
            weights.append(network.W)
        network = MaxPool2DLayer(network, pool_size=(2, 2))
    return network, weights 
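In EEGLearn the fully-connected and softmax layers mentioned in the docstring are appended by the caller of build_cnn; a minimal sketch of that pattern (the unit count, class count, and variable names here are assumptions):

import theano.tensor as T
import lasagne
from lasagne.layers import DenseLayer, dropout

input_var = T.tensor4('inputs')
network, weights = build_cnn(input_var, n_layers=(4, 2, 1))
network = DenseLayer(dropout(network, p=0.5), num_units=256,
                     nonlinearity=lasagne.nonlinearities.rectify)
network = DenseLayer(dropout(network, p=0.5), num_units=4,  # number of classes assumed
                     nonlinearity=lasagne.nonlinearities.softmax)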
Example #18
Source File: adda_network.py    From adda_mnist64 with MIT License
def network_discriminator(self, features):

        network = {}
        network['discriminator/conv2'] = Conv2DLayer(features, num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv2')
        network['discriminator/pool2'] = MaxPool2DLayer(network['discriminator/conv2'], pool_size=2, stride=2, pad=0, name='discriminator/pool2')
        network['discriminator/conv3'] = Conv2DLayer(network['discriminator/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv3')
        network['discriminator/pool3'] = MaxPool2DLayer(network['discriminator/conv3'], pool_size=2, stride=2, pad=0, name='discriminator/pool3')
        network['discriminator/conv4'] = Conv2DLayer(network['discriminator/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv4')
        network['discriminator/pool4'] = MaxPool2DLayer(network['discriminator/conv4'], pool_size=2, stride=2, pad=0, name='discriminator/pool4')
        network['discriminator/dense1'] = DenseLayer(network['discriminator/pool4'], num_units=64, nonlinearity=rectify, name='discriminator/dense1')
        network['discriminator/output'] = DenseLayer(network['discriminator/dense1'], num_units=2, nonlinearity=softmax, name='discriminator/output')

        return network 
Example #19
Source File: FaceAlignmentTraining.py    From DeepAlignmentNetwork with MIT License
def createCNN(self):
        net = {}
        net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)       
        print("Input shape: {0}".format(net['input'].output_shape))

        #STAGE 1
        net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
        net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

        net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

        net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)
        
        net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
        net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)
                      
        net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
        net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

        net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
        net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

        for i in range(1, self.nStages):
            self.addDANStage(i + 1, net)

        net['output'] = net['s' + str(self.nStages) + '_landmarks']

        return net 
Example #20
Source File: deep_conv_classification_alt64.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #21
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x2.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #22
Source File: model.py    From BirdNET with MIT License
def classificationBranch(net, kernel_size):

    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=int(FILTERS[-1] * RESNET_K),
                        filter_size=kernel_size,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST  CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                        filter_size=1,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                        num_filters=len(cfg.CLASSES),
                        filter_size=1,
                        nonlinearity=None)
    return branch 
Example #23
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #24
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
        conv = ConvLayer(input, units, filter_size, stride=stride, pad=pad, nonlinearity=None)
        prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
        self.network[name+'x'] = conv
        self.network[name+'>'] = prelu
        return prelu 
Example #25
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
def setup_perceptual(self, input):
        """Use lasagne to create a network of convolution layers using pre-trained VGG19 weights.
        """
        offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,3,1,1))
        self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x+0.5)*255.0) - offset)

        self.network['mse'] = self.network['percept']
        self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
        self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
        self.network['pool1']   = PoolLayer(self.network['conv1_2'], 2, mode='max')
        self.network['conv2_1'] = ConvLayer(self.network['pool1'],   128, 3, pad=1)
        self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
        self.network['pool2']   = PoolLayer(self.network['conv2_2'], 2, mode='max')
        self.network['conv3_1'] = ConvLayer(self.network['pool2'],   256, 3, pad=1)
        self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
        self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
        self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
        self.network['pool3']   = PoolLayer(self.network['conv3_4'], 2, mode='max')
        self.network['conv4_1'] = ConvLayer(self.network['pool3'],   512, 3, pad=1)
        self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
        self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
        self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
        self.network['pool4']   = PoolLayer(self.network['conv4_4'], 2, mode='max')
        self.network['conv5_1'] = ConvLayer(self.network['pool4'],   512, 3, pad=1)
        self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
        self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
        self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1) 
Example #26
Source File: deep_conv_classification_alt57.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #27
Source File: deep_conv_classification_alt55.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #28
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1_heatmap.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #29
Source File: deep_conv_classification_alt56.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #30
Source File: deep_conv_classification_alt51_luad10in20_brca10.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var;