Python lasagne.layers.batch_norm() Examples
The following are 30 code examples of lasagne.layers.batch_norm(), collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the lasagne.layers module.
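Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern most of them follow: batch_norm() wraps another layer, removes that layer's bias, inserts a BatchNormLayer behind it, and re-applies the original nonlinearity on top. The layer sizes here are arbitrary.

import lasagne
from lasagne.layers import InputLayer, Conv2DLayer, DenseLayer, batch_norm
from lasagne.nonlinearities import rectify, softmax

# toy classifier over 28x28 grayscale images
net = InputLayer(shape=(None, 1, 28, 28))
# convolution + batch normalization + ReLU in a single call
net = batch_norm(Conv2DLayer(net, num_filters=32, filter_size=3, nonlinearity=rectify))
# dense layer + batch normalization + ReLU
net = batch_norm(DenseLayer(net, num_units=256, nonlinearity=rectify))
# the output layer is usually left un-normalized
net = DenseLayer(net, num_units=10, nonlinearity=softmax)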
Example #1
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_discriminator_32(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Dense Layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis4.output_shape)
    return dis4
Example #2
Source File: wgan.py From Theano-MPI with Educational Community License v2.0
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer, DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("critic output:", layer.output_shape)
    return layer
Example #3
Source File: lsgan.py From Theano-MPI with Educational Community License v2.0
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer, DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print ("critic output:", layer.output_shape)
    return layer
Example #4
Source File: enhance.py From neural-enhance with GNU Affero General Public License v3.0
def setup_discriminator(self):
    c = args.discriminator_size
    self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
    hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
    self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
    self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
    self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
    self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                nonlinearity=lasagne.nonlinearities.linear))

#------------------------------------------------------------------------------------------------------------------
# Input / Output
#------------------------------------------------------------------------------------------------------------------
Example #5
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    Input = InputLayer(shape=(None, 2), input_var=image)
    print ("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc1:", dis1.output_shape)
    if GP_norm is True:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    else:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", disout.output_shape)
    return disout
Example #6
Source File: layers.py From gogh-figure with GNU Affero General Public License v3.0
def instance_norm(layer, **kwargs):
    """
    The equivalent of Lasagne's `batch_norm()` convenience method, but for instance normalization.
    Refer: http://lasagne.readthedocs.io/en/latest/modules/layers/normalization.html#lasagne.layers.batch_norm
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    bn_name = (kwargs.pop('name', None) or
               (getattr(layer, 'name', None) and layer.name + '_bn'))
    layer = InstanceNormLayer(layer, name=bn_name, **kwargs)
    if nonlinearity is not None:
        nonlin_name = bn_name and bn_name + '_nonlin'
        layer = NonlinearityLayer(layer, nonlinearity, name=nonlin_name)
    return layer

# TODO: Add normalization
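Assuming InstanceNormLayer is defined elsewhere in the same project, this helper is meant to be called exactly like batch_norm(); a hypothetical call site could look like:

# hypothetical usage, mirroring how batch_norm() is applied
layer = instance_norm(Conv2DLayer(layer, num_filters=64, filter_size=3, nonlinearity=rectify))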
Example #7
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_discriminator_128(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 128, 128), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv4:", dis4.output_shape)
    # Conv Layer
    dis5 = batch_norm(Conv2DLayer(dis4, ndf*16, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv5:", dis5.output_shape)
    # Dense Layer
    dis6 = DenseLayer(dis5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis6.output_shape)
    return dis6
Example #8
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_discriminator_64(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 64, 64), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv4:", dis4.output_shape)
    # Dense Layer
    dis5 = DenseLayer(dis4, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis5.output_shape)
    return dis5
Example #9
Source File: lasagne_net.py From BirdCLEF-Baseline with MIT License
def batch_norm(layer):
    if cfg.BATCH_NORM:
        return l_batch_norm(layer)
    else:
        return layer
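This wrapper lets the rest of the network-building code call batch_norm() unconditionally while a single config flag decides whether normalization is actually applied. Assuming the module imports lasagne.layers as l, a hypothetical call site would be:

# acts as a no-op when cfg.BATCH_NORM is False
net = batch_norm(l.Conv2DLayer(net, num_filters=64, filter_size=3))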
Example #10
Source File: layers.py From Neural-Photo-Editor with MIT License
def InceptionUpscaleLayer(incoming, param_dict, block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i, dict in enumerate(param_dict):
        # Loop up branch
        for j, style in enumerate(dict['style']):
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                    incoming = lasagne.layers.Upscale2DLayer(
                        incoming = incoming if j == 0 else branch[i],
                        scale_factor = dict['stride'][j]),
                    pool_size = dict['filter_size'][j],
                    stride = [1,1],
                    mode = dict['mode'][j],
                    pad = dict['pad'][j],
                    name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j])
            # Apply Batchnorm
            branch[i] = BN(branch[i], name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch, name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
Example #11
Source File: layers.py From Neural-Photo-Editor with MIT License
def MDBLOCK(incoming, num_filters, scales, name, nonlinearity):
    return NL(BN(ESL([incoming,
                      MDCL(NL(BN(MDCL(NL(BN(incoming, name=name+'bnorm0'), nonlinearity),
                                      num_filters, scales, name),
                                 name=name+'bnorm1'),
                              nonlinearity),
                           num_filters, scales, name+'2')]),
                 name=name+'bnorm2'),
              nonlinearity)

# Gaussian Sample Layer for VAE from Tencia Lee
Example #12
Source File: dcgan.py From deep-learning-models with MIT License
def create_model(self, X, Z, n_dim, n_out, n_chan=1):
    # params
    n_lat = 100     # latent variables
    n_g_hid1 = 1024 # size of hidden layer in generator layer 1
    n_g_hid2 = 128  # size of hidden layer in generator layer 2
    n_out = n_dim * n_dim * n_chan # total dimensionality of output

    if self.model == 'gaussian':
        raise Exception('Gaussian variables currently not supported in GAN')

    # create the generator network
    l_g_in = lasagne.layers.InputLayer(shape=(None, n_lat), input_var=Z)
    l_g_hid1 = batch_norm(lasagne.layers.DenseLayer(l_g_in, n_g_hid1))
    l_g_hid2 = batch_norm(lasagne.layers.DenseLayer(l_g_hid1, n_g_hid2*7*7))
    l_g_hid2 = lasagne.layers.ReshapeLayer(l_g_hid2, ([0], n_g_hid2, 7, 7))
    l_g_dc1 = batch_norm(Deconv2DLayer(l_g_hid2, 64, 5, stride=2, pad=2))
    l_g = Deconv2DLayer(l_g_dc1, n_chan, 5, stride=2, pad=2,
                        nonlinearity=lasagne.nonlinearities.sigmoid)
    print ("Generator output:", l_g.output_shape)

    # create the discriminator network
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
    l_d_in = lasagne.layers.InputLayer(shape=(None, n_chan, n_dim, n_dim), input_var=X)
    l_d_hid1 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_in, num_filters=64, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid1'))
    l_d_hid2 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_hid1, num_filters=128, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid2'))
    l_d_hid3 = batch_norm(lasagne.layers.DenseLayer(l_d_hid2, 1024, nonlinearity=lrelu))
    l_d = lasagne.layers.DenseLayer(l_d_hid3, 1, nonlinearity=lasagne.nonlinearities.sigmoid)
    print ("Discriminator output:", l_d.output_shape)

    return l_g, l_d
Example #13
Source File: model.py From BirdNET with MIT License
def classificationBranch(net, kernel_size):
    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                                        num_filters=int(FILTERS[-1] * RESNET_K),
                                        filter_size=kernel_size,
                                        nonlinearity=nl.rectify))
    #log.p(("\t\tPOST CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)

    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                                        filter_size=1,
                                        nonlinearity=nl.rectify))
    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)

    # Class Convolution
    branch = l.Conv2DLayer(branch,
                           num_filters=len(cfg.CLASSES),
                           filter_size=1,
                           nonlinearity=None)
    return branch
Example #14
Source File: FaceAlignment.py From DeepAlignmentNetwork with MIT License
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    #STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    if self.confidenceLayer:
        net['s1_confidence'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=2, W=GlorotUniform('relu'), nonlinearity=lasagne.nonlinearities.softmax)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    if self.confidenceLayer:
        net['output'] = lasagne.layers.ConcatLayer([net['output'], net['s1_confidence']])

    return net
Example #15
Source File: models.py From diagnose-heart with MIT License
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}
    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3, pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3, pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=7, pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3, pad='full'))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(la, num_filters=1, filter_size=7, pad='full',
                                         nonlinearity=nn.nonlinearities.sigmoid)
    return ret, nn.layers.get_output(ret['output']), \
        nn.layers.get_output(ret['output'], deterministic=True)
Example #16
Source File: FaceAlignmentTraining.py From DeepAlignmentNetwork with MIT License
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    #STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']

    return net
Example #17
Source File: lsgan.py From Theano-MPI with Educational Community License v2.0
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same', output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
Example #18
Source File: res_net_blocks.py From dcase_task2 with MIT License
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Formula to figure out depth: 6n + 2
    """
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)

    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network
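As a usage note (not part of the original file): with the 6n + 2 formula from the docstring, n=18 yields a 110-layer network. A hypothetical instantiation for 32x32 RGB inputs could be:

import theano.tensor as T

input_var = T.tensor4('inputs')
network = ResNet_FullPreActivation(input_shape=(None, 3, 32, 32), input_var=input_var, n_classes=10, n=18)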
Example #19
Source File: wgan.py From Theano-MPI with Educational Community License v2.0
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same', output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
Example #20
Source File: res_net_blocks.py From dcase_task2 with MIT License
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers as in paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]

    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3), stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None, pad='same', W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block
Example #21
Source File: lsgan_cifar10.py From Theano-MPI with Educational Community License v2.0
def build_generator(input_var=None, verbose=False):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 1024*4*4))
    layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 512, 5, stride=2, crop='same', output_size=8))
    layer = batch_norm(Deconv2DLayer(layer, 256, 5, stride=2, crop='same', output_size=16))
    layer = Deconv2DLayer(layer, 3, 5, stride=2, crop='same', output_size=32,
                          nonlinearity=sigmoid)
    if verbose:
        print ("Generator output:", layer.output_shape)
    return layer
Example #22
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, ExpressionLayer, PadLayer, InputLayer, FlattenLayer, SliceLayer
    # from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    convnet_layers = [self._network_in]
    convnet_layers_preact = [self._network_in]
    layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
    for i, layer_blueprint in enumerate(layer_blueprints, start=1):
        # e.g. "64@3x3(valid) -> 64@3x3(full)"
        nb_filters, rest = layer_blueprint.split("@")
        filter_shape, rest = rest.split("(")
        nb_filters = int(nb_filters)
        filter_shape = tuple(map(int, filter_shape.split("x")))
        pad = rest[:-1]

        preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                           stride=(1, 1), nonlinearity=None, pad=pad,
                           W=lasagne.init.HeNormal(gain='relu'))

        if i > len(layer_blueprints) // 2 and i != len(layer_blueprints):
            shortcut = convnet_layers_preact[len(layer_blueprints)-i]
            if i == len(layer_blueprints):
                if preact.output_shape[1] != shortcut.output_shape[1]:
                    shortcut = SliceLayer(shortcut, slice(0, 1), axis=1)
                else:
                    raise NameError("Something is wrong.")
            print("Shortcut from {} to {}".format(len(layer_blueprints)-i, i))
            preact = ElemwiseSumLayer([preact, shortcut])

        convnet_layers_preact.append(preact)

        layer = NonlinearityLayer(preact, nonlinearity=rectify)
        convnet_layers.append(layer)

    self._network = FlattenLayer(preact)
    # network = DenseLayer(l, num_units=int(np.prod(self.image_shape)),
    #                      W=lasagne.init.HeNormal(),
    #                      nonlinearity=None)

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
Example #23
Source File: models.py From diagnose-heart with MIT License
def build_fcn_segmenter(input_var, shape, version=1):
    ret = {}

    if version == 1:  #for size 256
        ret['input'] = layer = nn.layers.InputLayer(shape, input_var)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=8, filter_size=5))
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=16, filter_size=3))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=32, filter_size=4))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=64, filter_size=4))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=64, filter_size=5))
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=64, filter_size=5, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=32, filter_size=4, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=16, filter_size=4, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=8, filter_size=3, pad='full'))
        ret['output'] = layer = nn.layers.Conv2DLayer(layer, num_filters=1, filter_size=5, pad='full',
                                                      nonlinearity=nn.nonlinearities.sigmoid)
    elif version == 2:  #for size 196
        ret['input'] = layer = nn.layers.InputLayer(shape, input_var)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=8, filter_size=5))
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=16, filter_size=3))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=32, filter_size=4))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=64, filter_size=5))
        ret['pool{}'.format(len(ret))] = layer = nn.layers.MaxPool2DLayer(layer, pool_size=2)
        ret['conv{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=128, filter_size=6))
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=64, filter_size=6, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=32, filter_size=5, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=16, filter_size=4, pad='full'))
        ret['ups{}'.format(len(ret))] = layer = nn.layers.Upscale2DLayer(layer, scale_factor=2)
        ret['dec{}'.format(len(ret))] = layer = bn(nn.layers.Conv2DLayer(layer, num_filters=8, filter_size=3, pad='full'))
        ret['output'] = layer = nn.layers.Conv2DLayer(layer, num_filters=1, filter_size=5, pad='full',
                                                      nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
        nn.layers.get_output(ret['output'], deterministic=True)
Example #24
Source File: birdCLEF_train.py From BirdCLEF2017 with MIT License
def buildModel(mtype=1):
    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Example #25
Source File: birdCLEF_test.py From BirdCLEF2017 with MIT License
def buildModel(mtype=1):
    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Example #26
Source File: birdCLEF_evaluate.py From BirdCLEF2017 with MIT License
def buildModel(mtype=1):
    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)
    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Example #27
Source File: layers.py From Neural-Photo-Editor with MIT License
def InceptionLayer(incoming, param_dict, block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i, dict in enumerate(param_dict):
        # Loop up branch
        for j, style in enumerate(dict['style']):
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming = incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i], width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='dilation'\
            else DL(
                incoming = incoming if j==0 else branch[i],
                num_units = dict['num_filters'][j],
                W = initmethod('relu'),
                b = None,
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))
            # Apply Batchnorm
            branch[i] = BN(branch[i], name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch, name=block_name)

# Convenience function to define an inception-style block with upscaling
Example #28
Source File: models.py From acnn with GNU General Public License v3.0
def arch_class_02(dim_desc, dim_labels, param_arch, logger):
    logger.info('Architecture:')
    # input layers
    desc = LL.InputLayer(shape=(None, dim_desc))
    patch_op = LL.InputLayer(input_var=Tsp.csc_fmatrix('patch_op'), shape=(None, None))
    logger.info('   input  : dim = %d' % dim_desc)
    # layer 1: dimensionality reduction to 16
    n_dim = 16
    net = LL.DenseLayer(desc, n_dim)
    logger.info('   layer 1: FC%d' % n_dim)
    # layer 2: anisotropic convolution layer with 16 filters
    n_filters = 16
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 2: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 3: anisotropic convolution layer with 32 filters
    n_filters = 32
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 3: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 4: anisotropic convolution layer with 64 filters
    n_filters = 64
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 4: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 5: softmax layer producing a probability on the labels
    if param_arch['non_linearity'] == 'softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=LN.softmax)
        string = '   layer 5: softmax'
    elif param_arch['non_linearity'] == 'log_softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=log_softmax)
        string = '   layer 5: log-softmax'
    else:
        raise Exception('[e] the chosen non-linearity is not supported!')
    logger.info(string)
    # outputs
    return desc, patch_op, cla, net, logger
Example #29
Source File: models.py From acnn with GNU General Public License v3.0
def arch_class_00(dim_desc, dim_labels, param_arch, logger):
    logger.info('Architecture:')
    # input layers
    desc = LL.InputLayer(shape=(None, dim_desc))
    patch_op = LL.InputLayer(input_var=Tsp.csc_fmatrix('patch_op'), shape=(None, None))
    logger.info('   input  : dim = %d' % dim_desc)
    # layer 1: dimensionality reduction to 16
    n_dim = 16
    net = LL.DenseLayer(desc, n_dim)
    logger.info('   layer 1: FC%d' % n_dim)
    # layer 2: anisotropic convolution layer with 16 filters
    n_filters = 16
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 2: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 3: anisotropic convolution layer with 32 filters
    n_filters = 32
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 3: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 4: anisotropic convolution layer with 64 filters
    n_filters = 64
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 4: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 5: fully connected layer with 256 filters
    n_dim = 256
    net = LL.DenseLayer(net, n_dim)
    string = '   layer 5: FC%d' % n_dim
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 6: softmax layer producing a probability on the labels
    if param_arch['flag_nonlinearity'] == 'softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=LN.softmax)
        string = '   layer 6: softmax'
    elif param_arch['flag_nonlinearity'] == 'log_softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=log_softmax)
        string = '   layer 6: log-softmax'
    else:
        raise Exception('[e] the chosen non-linearity is not supported!')
    logger.info(string)
    # outputs
    return desc, patch_op, cla, net, logger
Example #30
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, InputLayer, FlattenLayer, DenseLayer
    from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    network_out = []
    if self.convnet_blueprint is not None:
        convnet_layers = [self._network_in]
        layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # eg. "64@3x3(valid) -> 64@3x3(full)"
            nb_filters, rest = layer_blueprint.split("@")
            filter_shape, rest = rest.split("(")
            nb_filters = int(nb_filters)
            filter_shape = tuple(map(int, filter_shape.split("x")))
            pad = rest[:-1]

            preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                               stride=(1, 1), nonlinearity=None, pad=pad,
                               W=lasagne.init.HeNormal(gain='relu'),
                               name="layer_{}_conv".format(i))

            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            convnet_layers.append(layer)

        network_out.append(FlattenLayer(preact))

    if self.fullnet_blueprint is not None:
        fullnet_layers = [FlattenLayer(self._network_in)]
        layer_blueprints = list(map(str.strip, self.fullnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "500 -> 500 -> 784"
            hidden_size = int(layer_blueprint)

            preact = DenseLayer(fullnet_layers[-1], num_units=hidden_size, nonlinearity=None,
                                W=lasagne.init.HeNormal(gain='relu'),
                                name="layer_{}_dense".format(i))

            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            fullnet_layers.append(layer)

        network_out.append(preact)

    self._network = ElemwiseSumLayer(network_out)
    # TODO: sigmoid should be applied here instead of within loss function.

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network