Python lasagne.layers.dnn.Conv2DDNNLayer() Examples
The following are 30 code examples of lasagne.layers.dnn.Conv2DDNNLayer(), drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out all the other available functions and classes of the module lasagne.layers.dnn.
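Before the examples, here is a minimal sketch of how Conv2DDNNLayer is typically used. It assumes a working Theano/cuDNN setup (required to import lasagne.layers.dnn); the layer names and sizes are illustrative only and do not come from any example below.

import lasagne
from lasagne.layers import InputLayer
from lasagne.layers.dnn import Conv2DDNNLayer  # importing lasagne.layers.dnn requires cuDNN

# A single cuDNN-backed 3x3 convolution over 32x32 RGB input, with 'same' padding.
l_in = InputLayer(shape=(None, 3, 32, 32))
l_conv = Conv2DDNNLayer(l_in, num_filters=16, filter_size=(3, 3), pad='same',
                        nonlinearity=lasagne.nonlinearities.rectify)
print(l_conv.output_shape)  # (None, 16, 32, 32)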
Example #1
Source File: nn_hough.py From kaggle-heart with MIT License
def __init__(self, incoming, radii, normalise=True, stride=(1, 1), pad=0, **kwargs):
    # Use predetermined Hough filters
    W = _create_hough_filters(2*np.max(radii)+1, radii, normalise=normalise)
    # Transform to correct shape and dtype
    W = W[:, np.newaxis, :, :].astype('float32')
    # remove biases and nonlinearities
    b = None
    untie_biases = False
    nonlinearity = None
    flip_filters = True  # doesn't matter
    super(Conv2DDNNLayer, self).__init__(incoming, W.shape[0], W.shape[-2:], stride, pad,
                                         untie_biases, W, b, nonlinearity, flip_filters,
                                         n=2, **kwargs)
    # Remove trainable tag for W
    self.params[self.W] = self.params[self.W].difference(set(['trainable']))
Example #2
Source File: art_it_up.py From ArtsyNetworks with MIT License
def initialize_network(width):
    # initialize network - VGG19 style
    net = {}
    net['input'] = InputLayer((1, 3, width, width))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')
    return net
Example #3
Source File: benchmark_lasagne.py From vgg-benchmarks with MIT License
def build_model(input_var):
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #4
Source File: layers.py From Neural-Photo-Editor with MIT License
def pd(num_layers=2, num_filters=32, filter_size=(3,3), pad=1, stride=(1,1),
       nonlinearity=elu, style='convolutional', bnorm=1, **kwargs):
    input_args = locals()
    input_args.pop('num_layers')
    return {key: entry if type(entry) is list else [entry]*num_layers
            for key, entry in input_args.iteritems()}

# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this
# def C2D(incoming=None, num_filters=32, filter_size=[3,3], pad='same', stride=[1,1],
#         W=initmethod('relu'), nonlinearity=elu, name=None):
#     return lasagne.layers.dnn.Conv2DDNNLayer(incoming, num_filters, filter_size, stride, pad,
#                                              False, W, None, nonlinearity, False)

# Shape-Preserving Gaussian Sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
Example #5
Source File: res_net_blocks.py From dcase_task2 with MIT License
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Formula to figure out depth: 6n + 2
    """
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1),
                             nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)

    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network
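With the default n=18, the depth formula in the docstring gives 6 * 18 + 2 = 110 weighted layers.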
Example #6
Source File: res_net_blocks.py From dcase_task2 with MIT License
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 convlayers as in paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016
    (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]

    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3),
                                  stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block
Example #7
Source File: Deopen_regression.py From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network

# random search to initialize the weights
Example #8
Source File: Deopen_classification.py From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network

# random search to initialize the weights
Example #9
Source File: googlenet.py From Recipes with MIT License
def build_inception_module(name, input_layer, nfilters):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1, flip_filters=False)

    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)

    net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1, flip_filters=False)
    net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)

    net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1, flip_filters=False)
    net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)

    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
        ])

    return {'{}/{}'.format(name, k): v for k, v in net.items()}
Example #10
Source File: j0_dense3.py From kaggle-heart with MIT License
def build_model():
    #################
    # Regular model #
    #################
    l0 = InputLayer(data_sizes["sliced:data:ax"])
    l0r = reshape(l0, (-1, 1, ) + data_sizes["sliced:data:ax"][-2:])

    # first do the segmentation steps
    l1a = ConvLayer(l0r, num_filters=32, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1b = ConvLayer(l1a, num_filters=32, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1c = ConvLayer(l1b, num_filters=64, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1f = ConvLayer(l1c, num_filters=1, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1),
                    nonlinearity=lasagne.nonlinearities.sigmoid)

    l_1r = reshape(l1f, data_sizes["sliced:data:ax"])

    l_d3 = lasagne.layers.DenseLayer(l_1r, num_units=2,
                                     nonlinearity=lasagne.nonlinearities.identity)
    l_systole = MuLogSigmaErfLayer(l_d3)

    l_d3b = lasagne.layers.DenseLayer(l_1r, num_units=2,
                                      nonlinearity=lasagne.nonlinearities.identity)
    l_diastole = MuLogSigmaErfLayer(l_d3b)

    return {
        "inputs": {
            "sliced:data:ax": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole
        }
    }
Example #11
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, ExpressionLayer, PadLayer, InputLayer, FlattenLayer, SliceLayer
    # from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    convnet_layers = [self._network_in]
    convnet_layers_preact = [self._network_in]
    layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
    for i, layer_blueprint in enumerate(layer_blueprints, start=1):
        "64@3x3(valid) -> 64@3x3(full)"
        nb_filters, rest = layer_blueprint.split("@")
        filter_shape, rest = rest.split("(")
        nb_filters = int(nb_filters)
        filter_shape = tuple(map(int, filter_shape.split("x")))
        pad = rest[:-1]

        preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                           stride=(1, 1), nonlinearity=None, pad=pad,
                           W=lasagne.init.HeNormal(gain='relu'))

        if i > len(layer_blueprints) // 2 and i != len(layer_blueprints):
            shortcut = convnet_layers_preact[len(layer_blueprints)-i]
            if i == len(layer_blueprints):
                if preact.output_shape[1] != shortcut.output_shape[1]:
                    shortcut = SliceLayer(shortcut, slice(0, 1), axis=1)
                else:
                    raise NameError("Something is wrong.")

            print("Shortcut from {} to {}".format(len(layer_blueprints)-i, i))
            preact = ElemwiseSumLayer([preact, shortcut])

        convnet_layers_preact.append(preact)

        layer = NonlinearityLayer(preact, nonlinearity=rectify)
        convnet_layers.append(layer)

    self._network = FlattenLayer(preact)
    # network = DenseLayer(l, num_units=int(np.prod(self.image_shape)),
    #                      W=lasagne.init.HeNormal(),
    #                      nonlinearity=None)

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
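For reference, a small standalone sketch of how one blueprint element such as "64@3x3(valid)" is split into filter count, filter shape, and padding by the string handling above (the blueprint value here is illustrative only):

# Mirrors the parsing in network(); prints (64, (3, 3), 'valid') then (64, (3, 3), 'full').
blueprint = "64@3x3(valid) -> 64@3x3(full)"
for element in map(str.strip, blueprint.split("->")):
    nb_filters, rest = element.split("@")                     # "64", "3x3(valid)"
    filter_shape, rest = rest.split("(")                      # "3x3", "valid)"
    pad = rest[:-1]                                           # strip the trailing ")"
    print(int(nb_filters), tuple(map(int, filter_shape.split("x"))), pad)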
Example #12
Source File: q_network.py From Q-Optimality-Tightening with MIT License
def build_nature_network_dnn(self, input_width, input_height, output_dim,
                             num_frames, batch_size):
    """
    Build a large network consistent with the DeepMind Nature paper.
    """
    from lasagne.layers import dnn

    l_in = lasagne.layers.InputLayer(
        shape=(None, num_frames, input_width, input_height))

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in, num_filters=32, filter_size=(8, 8), stride=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv2 = dnn.Conv2DDNNLayer(
        l_conv1, num_filters=64, filter_size=(4, 4), stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv3 = dnn.Conv2DDNNLayer(
        l_conv2, num_filters=64, filter_size=(3, 3), stride=(1, 1),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_hidden1 = lasagne.layers.DenseLayer(
        l_conv3, num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(
        l_hidden1, num_units=output_dim, nonlinearity=None,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    return l_out
Example #13
Source File: initial_regular.py From kaggle-heart with MIT License
def build_model():
    #################
    # Regular model #
    #################
    l0 = InputLayer(data_sizes["sliced:data:ax"])
    l0r = reshape(l0, (-1, 1, ) + data_sizes["sliced:data:ax"][-2:])

    # first do the segmentation steps
    l1a = ConvLayer(l0r, num_filters=32, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1b = ConvLayer(l1a, num_filters=32, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1c = ConvLayer(l1b, num_filters=64, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1))
    l1f = ConvLayer(l1c, num_filters=1, filter_size=(3, 3), pad='same',
                    W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1),
                    nonlinearity=lasagne.nonlinearities.sigmoid)

    l_1r = reshape(l1f, data_sizes["sliced:data:ax"])

    l_d3 = lasagne.layers.DenseLayer(l_1r, num_units=2)
    l_systole = MuLogSigmaErfLayer(l_d3)

    l_d3b = lasagne.layers.DenseLayer(l_1r, num_units=2)
    l_diastole = MuLogSigmaErfLayer(l_d3b)

    return {
        "inputs": {
            "sliced:data:ax": l0,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole
        }
    }
Example #14
Source File: initial_sunny.py From kaggle-heart with MIT License
def build_model(): l0 = InputLayer(data_sizes["sunny"]) l1a = ConvLayer(l0, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l1b = ConvLayer(l1a, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l1c = ConvLayer(l1b, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l1 = MaxPoolLayer(l1c, pool_size=(2, 2)) l2a = ConvLayer(l1, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l2b = ConvLayer(l2a, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l2c = ConvLayer(l2b, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l3 = MaxPoolLayer(l2c, pool_size=(3, 3)) l4a = ConvLayer(l3, num_filters=32, filter_size=(4, 4), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l4b = ConvLayer(l4a, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l4c = ConvLayer(l4b, num_filters=32, filter_size=(3, 3), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l5a = ConvLayer(l4c, num_filters=256, filter_size=(1, 1), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l5b = ConvLayer(l5a, num_filters=256, filter_size=(1, 1), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1)) l5c = ConvLayer(l5b, num_filters=1, filter_size=(1, 1), W=lasagne.init.Orthogonal(), b=lasagne.init.Constant(0.1), nonlinearity=lasagne.nonlinearities.sigmoid) l_final = lasagne.layers.FlattenLayer(l5c, outdim=2) return { "inputs":{ "sunny": l0 }, "outputs":{ "segmentation": l_final, "top": l_final } }
Example #15
Source File: layers.py From Neural-Photo-Editor with MIT License
def InceptionLayer(incoming, param_dict, block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i, dict in enumerate(param_dict):
        for j, style in enumerate(dict['style']):  # Loop up branch
            branch[i] = C2D(
                incoming=branch[i] if j else incoming,
                num_filters=dict['num_filters'][j],
                filter_size=dict['filter_size'][j],
                pad=dict['pad'][j] if 'pad' in dict else None,
                stride=dict['stride'][j],
                W=initmethod('relu'),
                nonlinearity=dict['nonlinearity'][j],
                name=block_name+'_'+str(i)+'_'+str(j)) if style == 'convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size=dict['filter_size'][j],
                mode=dict['mode'][j],
                stride=dict['stride'][j],
                pad=dict['pad'][j],
                name=block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity=dict['nonlinearity'][j]) if style == 'pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming=lasagne.layers.PadLayer(incoming=incoming if j == 0 else branch[i], width=dict['pad'][j]) if 'pad' in dict else incoming if j == 0 else branch[i],
                num_filters=dict['num_filters'][j],
                filter_size=dict['filter_size'][j],
                dilation=dict['dilation'][j],
                # pad=dict['pad'][j] if 'pad' in dict else None,
                W=initmethod('relu'),
                nonlinearity=dict['nonlinearity'][j],
                name=block_name+'_'+str(i)+'_'+str(j)) if style == 'dilation'\
            else DL(
                incoming=incoming if j == 0 else branch[i],
                num_units=dict['num_filters'][j],
                W=initmethod('relu'),
                b=None,
                nonlinearity=dict['nonlinearity'][j],
                name=block_name+'_'+str(i)+'_'+str(j))
            # Apply Batchnorm
            branch[i] = BN(branch[i], name=block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch, name=block_name)

# Convenience function to define an inception-style block with upscaling
Example #16
Source File: layers.py From Neural-Photo-Editor with MIT License
def MDCL(incoming, num_filters, scales, name, dnn=True):
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D

    # W initialization method--this should also work as Orthogonal('relu'), but I have yet
    # to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialization method for the coefficients
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))

    # Number of incoming channels
    ni = lasagne.layers.get_output_shape(incoming)[1]

    # Weight parameter--the primary parameter for this block
    W = theano.shared(lasagne.utils.floatX(winit.sample((num_filters, lasagne.layers.get_output_shape(incoming)[1], 3, 3))), name=name+'W')

    # Primary Convolution Layer--No Dilation
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[1, 1],
            pad=(1, 1),
            # Note the broadcasting dimshuffle for the num_filter scalars.
            W=W*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name+'base')

    # List of remaining layers. This should probably just all be concatenated into a single
    # list rather than being a separate deal.
    nd = []
    for i, scale in enumerate(scales):

        # I don't think 0 dilation is technically defined (or if it is it's just the regular
        # filter) but I use it here as a convenient keyword to grab the 1x1 mean conv.
        if scale == 0:
            nd.append(C2D(incoming=incoming,
                          num_filters=num_filters,
                          filter_size=[1, 1],
                          stride=[1, 1],
                          pad=(0, 0),
                          W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x')*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                          b=None,
                          nonlinearity=None,
                          name=name+str(scale)))
        # Note the dimshuffles in this layer--these are critical as the current DilatedConv2D
        # implementation uses a backward pass.
        else:
            nd.append(lasagne.layers.DilatedConv2DLayer(incoming=lasagne.layers.PadLayer(incoming=incoming, width=(scale, scale)),
                                                        num_filters=num_filters,
                                                        filter_size=[3, 3],
                                                        dilation=(scale, scale),
                                                        W=W.dimshuffle(1, 0, 2, 3)*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_'+str(scale)).dimshuffle('x', 0, 'x', 'x'),
                                                        b=None,
                                                        nonlinearity=None,
                                                        name=name+str(scale)))

    return ESL(nd+[n])

# MDC-based Upsample Layer.
# This is a prototype I don't make use of extensively. It's operational but it doesn't seem
# to improve results yet.
Example #17
Source File: q_network.py From deep_q_rl with BSD 3-Clause "New" or "Revised" License
def build_nips_network_dnn(self, input_width, input_height, output_dim,
                           num_frames, batch_size):
    """
    Build a network consistent with the 2013 NIPS paper.
    """
    # Import it here, in case it isn't installed.
    from lasagne.layers import dnn

    l_in = lasagne.layers.InputLayer(
        shape=(None, num_frames, input_width, input_height))

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in, num_filters=16, filter_size=(8, 8), stride=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        # W=lasagne.init.HeUniform(),
        W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1))

    l_conv2 = dnn.Conv2DDNNLayer(
        l_conv1, num_filters=32, filter_size=(4, 4), stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        # W=lasagne.init.HeUniform(),
        W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1))

    l_hidden1 = lasagne.layers.DenseLayer(
        l_conv2, num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify,
        # W=lasagne.init.HeUniform(),
        W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(
        l_hidden1, num_units=output_dim, nonlinearity=None,
        # W=lasagne.init.HeUniform(),
        W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1))

    return l_out
Example #18
Source File: q_network.py From deep_q_rl with BSD 3-Clause "New" or "Revised" License
def build_nature_network_dnn(self, input_width, input_height, output_dim,
                             num_frames, batch_size):
    """
    Build a large network consistent with the DeepMind Nature paper.
    """
    from lasagne.layers import dnn

    l_in = lasagne.layers.InputLayer(
        shape=(None, num_frames, input_width, input_height))

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in, num_filters=32, filter_size=(8, 8), stride=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv2 = dnn.Conv2DDNNLayer(
        l_conv1, num_filters=64, filter_size=(4, 4), stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv3 = dnn.Conv2DDNNLayer(
        l_conv2, num_filters=64, filter_size=(3, 3), stride=(1, 1),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_hidden1 = lasagne.layers.DenseLayer(
        l_conv3, num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(
        l_hidden1, num_units=output_dim, nonlinearity=None,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    return l_out
Example #19
Source File: vgg16_full.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_model():
    net = {}
    input_var = theano.tensor.tensor4('input_var')
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    output_layer = net['prob']
    return net, output_layer, input_var
Example #20
Source File: vgg16.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_model():
    net = {}
    input_var = theano.tensor.tensor4('input_var')
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    output_layer = net['prob']
    return net, output_layer, input_var
Example #21
Source File: vgg16_lymph.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    output_layer = net['prob']
    return output_layer
Example #22
Source File: cifar10_nin.py From Recipes with MIT License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'], num_filters=192, filter_size=5, pad=2, flip_filters=False)
    net['cccp1'] = ConvLayer(net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'], pool_size=3, stride=2, mode='max', ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'], num_filters=192, filter_size=5, pad=2, flip_filters=False)
    net['cccp3'] = ConvLayer(net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'], pool_size=3, stride=2, mode='average_exc_pad', ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'], num_filters=192, filter_size=3, pad=1, flip_filters=False)
    net['cccp5'] = ConvLayer(net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'], pool_size=8, mode='average_exc_pad', ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    return net
Example #23
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, InputLayer, FlattenLayer, DenseLayer
    from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    network_out = []
    if self.convnet_blueprint is not None:
        convnet_layers = [self._network_in]
        layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # eg. "64@3x3(valid) -> 64@3x3(full)"
            nb_filters, rest = layer_blueprint.split("@")
            filter_shape, rest = rest.split("(")
            nb_filters = int(nb_filters)
            filter_shape = tuple(map(int, filter_shape.split("x")))
            pad = rest[:-1]

            preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                               stride=(1, 1), nonlinearity=None, pad=pad,
                               W=lasagne.init.HeNormal(gain='relu'),
                               name="layer_{}_conv".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            convnet_layers.append(layer)

        network_out.append(FlattenLayer(preact))

    if self.fullnet_blueprint is not None:
        fullnet_layers = [FlattenLayer(self._network_in)]
        layer_blueprints = list(map(str.strip, self.fullnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "500 -> 500 -> 784"
            hidden_size = int(layer_blueprint)

            preact = DenseLayer(fullnet_layers[-1], num_units=hidden_size, nonlinearity=None,
                                W=lasagne.init.HeNormal(gain='relu'),
                                name="layer_{}_dense".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            fullnet_layers.append(layer)

        network_out.append(preact)

    self._network = ElemwiseSumLayer(network_out)
    # TODO: sigmoid should be applied here instead of within loss function.

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
Example #24
Source File: q_network.py From Model-Free-Episodic-Control with MIT License
def build_nature_network_dnn(self, input_width, input_height, output_dim,
                             num_frames, batch_size):
    """
    Build a large network consistent with the DeepMind Nature paper.
    """
    from lasagne.layers import dnn

    l_in = lasagne.layers.InputLayer(
        shape=(None, num_frames, input_width, input_height))

    l_conv1 = dnn.Conv2DDNNLayer(
        l_in, num_filters=32, filter_size=(8, 8), stride=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv2 = dnn.Conv2DDNNLayer(
        l_conv1, num_filters=64, filter_size=(4, 4), stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_conv3 = dnn.Conv2DDNNLayer(
        l_conv2, num_filters=64, filter_size=(3, 3), stride=(1, 1),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_hidden1 = lasagne.layers.DenseLayer(
        l_conv3, num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(
        l_hidden1, num_units=output_dim, nonlinearity=None,
        W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))

    return l_out
Example #25
Source File: res_net_blocks.py From dcase_task2 with MIT License
def ResNet_BottleNeck_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    '''
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Judging from https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua,
    the number of filters goes 16 -> 64 -> 128 -> 256.

    Formula to figure out depth: 9n + 2
    '''
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16x16x16
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1),
                             nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 64x16x16
    l = residual_bottleneck_block(l, first=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    # second stack of residual blocks, output is 128x8x8
    l = residual_bottleneck_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    # third stack of residual blocks, output is 256x4x4
    l = residual_bottleneck_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network
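With the default n=18, the depth formula in the docstring gives 9 * 18 + 2 = 164 weighted layers.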
Example #26
Source File: res_net_blocks.py From dcase_task2 with MIT License
def residual_wide_block(l, increase_dim=False, projection=True, first=False, filters=16):
    """
    Create a residual learning building block with two stacked 3x3 conv layers as in paper
    """
    if increase_dim:
        first_stride = (2, 2)
    else:
        first_stride = (1, 1)

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=filters, filter_size=(3, 3),
                                  stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

    dropout = DropoutLayer(conv_1, p=0.3)

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(dropout, num_filters=filters, filter_size=(3, 3), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    elif first:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(1, 1),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block
Example #27
Source File: res_net_blocks.py From dcase_task2 with MIT License
def residual_bottleneck_block(l, increase_dim=False, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers as in paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016
    (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]

    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
        out_num_filters = out_num_filters * 4
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    bottleneck_filters = out_num_filters / 4

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=bottleneck_filters, filter_size=(1, 1),
                                  stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))

    conv_2 = batch_norm(ConvLayer(conv_1, num_filters=bottleneck_filters, filter_size=(3, 3),
                                  stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

    # contains the last weight portion, step 6
    conv_3 = ConvLayer(conv_2, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)

    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])
    elif first:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])
    else:
        block = ElemwiseSumLayer([conv_3, l])

    return block
Example #28
Source File: vgg19.py From Recipes with MIT License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_4'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_4'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_4'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #29
Source File: vgg16.py From Recipes with MIT License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #30
Source File: googlenet.py From Recipes with MIT License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, None, None))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'], [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'], [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'], [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'], [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'], [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'], [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'], [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)

    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'], [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'], [128, 384, 192, 384, 48, 128]))

    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000, nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    return net