Python lasagne.layers.ConcatLayer() Examples

The following are 16 code examples of lasagne.layers.ConcatLayer(), drawn from open-source projects. You can go to the original project or source file by following the attribution above each example. You may also want to check out all available functions/classes of the module lasagne.layers.
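
Before the examples, a minimal sketch of what ConcatLayer does (the layer names below are illustrative). By default it joins the outputs of several layers along axis=1, the channel/feature axis, and requires all other dimensions to match:

from lasagne.layers import InputLayer, Conv2DLayer, ConcatLayer, get_output_shape

inp = InputLayer((None, 3, 64, 64))
a = Conv2DLayer(inp, num_filters=16, filter_size=3, pad=1)  # (None, 16, 64, 64)
b = Conv2DLayer(inp, num_filters=32, filter_size=5, pad=2)  # (None, 32, 64, 64)
merged = ConcatLayer([a, b])                                # default axis=1
print(get_output_shape(merged))                             # (None, 48, 64, 64)
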
Example #1
Source File: inception_v3.py    From Recipes with MIT License
def inceptionA(input_layer, nfilt):
    # Corresponds to a modified version of figure 5 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=5, pad=2)

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=3, pad=1)

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4]) 
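
The inception_v3.py snippets (Examples #1, #2, #6 and #7) rely on a bn_conv helper defined earlier in the same file. A plausible sketch of it, assuming the usual convolution-plus-batch-normalization pattern rather than quoting the source:

from lasagne.layers import Conv2DLayer, batch_norm

def bn_conv(input_layer, **kwargs):
    # Convolution followed by batch normalization; batch_norm removes the
    # convolution's bias and applies its nonlinearity after normalizing.
    l = Conv2DLayer(input_layer, **kwargs)
    return batch_norm(l)
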
Example #2
Source File: inception_v3.py    From Recipes with MIT License
def inceptionC(input_layer, nfilt):
    # Corresponds to figure 6 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 7), pad=(0, 3))
    l3 = bn_conv(l3, num_filters=nfilt[2][3], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][4], filter_size=(1, 7), pad=(0, 3))

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4]) 
Example #3
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
    def __init__(self):
        self.network = collections.OrderedDict()
        self.network['img'] = InputLayer((None, 3, None, None))
        self.network['seed'] = InputLayer((None, 3, None, None))

        config, params = self.load_model()
        self.setup_generator(self.last_layer(), config)

        if args.train:
            concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
            self.setup_perceptual(concatenated)
            self.load_perceptual()
            self.setup_discriminator()
        self.load_generator(params)
        self.compile()

    #------------------------------------------------------------------------------------------------------------------
    # Network Configuration
    #------------------------------------------------------------------------------------------------------------------ 
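
Note the axis=0 above: unlike the channel-axis concatenations in most of these examples, this stacks the two inputs along the batch axis, so a single forward pass of the perceptual network covers both the reference images and the generator output. A small shape sketch (sizes are illustrative):

from lasagne.layers import InputLayer, ConcatLayer, get_output_shape

real = InputLayer((8, 3, 64, 64))
fake = InputLayer((8, 3, 64, 64))
both = ConcatLayer([real, fake], axis=0)
print(get_output_shape(both))  # (16, 3, 64, 64): batch sizes add up
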
Example #4
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
    def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------ 
Example #5
Source File: googlenet.py    From Recipes with MIT License
def build_inception_module(name, input_layer, nfilters):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(
        net['pool'], nfilters[0], 1, flip_filters=False)

    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)

    net['3x3_reduce'] = ConvLayer(
        input_layer, nfilters[2], 1, flip_filters=False)
    net['3x3'] = ConvLayer(
        net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)

    net['5x5_reduce'] = ConvLayer(
        input_layer, nfilters[4], 1, flip_filters=False)
    net['5x5'] = ConvLayer(
        net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)

    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
        ])

    return {'{}/{}'.format(name, k): v for k, v in net.items()} 
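
A sketch of how such a module plugs into the surrounding network: the returned dict is merged into the model's layer dict, and the ConcatLayer becomes addressable as '<name>/output'. The filter counts below match the GoogLeNet inception_3a configuration, but the stem is elided and the wiring is illustrative:

from lasagne.layers import InputLayer

net = {}
net['input'] = InputLayer((None, 3, 224, 224))
# ... stem layers elided ...
net.update(build_inception_module('inception_3a', net['input'],
                                  [32, 64, 96, 128, 16, 32]))
# Layers are now reachable as 'inception_3a/1x1', 'inception_3a/3x3', ...,
# with 'inception_3a/output' holding the ConcatLayer.
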
Example #6
Source File: inception_v3.py    From Recipes with MIT License
def inceptionB(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3]) 
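
inceptionB is a grid-reduction block: every branch halves the spatial resolution (stride-2 convolution or stride-2 pooling, no padding), so the outputs still line up for the default channel-axis concatenation. A quick shape check, assuming the 35x35, 288-channel input this block sees in Inception-v3:

from lasagne.layers import InputLayer, Pool2DLayer, get_output_shape

x = InputLayer((None, 288, 35, 35))
print(get_output_shape(Pool2DLayer(x, pool_size=3, stride=2)))
# (None, 288, 17, 17); a 3x3 stride-2 convolution also yields 17x17,
# so the branches differ only in channel count.
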
Example #7
Source File: inception_v3.py    From Recipes with MIT License
def inceptionD(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3]) 
Example #8
Source File: Deopen_classification.py    From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis = -1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis = -1)
    layer2_3 = SliceLayer(layer2_2, indices = slice(0,4), axis = -2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,num_filters = kernel1, filter_size = (4,test_size1))
    layer4 = Conv2DLayer(layer3,num_filters = kernel1, filter_size = (1,test_size1))
    layer5 = Conv2DLayer(layer4,num_filters = kernel1, filter_size = (1,test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size = (1,pool_size))
    layer7 = Conv2DLayer(layer6,num_filters = kernel2, filter_size = (1,test_size2))
    layer8 = Conv2DLayer(layer7,num_filters = kernel2, filter_size = (1,test_size2))
    layer9 = Conv2DLayer(layer8,num_filters = kernel2, filter_size = (1,test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size = (1,pool_size))
    layer11 = Conv2DLayer(layer10,num_filters = kernel3, filter_size = (1,test_size3))
    layer12 = Conv2DLayer(layer11,num_filters = kernel3, filter_size = (1,test_size3))
    layer13 = Conv2DLayer(layer12,num_filters = kernel3, filter_size = (1,test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size = (1,pool_size))
    layer14_d = DenseLayer(layer14, num_units= 256)
    layer3_2 = DenseLayer(layer2_f, num_units = 128)
    layer15 = ConcatLayer([layer14_d,layer3_2])
    layer16 = DropoutLayer(layer15,p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units= 2, nonlinearity=softmax)
    return network


#random search to initialize the weights 
Example #9
Source File: Deopen_regression.py    From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis = -1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis = -1)
    layer2_3 = SliceLayer(layer2_2, indices = slice(0,4), axis = -2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,num_filters = kernel1, filter_size = (4,test_size1))
    layer4 = Conv2DLayer(layer3,num_filters = kernel1, filter_size = (1,test_size1))
    layer5 = Conv2DLayer(layer4,num_filters = kernel1, filter_size = (1,test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size = (1,pool_size))
    layer7 = Conv2DLayer(layer6,num_filters = kernel2, filter_size = (1,test_size2))
    layer8 = Conv2DLayer(layer7,num_filters = kernel2, filter_size = (1,test_size2))
    layer9 = Conv2DLayer(layer8,num_filters = kernel2, filter_size = (1,test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size = (1,pool_size))
    layer11 = Conv2DLayer(layer10,num_filters = kernel3, filter_size = (1,test_size3))
    layer12 = Conv2DLayer(layer11,num_filters = kernel3, filter_size = (1,test_size3))
    layer13 = Conv2DLayer(layer12,num_filters = kernel3, filter_size = (1,test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size = (1,pool_size))
    layer14_d = DenseLayer(layer14, num_units= 256)
    layer3_2 = DenseLayer(layer2_f, num_units = 128)
    layer15 = ConcatLayer([layer14_d,layer3_2])
    #layer16 = DropoutLayer(layer15,p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units= 1, nonlinearity=None)
    return network


#random search to initialize the weights 
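
Both Deopen snippets end by concatenating two DenseLayer outputs. With 2D (batch, features) inputs, the default axis=1 concatenation simply appends feature vectors, so the 256-unit branch and the 128-unit branch merge into one 384-feature vector before the classifier/regressor head. A minimal sketch:

from lasagne.layers import InputLayer, DenseLayer, ConcatLayer, get_output_shape

a = DenseLayer(InputLayer((None, 100)), num_units=256)
b = DenseLayer(InputLayer((None, 100)), num_units=128)
print(get_output_shape(ConcatLayer([a, b])))  # (None, 384)
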
Example #10
Source File: layers_theano.py    From visual_dynamics with MIT License
    def __init__(self, incoming, channel_layer_class, name=None, **channel_layer_kwargs):
        super(ChannelwiseLayer, self).__init__(incoming, name=name)
        self.channel_layer_class = channel_layer_class
        self.channel_incomings = []
        self.channel_outcomings = []
        for channel in range(lasagne.layers.get_output_shape(incoming)[1]):  # shape[1] is the channel axis sliced below
            channel_incoming = L.SliceLayer(incoming, indices=slice(channel, channel+1), axis=1,
                                            name='%s.%s%d' % (name, 'slice', channel) if name is not None else None)
            channel_outcoming = channel_layer_class(channel_incoming,
                                                    name='%s.%s%d' % (name, 'op', channel) if name is not None else None,
                                                    **channel_layer_kwargs)
            self.channel_incomings.append(channel_incoming)
            self.channel_outcomings.append(channel_outcoming)
        self.outcoming = L.ConcatLayer(self.channel_outcomings, axis=1,
                                       name='%s.%s' % (name, 'concat') if name is not None else None) 
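
A hypothetical usage sketch (the choice of Pool2DLayer and its keyword arguments are illustrative): each input channel is sliced out, processed independently, and the outcoming attribute holds the ConcatLayer that reassembles the channels:

import lasagne.layers as L

inp = L.InputLayer((None, 4, 32, 32))
cw = ChannelwiseLayer(inp, L.Pool2DLayer, name='cwpool',
                      pool_size=3, stride=1, pad=1, mode='average_exc_pad')
print(L.get_output_shape(cw.outcoming))  # (None, 4, 32, 32)
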
Example #11
Source File: eeg_cnn_lib.py    From EEGLearn with GNU General Public License v2.0
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
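
The ConcatLayer/ReshapeLayer pair above is the key pattern: the n_timewin flattened (n_samples, features) outputs are concatenated along axis=1 into (n_samples, n_timewin * features), then reshaped to restore an explicit time axis. A sketch with concrete numbers (128 features per window is illustrative):

from lasagne.layers import InputLayer, ConcatLayer, ReshapeLayer, get_output_shape

feats = [InputLayer((None, 128)) for _ in range(7)]
merged = ConcatLayer(feats)                # (None, 7 * 128)
seq = ReshapeLayer(merged, ([0], 7, 128))
print(get_output_shape(seq))               # (None, 7, 128)
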
Example #12
Source File: eeg_cnn_lib.py    From EEGLearn with GNU General Public License v2.0
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, numTimeWin, features]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
Example #13
Source File: layers.py    From Neural-Photo-Editor with MIT License
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                    incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming = lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor = dict['stride'][j]),
                        pool_size = dict['filter_size'][j],
                        stride = [1,1],
                        mode = dict['mode'][j],
                        pad = dict['pad'][j],
                        name = block_name+'_'+str(i)+'_'+str(j)),
                    nonlinearity = dict['nonlinearity'][j])
            # Apply Batchnorm
            branch[i] = BN(branch[i], name=block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch, name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
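
Example #13 above and Example #16 below use short aliases (CL, NL, BN, C2D, TC2D, DL, initmethod) defined near the top of layers.py. Plausible bindings, inferred from usage rather than quoted from the source:

from lasagne.layers import (ConcatLayer as CL, NonlinearityLayer as NL,
                            DenseLayer as DL, batch_norm as BN,
                            Conv2DLayer as C2D, TransposedConv2DLayer as TC2D)
# initmethod('relu') is presumably a helper returning a rectifier-scaled
# weight initializer, e.g. lasagne.init.HeNormal(gain='relu').
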
Example #14
Source File: eeg_cnn_lib.py    From EEGLearn with GNU General Public License v2.0
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, numTimeWin, features]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the nb_classes-unit output layer (no dropout before this one):
    convpool = DenseLayer(convpool,
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
Example #15
Source File: doodle.py    From neural-doodle with GNU Affero General Public License v3.0
    def setup_model(self, input=None):
        """Use lasagne to create a network of convolution layers, first using VGG19 as the framework
        and then adding augmentations for Semantic Style Transfer.
        """
        net, self.channels = {}, {}

        # Primary network for the main image. These are convolution layers only; VGG19's fully-connected part is unused.
        net['img']     = input or InputLayer((None, 3, None, None))
        net['conv1_1'] = ConvLayer(net['img'],     64, 3, pad=1)
        net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
        net['pool1']   = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'],   128, 3, pad=1)
        net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
        net['pool2']   = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'],   256, 3, pad=1)
        net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
        net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
        net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
        net['pool3']   = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'],   512, 3, pad=1)
        net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
        net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
        net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
        net['pool4']   = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
        net['conv5_1'] = ConvLayer(net['pool4'],   512, 3, pad=1)
        net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
        net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
        net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
        net['main']    = net['conv5_4']

        # Auxiliary network for the semantic layers, and the nearest neighbors calculations.
        net['map'] = InputLayer((1, 1, None, None))
        for j, i in itertools.product(range(5), range(4)):
            if j < 2 and i > 1: continue
            suffix = '%i_%i' % (j+1, i+1)

            if i == 0:
                net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad')
            self.channels[suffix] = net['conv'+suffix].num_filters
            
            if args.semantic_weight > 0.0:
                net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i'%(j+1)]])
            else:
                net['sem'+suffix] = net['conv'+suffix]

            net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape)
            net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False)

        self.network = net 
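
The semantic branch concatenates a one-channel map with each convolution block's features, pooling the map by 2**j first so the spatial sizes agree. A shape sketch for the second block (sizes illustrative):

from lasagne.layers import InputLayer, Pool2DLayer, ConcatLayer, get_output_shape

conv = InputLayer((1, 128, 128, 128))  # stand-in for net['conv2_1']
sem = InputLayer((1, 1, 256, 256))     # full-resolution semantic map
sem2 = Pool2DLayer(sem, 2, mode='average_exc_pad')
print(get_output_shape(ConcatLayer([conv, sem2])))  # (1, 129, 128, 128)
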
Example #16
Source File: layers.py    From Neural-Photo-Editor with MIT License
def InceptionLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad =  dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i],width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))  if style== 'dilation'\
            else DL(
                    incoming = incoming if j==0 else branch[i],
                    num_units = dict['num_filters'][j],
                    W = initmethod('relu'),
                    b = None,
                    nonlinearity = dict['nonlinearity'][j],
                    name = block_name+'_'+str(i)+'_'+str(j))   
            # Apply Batchnorm
            branch[i] = BN(branch[i], name=block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch, name=block_name)

# Convenience function to define an inception-style block with upscaling