Python lasagne.layers.Pool2DLayer() Examples
The following are 30
code examples of lasagne.layers.Pool2DLayer().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
lasagne.layers, or try the search function.
Example #1
Source File: inception_v3.py From Recipes with MIT License | 6 votes |
def inceptionA(input_layer, nfilt):
    """Inception-A block (modified version of figure 5 in the paper).

    ``nfilt`` is a nested sequence giving the filter counts per branch.
    Returns the concatenation of the four parallel branches.
    """
    # Branch 1: single 1x1 convolution.
    branch1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    # Branch 2: 1x1 reduction followed by a padded 5x5 convolution.
    branch2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    branch2 = bn_conv(branch2, num_filters=nfilt[1][1], filter_size=5, pad=2)
    # Branch 3: 1x1 reduction followed by two stacked 3x3 convolutions.
    branch3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    branch3 = bn_conv(branch3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    branch3 = bn_conv(branch3, num_filters=nfilt[2][2], filter_size=3, pad=1)
    # Branch 4: average pooling (excluding padding) plus a 1x1 convolution.
    branch4 = Pool2DLayer(input_layer, pool_size=3, stride=1, pad=1,
                          mode='average_exc_pad')
    branch4 = bn_conv(branch4, num_filters=nfilt[3][0], filter_size=1)
    return ConcatLayer([branch1, branch2, branch3, branch4])
Example #2
Source File: inception_v3.py From Recipes with MIT License | 6 votes |
def inceptionE(input_layer, nfilt, pool_mode):
    """Inception-E block (corresponds to figure 7 in the paper).

    ``pool_mode`` selects the pooling mode of the fourth branch.
    Returns the concatenation of six parallel outputs.
    """
    # Branch 1: single 1x1 convolution.
    branch1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    # Branch 2: 1x1 reduction, then parallel 1x3 and 3x1 convolutions.
    reduce2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    branch2a = bn_conv(reduce2, num_filters=nfilt[1][1],
                       filter_size=(1, 3), pad=(0, 1))
    branch2b = bn_conv(reduce2, num_filters=nfilt[1][2],
                       filter_size=(3, 1), pad=(1, 0))
    # Branch 3: 1x1 reduction and a 3x3 conv, then parallel 1x3/3x1 convs.
    reduce3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    reduce3 = bn_conv(reduce3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    branch3a = bn_conv(reduce3, num_filters=nfilt[2][2],
                       filter_size=(1, 3), pad=(0, 1))
    branch3b = bn_conv(reduce3, num_filters=nfilt[2][3],
                       filter_size=(3, 1), pad=(1, 0))
    # Branch 4: pooling in the caller-chosen mode, plus a 1x1 convolution.
    branch4 = Pool2DLayer(input_layer, pool_size=3, stride=1, pad=1,
                          mode=pool_mode)
    branch4 = bn_conv(branch4, num_filters=nfilt[3][0], filter_size=1)
    return ConcatLayer([branch1, branch2a, branch2b,
                        branch3a, branch3b, branch4])
Example #3
Source File: padded.py From reseg with GNU General Public License v3.0 | 5 votes |
def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
             ignore_border=True, centered=True, **kwargs):
    """A padded pooling layer.

    Parameters
    ----------
    incoming : lasagne.layers.Layer
        The input layer
    pool_size : int
        The size of the pooling
    stride : int or iterable of int
        The stride or subsampling of the convolution
    pad : int, iterable of int, ``full``, ``same`` or ``valid``
        **Ignored!** Kept for compatibility with the
        :class:``lasagne.layers.Pool2DLayer``
    ignore_border : bool
        See :class:``lasagne.layers.Pool2DLayer``
    centered : bool
        If True, the padding will be added on both sides. If False the
        zero padding will be applied on the upper left side.
    **kwargs
        Any additional keyword arguments are passed to the Layer
        superclass
    """
    self.centered = centered
    # The padding argument is accepted only for signature compatibility;
    # warn whenever the caller passed something other than "no padding".
    if pad not in [0, (0, 0), [0, 0]]:
        warnings.warn('The specified padding will be ignored',
                      RuntimeWarning)
    # Delegate the actual pooling setup to the parent Pool2DLayer.
    super(PaddedPool2DLayer, self).__init__(incoming,
                                            pool_size,
                                            stride,
                                            pad,
                                            ignore_border,
                                            **kwargs)
    # This layer is intended for symbolic (unknown) spatial sizes only.
    if self.input_shape[2:] != (None, None):
        warnings.warn('This Layer should only be used when the size of '
                      'the image is not known', RuntimeWarning)
Example #4
Source File: deep_conv_classification_alt57.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: 5x5, 5x5 and 4x4 convolutions followed by a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = layers.Conv2DLayer(net, n_filters, filter_size=size,
                                 stride=1, nonlinearity=leaky_rectify)
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = layers.Conv2DLayer(net, n_filters, filter_size=(3, 3),
                                 stride=1, nonlinearity=leaky_rectify)
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #5
Source File: deep_conv_classification_alt51_luad10in20_brca10.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the batch-normalized convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: batch-normalized 5x5, 5x5 and 4x4 convs, then a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=size, stride=1,
            nonlinearity=leaky_rectify))
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=(3, 3), stride=1,
            nonlinearity=leaky_rectify))
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #6
Source File: deep_conv_classification_alt56.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: 5x5, 5x5 and 4x4 convolutions followed by a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = layers.Conv2DLayer(net, n_filters, filter_size=size,
                                 stride=1, nonlinearity=leaky_rectify)
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = layers.Conv2DLayer(net, n_filters, filter_size=(3, 3),
                                 stride=1, nonlinearity=leaky_rectify)
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #7
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1_heatmap.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the batch-normalized convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: batch-normalized 5x5, 5x5 and 4x4 convs, then a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=size, stride=1,
            nonlinearity=leaky_rectify))
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=(3, 3), stride=1,
            nonlinearity=leaky_rectify))
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #8
Source File: deep_conv_classification_alt64.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the batch-normalized convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: batch-normalized 5x5, 5x5 and 4x4 convs, then a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=size, stride=1,
            nonlinearity=leaky_rectify))
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=(3, 3), stride=1,
            nonlinearity=leaky_rectify))
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #9
Source File: deep_conv_classification_alt55.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: 5x5, 5x5 and 4x4 convolutions followed by a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = layers.Conv2DLayer(net, n_filters, filter_size=size,
                                 stride=1, nonlinearity=leaky_rectify)
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = layers.Conv2DLayer(net, n_filters, filter_size=(3, 3),
                                 stride=1, nonlinearity=leaky_rectify)
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #10
Source File: deep_conv_classification_alt58.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: 5x5, 5x5 and 4x4 convolutions followed by a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = layers.Conv2DLayer(net, n_filters, filter_size=size,
                                 stride=1, nonlinearity=leaky_rectify)
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = layers.Conv2DLayer(net, n_filters, filter_size=(3, 3),
                                 stride=1, nonlinearity=leaky_rectify)
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #11
Source File: enhance.py From neural-enhance with GNU Affero General Public License v3.0 | 5 votes |
def setup_perceptual(self, input):
    """Use lasagne to create a network of convolution layers using
    pre-trained VGG19 weights.
    """
    # BGR mean offset applied after rescaling inputs from [-0.5, 0.5].
    offset = np.array([103.939, 116.779, 123.680],
                      dtype=np.float32).reshape((1, 3, 1, 1))
    net = self.network
    net['percept'] = lasagne.layers.NonlinearityLayer(
        input, lambda x: ((x + 0.5) * 255.0) - offset)
    net['mse'] = net['percept']
    prev = 'percept'
    # VGG19 convolutional layout: (block index, conv count, filter count).
    for block, n_convs, n_filters in ((1, 2, 64), (2, 2, 128), (3, 4, 256),
                                      (4, 4, 512), (5, 4, 512)):
        for i in range(1, n_convs + 1):
            name = 'conv%d_%d' % (block, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        if block < 5:  # the original stack defines no pool5
            name = 'pool%d' % block
            net[name] = PoolLayer(net[prev], 2, mode='max')
            prev = name
Example #12
Source File: deep_conv_classification_alt51.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the batch-normalized convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: batch-normalized 5x5, 5x5 and 4x4 convs, then a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=size, stride=1,
            nonlinearity=leaky_rectify))
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=(3, 3), stride=1,
            nonlinearity=leaky_rectify))
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #13
Source File: benchmark_lasagne.py From vgg-benchmarks with MIT License | 5 votes |
def build_model(input_var):
    """Build a VGG-16 network for benchmarking.

    Parameters
    ----------
    input_var : Theano symbolic variable bound to the input layer.

    Returns
    -------
    dict
        Mapping of layer names ('input', 'conv1_1', ..., 'prob') to layers.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    prev = 'input'
    # VGG16 convolutional layout: (block index, conv count, filter count).
    for block, n_convs, n_filters in ((1, 2, 64), (2, 2, 128), (3, 3, 256),
                                      (4, 3, 512), (5, 3, 512)):
        for i in range(1, n_convs + 1):
            name = 'conv%d_%d' % (block, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1,
                                  flip_filters=False)
            prev = name
        name = 'pool%d' % block
        net[name] = PoolLayer(net[prev], 2)
        prev = name
    # Fully-connected classifier head with dropout.
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000,
                            nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #14
Source File: model.py From gogh-figure with GNU Affero General Public License v3.0 | 5 votes |
def setup_loss_net(self):
    """Create a network of convolution layers based on the VGG16
    architecture from the paper:
    "Very Deep Convolutional Networks for Large-Scale Image Recognition"

    Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8
    License: see http://www.robots.ox.ac.uk/~vgg/research/very_deep/

    Based on code in the Lasagne Recipes repository:
    https://github.com/Lasagne/Recipes
    """
    loss_net = self.network['loss_net']
    loss_net['input'] = InputLayer(shape=self.shape)
    prev = 'input'
    # VGG16 convolutional layout: (block index, conv count, filter count).
    for block, n_convs, n_filters in ((1, 2, 64), (2, 2, 128), (3, 3, 256),
                                      (4, 3, 512), (5, 3, 512)):
        for i in range(1, n_convs + 1):
            name = 'conv%d_%d' % (block, i)
            loss_net[name] = ConvLayer(loss_net[prev], n_filters, 3, pad=1,
                                       flip_filters=False)
            prev = name
        if block < 5:  # the loss network defines no pool5
            name = 'pool%d' % block
            loss_net[name] = PoolLayer(loss_net[prev], 2)
            prev = name
Example #15
Source File: art_it_up.py From ArtsyNetworks with MIT License | 5 votes |
def initialize_network(width):
    """Initialize a VGG19-style network for a ``width`` x ``width`` image.

    Parameters
    ----------
    width : int
        Spatial size of the (single, 3-channel) input image.

    Returns
    -------
    dict
        Mapping of layer names ('input', 'conv1_1', ..., 'pool5') to layers.
    """
    net = {}
    net['input'] = InputLayer((1, 3, width, width))
    prev = 'input'
    # VGG19 layout: (block index, conv count, filter count); each block
    # ends with average pooling (excluding padding) instead of max pooling.
    for block, n_convs, n_filters in ((1, 2, 64), (2, 2, 128), (3, 4, 256),
                                      (4, 4, 512), (5, 4, 512)):
        for i in range(1, n_convs + 1):
            name = 'conv%d_%d' % (block, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        name = 'pool%d' % block
        net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        prev = name
    return net
Example #16
Source File: inception_v3.py From Recipes with MIT License | 5 votes |
def inceptionD(input_layer, nfilt):
    """Inception-D block (modified version of figure 10 in the paper).

    Both convolutional branches end with a stride-2 convolution, matched
    by a stride-2 pooling branch; the three outputs are concatenated.
    """
    # Branch 1: 1x1 reduction followed by a strided 3x3 convolution.
    branch1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    branch1 = bn_conv(branch1, num_filters=nfilt[0][1],
                      filter_size=3, stride=2)
    # Branch 2: 1x1 reduction, factorized 7x7 (1x7 then 7x1), strided 3x3.
    branch2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    branch2 = bn_conv(branch2, num_filters=nfilt[1][1],
                      filter_size=(1, 7), pad=(0, 3))
    branch2 = bn_conv(branch2, num_filters=nfilt[1][2],
                      filter_size=(7, 1), pad=(3, 0))
    branch2 = bn_conv(branch2, num_filters=nfilt[1][3],
                      filter_size=3, stride=2)
    # Branch 3: strided pooling.
    branch3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
    return ConcatLayer([branch1, branch2, branch3])
Example #17
Source File: inception_v3.py From Recipes with MIT License | 5 votes |
def inceptionB(input_layer, nfilt):
    """Inception-B block (modified version of figure 10 in the paper).

    Both convolutional branches end with a stride-2 convolution, matched
    by a stride-2 pooling branch; the three outputs are concatenated.
    """
    # Branch 1: single strided 3x3 convolution.
    branch1 = bn_conv(input_layer, num_filters=nfilt[0][0],
                      filter_size=3, stride=2)
    # Branch 2: 1x1 reduction, padded 3x3 conv, then strided 3x3 conv.
    branch2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    branch2 = bn_conv(branch2, num_filters=nfilt[1][1],
                      filter_size=3, pad=1)
    branch2 = bn_conv(branch2, num_filters=nfilt[1][2],
                      filter_size=3, stride=2)
    # Branch 3: strided pooling.
    branch3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
    return ConcatLayer([branch1, branch2, branch3])
Example #18
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x2.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the batch-normalized convolutional classification network.

    Parameters
    ----------
    classn : int
        Number of output classes (sigmoid units in the final dense layer).

    Returns
    -------
    tuple
        ``(network, input_var, target_var)`` — the output layer plus the
        Theano input/target variables used to compile train functions.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Stem: batch-normalized 5x5, 5x5 and 4x4 convs, then a 3x3 max-pool.
    for n_filters, size in ((100, (5, 5)), (100, (5, 5)), (120, (4, 4))):
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=size, stride=1,
            nonlinearity=leaky_rectify))
    net = layers.MaxPool2DLayer(net, pool_size=(3, 3), stride=2)
    # Deep 3x3 stack: one 240-, eight 320- and four 480-filter conv layers.
    for n_filters in (240,) + (320,) * 8 + (480,) * 4:
        net = batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=(3, 3), stride=1,
            nonlinearity=leaky_rectify))
    # Average-pool the remaining 20x20 spatial map down to 1x1.
    net = layers.Pool2DLayer(net, pool_size=(20, 20), stride=20,
                             mode='average_inc_pad')
    network = layers.DenseLayer(net, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
Example #19
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_network_from_ae(classn):
    """Build the CNN classifier whose weights are later seeded from an autoencoder.

    The net is a stack of batch-normalized, stride-1, leaky-ReLU convolutions
    with one max-pool after the stem, finished by 20x20 average pooling and a
    sigmoid dense layer of size `classn`.

    Returns:
        (network, input_var, target_var) — the output layer plus the symbolic
        input tensor and integer-matrix target variable.
    """
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')

    def conv(incoming, n_filters, f_size):
        # Batch-normalized stride-1 convolution with leaky-ReLU activation.
        return batch_norm(layers.Conv2DLayer(
            incoming, n_filters, filter_size=f_size, stride=1,
            nonlinearity=leaky_rectify))

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = conv(layer, 100, (5, 5))
    layer = conv(layer, 100, (5, 5))
    layer = conv(layer, 120, (4, 4))
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=2)
    layer = conv(layer, 240, (3, 3))
    for _ in range(8):
        layer = conv(layer, 320, (3, 3))
    for _ in range(4):
        layer = conv(layer, 480, (3, 3))
    # Global 20x20 average pooling followed by the sigmoid classification head.
    layer = layers.Pool2DLayer(layer, pool_size=(20, 20), stride=20,
                               mode='average_inc_pad')
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid)

    return network, input_var, target_var
Example #20
Source File: deep_conv_ae_spsparse_alt30.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the convolutional autoencoder (alt30 variant).

    The encoder alternates pairs of batch-normalized 'same' convolutions with
    average-pool downsampling; the decoder mirrors it with transposed
    convolutions. The layer just before the 1x1/3x3 bottleneck is exposed as
    the mask map.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the mask layer and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, **kwargs):
        # Batch-normalized stride-1 convolution with leaky-ReLU activation.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, stride=1,
            nonlinearity=leaky_rectify, **kwargs))

    def bn_deconv(net, n_filters, f_size, stride, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size, stride=stride,
            nonlinearity=leaky_rectify, **kwargs))

    def avg_pool(net, size=(2, 2)):
        # Stride-2 average pooling (zero padding included in the average).
        return layers.Pool2DLayer(net, pool_size=size, stride=2,
                                  mode='average_inc_pad')

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- encoder: conv pairs interleaved with average pooling ----
    net = bn_conv(net, 100, (5, 5), pad='same')
    net = bn_conv(net, 120, (5, 5), pad='same')
    net = avg_pool(net)
    net = bn_conv(net, 240, (3, 3), pad='same')
    net = bn_conv(net, 240, (3, 3), pad='same')
    net = avg_pool(net)
    net = bn_conv(net, 320, (3, 3), pad='same')
    net = bn_conv(net, 320, (3, 3), pad='same')
    net = avg_pool(net, size=(3, 3))
    net = bn_conv(net, 480, (3, 3), pad='same')
    net = bn_conv(net, 480, (3, 3), pad='same')
    net = avg_pool(net)
    net = bn_conv(net, 640, (3, 3), pad='same')
    net = bn_conv(net, 640, (3, 3), pad='same')
    net = avg_pool(net)
    mask_map = net  # activations exposed to the caller as mask_var

    # ---- bottleneck (no padding given, so default 'valid' convolution) ----
    net = bn_conv(net, 300, (1, 1))
    net = bn_conv(net, 1000, (3, 3))

    # ---- decoder: transposed convolutions mirroring the encoder ----
    net = bn_deconv(net, 300, (3, 3), stride=1)
    net = bn_deconv(net, 640, (4, 4), stride=2, crop=(1, 1))
    net = bn_deconv(net, 640, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 640, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 480, (4, 4), stride=2, crop=(1, 1))
    net = bn_deconv(net, 480, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 480, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 320, (3, 3), stride=2, crop=(0, 0))
    net = bn_deconv(net, 320, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 320, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 240, (4, 4), stride=2, crop=(1, 1))
    net = bn_deconv(net, 240, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 240, (3, 3), stride=1, crop='same')
    net = bn_deconv(net, 120, (4, 4), stride=2, crop=(1, 1))
    net = bn_deconv(net, 120, (5, 5), stride=1, crop='same')
    net = bn_deconv(net, 100, (5, 5), stride=1, crop='same')
    # Final linear reconstruction layer (no batch norm, identity output).
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #21
Source File: deep_conv_ae_spsparse_alt38.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse convolutional autoencoder (alt38 variant).

    A shared encoder feeds two decoding branches: a local branch that
    reconstructs from a soft-thresholded (sparse) feature map, and a coarse
    global branch decoded from a heavily pooled "global_feature" layer.
    Their outputs are summed element-wise to form the reconstruction.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the mask map and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, nonlin=leaky_rectify, **kwargs):
        # Batch-normalized convolution; nonlinearity defaults to leaky ReLU.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, nonlinearity=nonlin, **kwargs))

    def bn_deconv(net, n_filters, f_size, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size,
            nonlinearity=leaky_rectify, **kwargs))

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- shared encoder ----
    net = bn_conv(net, 100, (5, 5), stride=1, pad='same')
    net = bn_conv(net, 120, (5, 5), stride=1, pad='same')
    net = layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                             mode='average_inc_pad')
    net = bn_conv(net, 240, (3, 3), stride=1, pad='same')
    net = bn_conv(net, 320, (3, 3), stride=1, pad='same')
    net = layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                             mode='average_inc_pad')
    net = bn_conv(net, 640, (3, 3), stride=1, pad='same')
    prely = bn_conv(net, 1024, (3, 3), stride=1, pad='same')

    # ---- sparse feature map: ReLU features soft-thresholded at the
    #      98.4th percentile become the mask ----
    featm = bn_conv(prely, 640, (1, 1))
    feat_map = bn_conv(featm, 100, (1, 1), nonlin=rectify, name="feat_map")
    mask_map = SoftThresPerc(feat_map, perc=98.4, alpha=0.1,
                             beta=init.Constant(0.5), tight=20.0,
                             name="mask_map")

    # ---- local decoding branch (from the sparse mask) ----
    local = bn_deconv(mask_map, 1024, (3, 3), stride=1, crop='same')
    local = bn_deconv(local, 640, (3, 3), stride=1, crop='same')
    local = bn_deconv(local, 640, (4, 4), stride=2, crop=(1, 1))
    local = bn_deconv(local, 320, (3, 3), stride=1, crop='same')
    local = bn_deconv(local, 320, (3, 3), stride=1, crop='same')
    local = bn_deconv(local, 240, (4, 4), stride=2, crop=(1, 1))
    local = bn_deconv(local, 120, (5, 5), stride=1, crop='same')
    local = bn_deconv(local, 100, (5, 5), stride=1, crop='same')
    local = layers.Deconv2DLayer(local, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # ---- global decoding branch (from a heavily pooled summary) ----
    glblf = bn_conv(prely, 128, (1, 1))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = bn_conv(glblf, 64, (3, 3), stride=1, pad='same')
    # NOTE: the name goes to batch_norm here (as in the original), not the conv.
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")
    glblf = bn_deconv(glblf, 256, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (4, 4), stride=2, crop=(1, 1))
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (4, 4), stride=2, crop=(1, 1))
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum of the two branches is the final reconstruction.
    merged = layers.ElemwiseSumLayer([local, glblf])
    network = ReshapeLayer(merged, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #22
Source File: deep_conv_ae_spsparse_alt23.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse convolutional autoencoder (alt23 variant).

    A deep stride-1 encoder produces a shared representation `prely`. From it,
    a feature map and a soft-thresholded scalar mask are combined with
    ChInnerProdMerge into a sparse encoding, which a mirrored deconvolution
    stack decodes; a separate pooled "global_feature" branch is decoded and
    summed in element-wise.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the mask map and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, nonlin=leaky_rectify, **kwargs):
        # Batch-normalized convolution; nonlinearity defaults to leaky ReLU.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, nonlinearity=nonlin, **kwargs))

    def bn_deconv(net, n_filters, f_size, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size,
            nonlinearity=leaky_rectify, **kwargs))

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- encoder: all stride-1, 'same' padding (no downsampling) ----
    for n_filters, k in [(100, 5), (120, 5), (140, 5), (160, 5), (180, 5),
                         (220, 5), (240, 3), (320, 3), (360, 3)]:
        net = bn_conv(net, n_filters, (k, k), stride=1, pad='same')
    prely = bn_conv(net, 480, (3, 3), stride=1, pad='same')

    # ---- sparse encoding: features gated by a soft-thresholded mask ----
    featm = bn_conv(prely, 480, (1, 1))
    feat_map = bn_conv(featm, 320, (1, 1), nonlin=rectify, name="feat_map")
    maskm = bn_conv(prely, 320, (1, 1))
    # Linear 1-channel mask logits; batch norm without learned scale/shift.
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.0, alpha=0.1,
                             beta=init.Constant(0.5), tight=100.0,
                             name="mask_map")
    recon = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # ---- decoder: mirror of the encoder ----
    for n_filters, k in [(480, 3), (360, 3), (320, 3), (240, 3), (220, 5),
                         (180, 5), (160, 5), (140, 5), (120, 5), (100, 5)]:
        recon = bn_deconv(recon, n_filters, (k, k), stride=1, crop='same')
    recon = layers.Deconv2DLayer(recon, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # ---- global branch ----
    glblf = bn_conv(prely, 128, (1, 1))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = bn_conv(glblf, 64, (3, 3), stride=1, pad='same')
    # NOTE: the name goes to batch_norm here (as in the original), not the conv.
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")
    glblf = bn_deconv(glblf, 256, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    merged = layers.ElemwiseSumLayer([recon, glblf])
    network = ReshapeLayer(merged, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #23
Source File: deep_conv_ae_spsparse_alt22.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse convolutional autoencoder (alt22 variant).

    Like the sibling variants: a stride-1 encoder, a feature map gated by a
    soft-thresholded mask (ChInnerProdMerge), a mirrored deconvolution
    decoder, and a pooled "global_feature" branch summed into the output.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the mask map and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, nonlin=leaky_rectify, **kwargs):
        # Batch-normalized convolution; nonlinearity defaults to leaky ReLU.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, nonlinearity=nonlin, **kwargs))

    def bn_deconv(net, n_filters, f_size, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size,
            nonlinearity=leaky_rectify, **kwargs))

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- encoder: all stride-1, 'same' padding (no downsampling) ----
    for n_filters, k in [(100, 5), (120, 5), (140, 5), (160, 5),
                         (240, 3), (320, 3), (360, 3)]:
        net = bn_conv(net, n_filters, (k, k), stride=1, pad='same')
    prely = bn_conv(net, 480, (3, 3), stride=1, pad='same')

    # ---- sparse encoding: features gated by a soft-thresholded mask ----
    featm = bn_conv(prely, 480, (1, 1))
    feat_map = bn_conv(featm, 320, (1, 1), nonlin=rectify, name="feat_map")
    maskm = bn_conv(prely, 320, (1, 1))
    # Linear 1-channel mask logits; batch norm without learned scale/shift.
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.0, alpha=0.1,
                             beta=init.Constant(0.5), tight=100.0,
                             name="mask_map")
    recon = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # ---- decoder ----
    for n_filters, k in [(480, 3), (360, 3), (320, 3), (320, 3),
                         (160, 5), (140, 5), (120, 5), (100, 5)]:
        recon = bn_deconv(recon, n_filters, (k, k), stride=1, crop='same')
    recon = layers.Deconv2DLayer(recon, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # ---- global branch ----
    glblf = bn_conv(prely, 128, (1, 1))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = bn_conv(glblf, 64, (3, 3), stride=1, pad='same')
    # NOTE: the name goes to batch_norm here (as in the original), not the conv.
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")
    glblf = bn_deconv(glblf, 256, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 128, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 32, (3, 3), stride=1, crop='same')
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    merged = layers.ElemwiseSumLayer([recon, glblf])
    network = ReshapeLayer(merged, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #24
Source File: deep_conv_ae_spsparse_alt34.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse convolutional autoencoder (alt34 variant).

    A narrow stride-1 encoder feeds a feature map gated by a soft-thresholded
    mask (ChInnerProdMerge); a mirrored deconvolution decoder reconstructs
    locally, and a 20x-pooled "global_feature" branch (restored with
    Upscale2DLayer) is summed into the output.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the mask map and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, nonlin=leaky_rectify, **kwargs):
        # Batch-normalized convolution; nonlinearity defaults to leaky ReLU.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, nonlinearity=nonlin, **kwargs))

    def bn_deconv(net, n_filters, f_size, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size,
            nonlinearity=leaky_rectify, **kwargs))

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- encoder: all stride-1, 'same' padding (no downsampling) ----
    for n_filters, k in [(80, 5), (80, 5), (80, 5), (80, 5),
                         (100, 3), (100, 3), (120, 3)]:
        net = bn_conv(net, n_filters, (k, k), stride=1, pad='same')
    prely = bn_conv(net, 140, (3, 3), stride=1, pad='same')

    # ---- sparse encoding: features gated by a soft-thresholded mask ----
    featm = bn_conv(prely, 100, (1, 1))
    feat_map = bn_conv(featm, 100, (1, 1), nonlin=rectify, name="feat_map")
    maskm = bn_conv(prely, 100, (1, 1))
    # Linear 1-channel mask logits; batch norm without learned scale/shift.
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5,
                             beta=init.Constant(0.1), tight=100.0,
                             name="mask_map")
    recon = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # ---- decoder: mirror of the encoder ----
    for n_filters, k in [(140, 3), (120, 3), (100, 3), (100, 3),
                         (80, 5), (80, 5), (80, 5), (80, 5)]:
        recon = bn_deconv(recon, n_filters, (k, k), stride=1, crop='same')
    recon = layers.Deconv2DLayer(recon, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # ---- global branch: 20x20 pooling, decoded and upscaled back 20x ----
    glblf = bn_conv(prely, 100, (1, 1))
    glblf = layers.Pool2DLayer(glblf, pool_size=(20, 20), stride=20,
                               mode='average_inc_pad')
    glblf = bn_conv(glblf, 64, (3, 3), stride=1, pad='same')
    # NOTE: the name goes to batch_norm here (as in the original), not the conv.
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = layers.Upscale2DLayer(glblf, scale_factor=20)
    for n_filters in (48, 48, 48, 32, 32, 32):
        glblf = bn_deconv(glblf, n_filters, (3, 3), stride=1, crop='same')
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    merged = layers.ElemwiseSumLayer([recon, glblf])
    network = ReshapeLayer(merged, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #25
Source File: deep_conv_ae_spsparse_alt27.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the convolutional autoencoder (alt27 variant).

    Unlike the masked siblings, this variant has no thresholding layer: the
    bottleneck feature map itself serves as the mask map. A mirrored
    deconvolution decoder reconstructs locally, and a pooled "global_feature"
    branch is summed into the output.

    Returns:
        (network, input_var, mask_var, output_var) — the reshaped
        reconstruction layer, the symbolic input, and the symbolic outputs of
        the feature/mask map and of the reconstruction.
    """
    def bn_conv(net, n_filters, f_size, nonlin=leaky_rectify, **kwargs):
        # Batch-normalized convolution; nonlinearity defaults to leaky ReLU.
        return batch_norm(layers.Conv2DLayer(
            net, n_filters, filter_size=f_size, nonlinearity=nonlin, **kwargs))

    def bn_deconv(net, n_filters, f_size, **kwargs):
        # Batch-normalized transposed convolution with leaky-ReLU activation.
        return batch_norm(layers.Deconv2DLayer(
            net, n_filters, filter_size=f_size,
            nonlinearity=leaky_rectify, **kwargs))

    input_var = T.tensor4('input_var')
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # ---- encoder: all stride-1, 'same' padding (no downsampling) ----
    for n_filters, k in [(80, 5), (80, 5), (80, 5), (80, 5),
                         (100, 3), (100, 3), (100, 3)]:
        net = bn_conv(net, n_filters, (k, k), stride=1, pad='same')
    prely = bn_conv(net, 100, (3, 3), stride=1, pad='same')

    # ---- bottleneck feature map; doubles as the mask map here ----
    featm = bn_conv(prely, 180, (1, 1))
    feat_map = bn_conv(featm, 120, (1, 1), nonlin=rectify, name="feat_map")
    mask_map = feat_map

    # ---- decoder: mirror of the encoder ----
    recon = feat_map
    for n_filters, k in [(100, 3), (100, 3), (100, 3), (100, 3),
                         (80, 5), (80, 5), (80, 5), (80, 5)]:
        recon = bn_deconv(recon, n_filters, (k, k), stride=1, crop='same')
    recon = layers.Deconv2DLayer(recon, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # ---- global branch ----
    glblf = bn_conv(prely, 100, (1, 1))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = bn_conv(glblf, 64, (3, 3), stride=1, pad='same')
    # NOTE: the name goes to batch_norm here (as in the original), not the conv.
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (3, 3), stride=1, crop='same')
    glblf = bn_deconv(glblf, 64, (9, 9), stride=5, crop=(2, 2))
    for n_filters in (48, 48, 48, 32, 32, 32):
        glblf = bn_deconv(glblf, n_filters, (3, 3), stride=1, crop='same')
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    merged = layers.ElemwiseSumLayer([recon, glblf])
    network = ReshapeLayer(merged, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #26
Source File: deep_conv_ae_spsparse_alt35.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse-mask convolutional autoencoder (alt35 variant).

    Returns:
        (network, input_var, mask_var, output_var) — the output Lasagne
        layer, the Theano input tensor, the symbolic mask-map output, and
        the symbolic flattened reconstruction output.
    """
    def conv(net, nf, size):
        # batch-normalised 'same' convolution, stride 1, leaky ReLU
        return batch_norm(layers.Conv2DLayer(
            net, nf, filter_size=size, stride=1, pad='same',
            nonlinearity=leaky_rectify))

    def deconv(net, nf, size):
        # batch-normalised 'same' transposed convolution, stride 1, leaky ReLU
        return batch_norm(layers.Deconv2DLayer(
            net, nf, filter_size=size, stride=1, crop='same',
            nonlinearity=leaky_rectify))

    input_var = T.tensor4('input_var')

    # Encoder trunk: stack of same-resolution convolutions.
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    for nf, size in [(80, (5, 5)), (80, (5, 5)), (100, (5, 5)),
                     (120, (5, 5)), (140, (5, 5)), (160, (3, 3)),
                     (180, (3, 3))]:
        net = conv(net, nf, size)
    prely = conv(net, 200, (3, 3))

    # Feature branch and sparse mask branch, merged channel-wise.
    featm = batch_norm(layers.Conv2DLayer(
        prely, 160, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(
        featm, 120, filter_size=(1, 1), nonlinearity=rectify,
        name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(
        prely, 120, filter_size=(1, 1), nonlinearity=leaky_rectify))
    # No learned shift/scale on the raw mask response (beta=None, gamma=None).
    mask_rep = batch_norm(layers.Conv2DLayer(
        maskm, 1, filter_size=(1, 1), nonlinearity=None),
        beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5,
                             beta=init.Constant(0.1), tight=100.0,
                             name="mask_map")
    net = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # Local decoder: mirror of the encoder trunk.
    for nf, size in [(200, (3, 3)), (180, (3, 3)), (160, (3, 3)),
                     (140, (5, 5)), (120, (5, 5)), (100, (5, 5)),
                     (80, (5, 5)), (80, (5, 5))]:
        net = deconv(net, nf, size)
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    # Global-context branch: heavy 20x pooling, then upsample back.
    glblf = batch_norm(layers.Conv2DLayer(
        prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(20, 20), stride=20,
                               mode='average_inc_pad')
    glblf = conv(glblf, 64, (3, 3))
    glblf = batch_norm(layers.Conv2DLayer(
        glblf, 3, filter_size=(1, 1), nonlinearity=rectify),
        name="global_feature")
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = layers.Upscale2DLayer(glblf, scale_factor=20)
    for nf in (48, 48, 48, 32, 32, 32):
        glblf = deconv(glblf, nf, (3, 3))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum local and global reconstructions; flatten to one vector per sample.
    net = layers.ElemwiseSumLayer([net, glblf])
    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #27
Source File: deep_conv_ae_spsparse_alt28.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the convolutional autoencoder (alt28 variant, no mask branch).

    The "mask" output here is simply the feature map itself — this variant
    has no sparse-mask thresholding stage.

    Returns:
        (network, input_var, mask_var, output_var) — the output Lasagne
        layer, the Theano input tensor, the symbolic feature-map output
        (standing in for the mask), and the flattened reconstruction output.
    """
    def conv(net, nf, size):
        # batch-normalised 'same' convolution, stride 1, leaky ReLU
        return batch_norm(layers.Conv2DLayer(
            net, nf, filter_size=size, stride=1, pad='same',
            nonlinearity=leaky_rectify))

    def deconv(net, nf, size, stride=1, crop='same'):
        # batch-normalised transposed convolution, leaky ReLU
        return batch_norm(layers.Deconv2DLayer(
            net, nf, filter_size=size, stride=stride, crop=crop,
            nonlinearity=leaky_rectify))

    def avg_pool2(net):
        # 2x2 average pooling, halves spatial resolution
        return layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                                  mode='average_inc_pad')

    input_var = T.tensor4('input_var')

    # Encoder trunk with two 2x downsampling stages.
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    net = conv(net, 100, (5, 5))
    net = conv(net, 120, (5, 5))
    net = avg_pool2(net)
    net = conv(net, 240, (3, 3))
    net = conv(net, 320, (3, 3))
    net = avg_pool2(net)
    net = conv(net, 640, (3, 3))
    prely = conv(net, 1024, (3, 3))

    # Bottleneck feature map; doubles as the "mask" output.
    featm = batch_norm(layers.Conv2DLayer(
        prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(
        featm, 100, filter_size=(1, 1), nonlinearity=rectify,
        name="feat_map"))
    mask_map = feat_map
    net = feat_map

    # Local decoder: 4,4/stride-2 deconvs undo the two pooling stages.
    net = deconv(net, 1024, (3, 3))
    net = deconv(net, 640, (3, 3))
    net = deconv(net, 640, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 240, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 120, (5, 5))
    net = deconv(net, 100, (5, 5))
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    # Global-context branch: extra 5x pooling, then strided deconvs back up.
    glblf = batch_norm(layers.Conv2DLayer(
        prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = conv(glblf, 64, (3, 3))
    glblf = batch_norm(layers.Conv2DLayer(
        glblf, 5, filter_size=(1, 1), nonlinearity=rectify),
        name="global_feature")
    glblf = deconv(glblf, 256, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 64, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 32, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum local and global reconstructions; flatten to one vector per sample.
    net = layers.ElemwiseSumLayer([net, glblf])
    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #28
Source File: deep_conv_ae_spsparse_alt5.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse-mask convolutional autoencoder (alt5 variant).

    Uses ``BNRectifyPerc`` (percentile-based rectification) for the mask
    branch instead of the soft-threshold used by sibling variants.

    Returns:
        (network, input_var, mask_var, output_var) — the output Lasagne
        layer, the Theano input tensor, the symbolic mask-map output, and
        the symbolic flattened reconstruction output.
    """
    def conv(net, nf, size):
        # batch-normalised 'same' convolution, stride 1, leaky ReLU
        return batch_norm(layers.Conv2DLayer(
            net, nf, filter_size=size, stride=1, pad='same',
            nonlinearity=leaky_rectify))

    def deconv(net, nf, size, stride=1, crop='same'):
        # batch-normalised transposed convolution, leaky ReLU
        return batch_norm(layers.Deconv2DLayer(
            net, nf, filter_size=size, stride=stride, crop=crop,
            nonlinearity=leaky_rectify))

    def avg_pool2(net):
        # 2x2 average pooling, halves spatial resolution
        return layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                                  mode='average_inc_pad')

    input_var = T.tensor4('input_var')

    # Encoder trunk with two 2x downsampling stages.
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    net = conv(net, 80, (5, 5))
    net = conv(net, 104, (5, 5))
    net = avg_pool2(net)
    net = conv(net, 208, (3, 3))
    net = conv(net, 256, (3, 3))
    net = avg_pool2(net)
    net = conv(net, 512, (3, 3))
    prely = conv(net, 720, (3, 3))

    # Feature branch and percentile-rectified mask branch.
    featm = batch_norm(layers.Conv2DLayer(
        prely, 512, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(
        featm, 80, filter_size=(1, 1), nonlinearity=rectify,
        name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(
        prely, 80, filter_size=(1, 1), nonlinearity=leaky_rectify))
    # No learned shift/scale on the raw mask response (beta=None, gamma=None).
    mask_rep = batch_norm(layers.Conv2DLayer(
        maskm, 1, filter_size=(1, 1), nonlinearity=None),
        beta=None, gamma=None)
    mask_map = BNRectifyPerc(mask_rep, perc=97.92, alpha=0.1,
                             beta=init.Constant(0.5), name="mask_map")
    net = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # Local decoder: 4,4/stride-2 deconvs undo the two pooling stages.
    net = deconv(net, 720, (3, 3))
    net = deconv(net, 512, (3, 3))
    net = deconv(net, 256, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 256, (3, 3))
    net = deconv(net, 208, (3, 3))
    net = deconv(net, 208, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 104, (5, 5))
    net = deconv(net, 80, (5, 5))
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    # Global-context branch: extra 5x pooling, then strided deconvs back up.
    glblf = batch_norm(layers.Conv2DLayer(
        prely, 64, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = conv(glblf, 32, (3, 3))
    glblf = batch_norm(layers.Conv2DLayer(
        glblf, 3, filter_size=(1, 1), nonlinearity=rectify),
        name="global_feature")
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (9, 9), stride=5, crop=(2, 2))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 32, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = deconv(glblf, 16, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 16, (3, 3))
    glblf = deconv(glblf, 16, (3, 3))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum local and global reconstructions; flatten to one vector per sample.
    net = layers.ElemwiseSumLayer([net, glblf])
    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #29
Source File: train_cae.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse-mask convolutional autoencoder (train_cae variant).

    Returns:
        (network, input_var, mask_var, output_var) — the output Lasagne
        layer, the Theano input tensor, the symbolic mask-map output, and
        the symbolic flattened reconstruction output.
    """
    def conv(net, nf, size):
        # batch-normalised 'same' convolution, stride 1, leaky ReLU
        return batch_norm(layers.Conv2DLayer(
            net, nf, filter_size=size, stride=1, pad='same',
            nonlinearity=leaky_rectify))

    def deconv(net, nf, size, stride=1, crop='same'):
        # batch-normalised transposed convolution, leaky ReLU
        return batch_norm(layers.Deconv2DLayer(
            net, nf, filter_size=size, stride=stride, crop=crop,
            nonlinearity=leaky_rectify))

    def avg_pool2(net):
        # 2x2 average pooling, halves spatial resolution
        return layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                                  mode='average_inc_pad')

    input_var = T.tensor4('input_var')

    # Encoder trunk with two 2x downsampling stages.
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    net = conv(net, 100, (5, 5))
    net = conv(net, 120, (5, 5))
    net = avg_pool2(net)
    net = conv(net, 240, (3, 3))
    net = conv(net, 320, (3, 3))
    net = avg_pool2(net)
    net = conv(net, 640, (3, 3))
    prely = conv(net, 1024, (3, 3))

    # Feature branch and soft-thresholded sparse mask branch.
    featm = batch_norm(layers.Conv2DLayer(
        prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(
        featm, 100, filter_size=(1, 1), nonlinearity=rectify,
        name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(
        prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    # No learned shift/scale on the raw mask response (beta=None, gamma=None).
    mask_rep = batch_norm(layers.Conv2DLayer(
        maskm, 1, filter_size=(1, 1), nonlinearity=None),
        beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1,
                             beta=init.Constant(0.5), tight=100.0,
                             name="mask_map")
    net = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # Local decoder: 4,4/stride-2 deconvs undo the two pooling stages.
    net = deconv(net, 1024, (3, 3))
    net = deconv(net, 640, (3, 3))
    net = deconv(net, 640, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 240, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 120, (5, 5))
    net = deconv(net, 100, (5, 5))
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    # Global-context branch: extra 5x pooling, then strided deconvs back up.
    glblf = batch_norm(layers.Conv2DLayer(
        prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = conv(glblf, 64, (3, 3))
    glblf = batch_norm(layers.Conv2DLayer(
        glblf, 5, filter_size=(1, 1), nonlinearity=rectify),
        name="global_feature")
    glblf = deconv(glblf, 256, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 64, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 32, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum local and global reconstructions; flatten to one vector per sample.
    net = layers.ElemwiseSumLayer([net, glblf])
    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example #30
Source File: train_cae.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    """Build the sparse-mask convolutional autoencoder (train_cae variant).

    Returns:
        (network, input_var, mask_var, output_var) — the output Lasagne
        layer, the Theano input tensor, the symbolic mask-map output, and
        the symbolic flattened reconstruction output.
    """
    def conv(net, nf, size):
        # batch-normalised 'same' convolution, stride 1, leaky ReLU
        return batch_norm(layers.Conv2DLayer(
            net, nf, filter_size=size, stride=1, pad='same',
            nonlinearity=leaky_rectify))

    def deconv(net, nf, size, stride=1, crop='same'):
        # batch-normalised transposed convolution, leaky ReLU
        return batch_norm(layers.Deconv2DLayer(
            net, nf, filter_size=size, stride=stride, crop=crop,
            nonlinearity=leaky_rectify))

    def avg_pool2(net):
        # 2x2 average pooling, halves spatial resolution
        return layers.Pool2DLayer(net, pool_size=(2, 2), stride=2,
                                  mode='average_inc_pad')

    input_var = T.tensor4('input_var')

    # Encoder trunk with two 2x downsampling stages.
    net = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    net = conv(net, 100, (5, 5))
    net = conv(net, 120, (5, 5))
    net = avg_pool2(net)
    net = conv(net, 240, (3, 3))
    net = conv(net, 320, (3, 3))
    net = avg_pool2(net)
    net = conv(net, 640, (3, 3))
    prely = conv(net, 1024, (3, 3))

    # Feature branch and soft-thresholded sparse mask branch.
    featm = batch_norm(layers.Conv2DLayer(
        prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(
        featm, 100, filter_size=(1, 1), nonlinearity=rectify,
        name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(
        prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    # No learned shift/scale on the raw mask response (beta=None, gamma=None).
    mask_rep = batch_norm(layers.Conv2DLayer(
        maskm, 1, filter_size=(1, 1), nonlinearity=None),
        beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1,
                             beta=init.Constant(0.5), tight=100.0,
                             name="mask_map")
    net = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    # Local decoder: 4,4/stride-2 deconvs undo the two pooling stages.
    net = deconv(net, 1024, (3, 3))
    net = deconv(net, 640, (3, 3))
    net = deconv(net, 640, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 320, (3, 3))
    net = deconv(net, 240, (4, 4), stride=2, crop=(1, 1))
    net = deconv(net, 120, (5, 5))
    net = deconv(net, 100, (5, 5))
    net = layers.Deconv2DLayer(net, 3, filter_size=(1, 1), stride=1,
                               crop='same', nonlinearity=identity)

    # Global-context branch: extra 5x pooling, then strided deconvs back up.
    glblf = batch_norm(layers.Conv2DLayer(
        prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5,
                               mode='average_inc_pad')
    glblf = conv(glblf, 64, (3, 3))
    glblf = batch_norm(layers.Conv2DLayer(
        glblf, 5, filter_size=(1, 1), nonlinearity=rectify),
        name="global_feature")
    glblf = deconv(glblf, 256, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (9, 9), stride=5, crop=(2, 2))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 128, (3, 3))
    glblf = deconv(glblf, 64, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 64, (3, 3))
    glblf = deconv(glblf, 32, (4, 4), stride=2, crop=(1, 1))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = deconv(glblf, 32, (3, 3))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1,
                                 crop='same', nonlinearity=identity)

    # Sum local and global reconstructions; flatten to one vector per sample.
    net = layers.ElemwiseSumLayer([net, glblf])
    network = ReshapeLayer(net, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var