Python lasagne.nonlinearities.identity() Examples
The following are 20 code examples of lasagne.nonlinearities.identity(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module lasagne.nonlinearities, or try the search function.
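For reference, lasagne.nonlinearities.identity is the linear activation: it simply returns its argument unchanged (f(x) = x). It is the conventional stand-in when a layer should apply no activation, which is why most of the examples below fall back to it whenever nonlinearity=None is passed. A minimal usage sketch (layer sizes here are illustrative, not taken from any example below):

from lasagne.layers import InputLayer, DenseLayer, get_output
from lasagne.nonlinearities import identity

# identity(x) simply returns x, so this output layer is purely linear:
# get_output yields x.dot(W) + b with no activation applied on top.
l_in = InputLayer((None, 100))
l_out = DenseLayer(l_in, num_units=10, nonlinearity=identity)
y = get_output(l_out)  # Theano expression for the linear output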
Example #1
Source File: graph.py From LasagneNLP with Apache License 2.0 | 6 votes |
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape

    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)

    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b",
                                regularizable=False)
Example #2
Source File: highway.py From LasagneNLP with Apache License 2.0 | 6 votes |
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.),
             W_t=init.GlorotUniform(), b_t=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    num_inputs = int(np.prod(self.input_shape[1:]))

    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h",
                                  regularizable=False)

    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t",
                                  regularizable=False)
Example #3
Source File: lasagne_net.py From BirdCLEF-Baseline with MIT License | 6 votes |
def initialization(name):
    initializations = {
        'sigmoid': init.HeNormal(gain=1.0),
        'softmax': init.HeNormal(gain=1.0),
        'elu': init.HeNormal(gain=1.0),
        'relu': init.HeNormal(gain=math.sqrt(2)),
        'lrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.01 ** 2))),
        'vlrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.33 ** 2))),
        'rectify': init.HeNormal(gain=math.sqrt(2)),
        'identity': init.HeNormal(gain=math.sqrt(2)),
    }
    return initializations[name]

#################### BASELINE MODEL #####################
Example #4
Source File: rotconv.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, incoming, num_filters, num_rot, filter_size, stride=(1, 1),
             border_mode="valid", untie_biases=False,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(RotConv, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.num_rot = num_rot
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.border_mode = border_mode
    self.untie_biases = untie_biases
    self.convolution = convolution

    if self.border_mode not in ['valid', 'full', 'same']:
        raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example #5
Source File: deep_conv_ae_spsparse_alt29.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    mask_map = layer
    layer = batch_norm(layers.Conv2DLayer(layer, 10, filter_size=(1,1), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 1000, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify))

    layer = batch_norm(layers.Deconv2DLayer(layer, 10, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, nonlinearity=identity)

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
Example #6
Source File: batch_norms.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 5 votes |
def batch_nmsp(layer, beta=init.Constant(-3.0), **kwargs):
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = nonlinearities.identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormSparseLayer(layer, beta=beta, **kwargs)
    if nonlinearity is not None:
        from lasagne.layers import NonlinearityLayer
        layer = NonlinearityLayer(layer, nonlinearity)
    return layer
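This helper follows the same steal-the-nonlinearity pattern as Lasagne's stock batch_norm (see Example #17 below): the wrapped layer's activation is temporarily replaced with identity so that BatchNormSparseLayer normalizes the pre-activation values, then the original activation is re-applied via a NonlinearityLayer. A minimal usage sketch, assuming batch_nmsp and the project's BatchNormSparseLayer are in scope:

from lasagne.layers import InputLayer, Conv2DLayer
from lasagne.nonlinearities import rectify

l = InputLayer((None, 3, 64, 64))
l = Conv2DLayer(l, num_filters=32, filter_size=(3, 3), nonlinearity=rectify)
# Resulting stack: Conv2DLayer (now linear, bias removed)
#   -> BatchNormSparseLayer -> NonlinearityLayer(rectify)
l = batch_nmsp(l)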
Example #7
Source File: layers_theano.py From visual_dynamics with MIT License | 5 votes |
def __init__(self, W_in=init.GlorotUniform(), W_hid=init.GlorotUniform(),
             W_cell=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.sigmoid):
    self.W_in = W_in
    self.W_hid = W_hid
    # Don't store a cell weight vector when cell is None
    if W_cell is not None:
        self.W_cell = W_cell
    self.b = b
    # For the nonlinearity, if None is supplied, use identity
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
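This class mirrors lasagne.layers.Gate, which bundles the parameters of one recurrent gate; letting nonlinearity fall back to identity gives a purely linear gate. A minimal sketch using the stock Lasagne Gate with an LSTMLayer (shapes and unit counts are illustrative):

from lasagne.layers import InputLayer, LSTMLayer, Gate
from lasagne.nonlinearities import identity

# A gate whose activation is the identity, i.e. a linear gate.
linear_gate = Gate(nonlinearity=identity)

l_in = InputLayer((None, 20, 50))  # (batch, sequence length, features)
l_lstm = LSTMLayer(l_in, num_units=64, ingate=linear_gate)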
Example #8
Source File: nn_heart.py From kaggle-heart with MIT License | 5 votes |
def __init__(self, incoming, nonlinearity=nonlinearities.rectify, **kwargs):
    super(NonlinearityLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
Example #9
Source File: test_magic.py From gelato with MIT License | 5 votes |
def test_workflow(self):
    inp = InputLayer(self.x.shape)
    out = DenseLayer(inp, 1, W=NormalSpec(sd=LognormalSpec()),
                     nonlinearity=to.identity)
    out = DenseLayer(out, 1, W=NormalSpec(sd=LognormalSpec()),
                     nonlinearity=to.identity)
    assert out.root is inp
    with out:
        pm.Normal('y', mu=get_output(out), sd=self.sd, observed=self.y)
Example #10
Source File: layers_theano.py From visual_dynamics with MIT License | 5 votes |
def __init__(self, incoming, num_filters, filter_size, stride=(2, 2), pad=0,
             untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nl.rectify, flip_filters=False, **kwargs):
    super(Deconv2DLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nl.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.untie_biases = untie_biases
    self.flip_filters = flip_filters

    if pad == 'valid':
        self.pad = (0, 0)
    elif pad == 'full':
        self.pad = 'full'
    elif pad == 'same':
        if any(s % 2 == 0 for s in self.filter_size):
            raise NotImplementedError(
                '`same` padding requires odd filter size.')
        self.pad = (self.filter_size[0] // 2, self.filter_size[1] // 2)
    else:
        self.pad = as_tuple(pad, 2, int)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example #11
Source File: lasagne_net.py From BirdCLEF-Baseline with MIT License | 5 votes |
def nonlinearity(name):
    nonlinearities = {
        'rectify': nl.rectify,
        'relu': nl.rectify,
        'lrelu': nl.LeakyRectify(0.01),
        'vlrelu': nl.LeakyRectify(0.33),
        'elu': nl.elu,
        'softmax': nl.softmax,
        'sigmoid': nl.sigmoid,
        'identity': nl.identity,
    }
    return nonlinearities[name]
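This lookup is the counterpart of the initialization helper in Example #3: the same name selects both the activation and a matching He-normal gain. A short usage sketch combining the two helpers (assuming both are in scope, as in the BirdCLEF-Baseline source):

from lasagne.layers import InputLayer, Conv2DLayer

act = 'lrelu'
l = InputLayer((None, 3, 128, 128))
# Weight-init gain and activation stay consistent because both are
# selected by the same name.
l = Conv2DLayer(l, num_filters=64, filter_size=(3, 3),
                W=initialization(act), nonlinearity=nonlinearity(act))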
Example #12
Source File: layers.py From clip2frame with ISC License | 5 votes |
def __init__(self, incoming, filter_size, init_std=5., stride=1, pad=0,
             nonlinearity=None, convolution=conv1d_mc0, **kwargs):
    super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
    self.filter_size = as_tuple(filter_size, 1)
    self.stride = as_tuple(stride, 1)
    self.convolution = convolution

    if pad == 'valid':
        self.pad = (0,)
    elif pad in ('full', 'same', 'strictsame'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 1, int)

    init_std = np.asarray(init_std, dtype=floatX)
    W_logstd = init.Constant(np.log(init_std))
    self.num_input_channels = self.input_shape[1]
    # self.num_filters = self.num_input_channels
    self.W_logstd = self.add_param(W_logstd, (self.num_input_channels,),
                                   name="W_logstd", regularizable=False,
                                   trainable=False)
    self.W = self.make_gaussian_filter()
Example #13
Source File: layers.py From clip2frame with ISC License | 5 votes |
def __init__(self, incoming, filter_size, init_std=5., W_logstd=None,
             stride=1, pad=0, nonlinearity=None,
             convolution=conv1d_mc0, **kwargs):
    super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
    # Alternative convolution implementations:
    # convolution = conv1d_gpucorrmm_mc0
    # convolution = conv.conv1d_mc0
    # convolution = T.nnet.conv2d
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
    self.filter_size = as_tuple(filter_size, 1)
    self.stride = as_tuple(stride, 1)
    self.convolution = convolution
    # if self.filter_size[0] % 2 == 0:
    #     raise NotImplementedError(
    #         'GaussianConv1dLayer requires odd filter size.')

    if pad == 'valid':
        self.pad = (0,)
    elif pad in ('full', 'same', 'strictsame'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 1, int)

    if W_logstd is None:
        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
    self.num_input_channels = self.input_shape[1]
    # self.num_filters = self.num_input_channels
    self.W_logstd = self.add_param(W_logstd, (self.num_input_channels,),
                                   name="W_logstd", regularizable=False)
    self.W = self.make_gaussian_filter()
Example #14
Source File: layers.py From drmad with MIT License | 5 votes |
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    self.num_units = num_units

    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product."
            % (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes

    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d."
            % (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
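The extra args parameter carries per-weight regularization settings and is project-specific, so the sketch below fakes it with a namespace; the flag names follow the code above, but the values are hypothetical, and DenseLayerWithReg is assumed to be in scope:

from argparse import Namespace
from lasagne.layers import InputLayer

# Hypothetical settings: per-weight L1 regularization on, L2 off.
args = Namespace(regL1=True, regL2=False,
                 regInit={'L1': 0.01, 'L2': 0.001})

l_in = InputLayer((None, 784))
l_hid = DenseLayerWithReg(args, l_in, num_units=256)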
Example #15
Source File: deep_conv_ae_spsparse_alt28.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    mask_map = feat_map
    layer = feat_map

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
Example #16
Source File: deep_conv_ae_spsparse_alt47.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    layer = BNRectifyPerc(feat_map, perc=98.4, alpha=0.1, beta=init.Constant(0.5))

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    output_var = lasagne.layers.get_output(network)

    return network, input_var, output_var
Example #17
Source File: batch_norms.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def batch_norm(layer, **kwargs):
    """
    Apply batch normalization to an existing layer. This is a convenience
    function modifying an existing layer to include batch normalization: It
    will steal the layer's nonlinearity if there is one (effectively
    introducing the normalization right before the nonlinearity), remove
    the layer's bias if there is one (because it would be redundant), and add
    a :class:`BatchNormLayer` and :class:`NonlinearityLayer` on top.

    Parameters
    ----------
    layer : A :class:`Layer` instance
        The layer to apply the normalization to; note that it will be
        irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed on to the
        :class:`BatchNormLayer` constructor.

    Returns
    -------
    BatchNormLayer or NonlinearityLayer instance
        A batch normalization layer stacked on the given modified `layer`, or
        a nonlinearity layer stacked on top of both if `layer` was nonlinear.

    Examples
    --------
    Just wrap any layer into a :func:`batch_norm` call on creating it:

    >>> from lasagne.layers import InputLayer, DenseLayer, batch_norm
    >>> from lasagne.nonlinearities import tanh
    >>> l1 = InputLayer((64, 768))
    >>> l2 = batch_norm(DenseLayer(l1, num_units=500, nonlinearity=tanh))

    This introduces batch normalization right before its nonlinearity:

    >>> from lasagne.layers import get_all_layers
    >>> [l.__class__.__name__ for l in get_all_layers(l2)]
    ['InputLayer', 'DenseLayer', 'BatchNormLayer', 'NonlinearityLayer']
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = nonlinearities.identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormLayer(layer, **kwargs)
    if nonlinearity is not None:
        from lasagne.layers import NonlinearityLayer
        layer = NonlinearityLayer(layer, nonlinearity)
    return layer
Example #18
Source File: deep_conv_ae_spsparse_alt21.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
Example #19
Source File: deep_conv_ae_spsparse_alt32.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License | 4 votes |
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=50.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
Example #20
Source File: layers.py From clip2frame with ISC License | 4 votes |
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1), pad=0,
             untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(Conv2DXLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.untie_biases = untie_biases
    self.convolution = convolution

    if pad == 'same':
        if any(s % 2 == 0 for s in self.filter_size):
            raise NotImplementedError(
                '`same` padding requires odd filter size.')
    if pad == 'strictsamex':
        if not (stride == 1 or stride == (1, 1)):
            raise NotImplementedError(
                '`strictsamex` padding requires stride=(1, 1) or 1')

    if pad == 'valid':
        self.pad = (0, 0)
    elif pad in ('full', 'same', 'strictsamex'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 2, int)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
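Compared with Lasagne's stock Conv2DLayer, this variant accepts an extra 'strictsamex' pad mode, which (per the check above) is only valid with stride 1. A hedged instantiation sketch, assuming Conv2DXLayer is in scope and shapes are illustrative:

from lasagne.layers import InputLayer

l_in = InputLayer((None, 1, 96, 128))
# 'strictsamex' requires stride=(1, 1); the default stride satisfies this.
l_conv = Conv2DXLayer(l_in, num_filters=32, filter_size=(4, 3),
                      pad='strictsamex')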