Python lasagne.init.GlorotUniform() Examples
The following are code examples of lasagne.init.GlorotUniform(), collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions and classes of the lasagne.init module.
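As a primer, here is a minimal usage sketch (not taken from any of the projects below) showing the two common ways the initializer is used: passed to a layer as W, or called directly to sample an array.

    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.init import GlorotUniform

    # Passing an instance as W: the layer samples a weight matrix of the right shape.
    l_in = InputLayer(shape=(None, 100))
    l_hid = DenseLayer(l_in, num_units=200, W=GlorotUniform())

    # Sampling directly: initializers expose sample(shape) and are callable.
    W0 = GlorotUniform().sample((100, 200))  # numpy array, shape (100, 200)

    # The gain can be adapted to the nonlinearity; 'relu' scales the range by sqrt(2).
    l_out = DenseLayer(l_hid, num_units=10, W=GlorotUniform(gain='relu'))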
Example #1
Source File: crf.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming, num_labels, mask_input=None,
             W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
    # This layer inherits from a MergeLayer, because it can have two
    # inputs - the layer input, and the mask.
    # We will just provide the layer input as incomings, unless a mask
    # input was provided.
    self.input_shape = incoming.output_shape
    incomings = [incoming]
    self.mask_incoming_index = -1
    if mask_input is not None:
        incomings.append(mask_input)
        self.mask_incoming_index = 1
    super(CRFLayer, self).__init__(incomings, **kwargs)
    self.num_labels = num_labels + 1
    self.pad_label_index = num_labels

    num_inputs = self.input_shape[2]
    self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels),
                            name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (self.num_labels, self.num_labels),
                                name="b", regularizable=False)
Example #2
Source File: graph.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape

    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)

    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b",
                                regularizable=False)
Example #3
Source File: highway.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming,
             W_h=init.GlorotUniform(), b_h=init.Constant(0.),
             W_t=init.GlorotUniform(), b_t=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    num_inputs = int(np.prod(self.input_shape[1:]))

    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h",
                                  regularizable=False)

    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t",
                                  regularizable=False)
Example #4
Source File: parser.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming, num_labels, mask_input=None,
             W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
             b=init.Constant(0.), **kwargs):
    # This layer inherits from a MergeLayer, because it can have two
    # inputs - the layer input, and the mask.
    # We will just provide the layer input as incomings, unless a mask
    # input was provided.
    self.input_shape = incoming.output_shape
    incomings = [incoming]
    self.mask_incoming_index = -1
    if mask_input is not None:
        incomings.append(mask_input)
        self.mask_incoming_index = 1
    super(DepParserLayer, self).__init__(incomings, **kwargs)
    self.num_labels = num_labels

    num_inputs = self.input_shape[2]
    # add parameters
    self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')
    self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (self.num_labels,), name='b',
                                regularizable=False)
Example #5
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, shared_axes=(),
             noise_samples=None, **kwargs):
    super(DenseDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes, **kwargs)
    self.p = p
    self.shared_axes = shared_axes
    # init random number generator
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    # initialize noise samples
    self.noise = self.init_noise(noise_samples)
Example #6
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, log_sigma2=None, shared_axes=(),
             noise_samples=None, **kwargs):
    super(DenseGaussianDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        p, shared_axes=(), noise_samples=None, **kwargs)
    self.p = p
    self.log_sigma2 = log_sigma2
    self.init_params()
Example #7
Source File: bidnn.py From BiDNN with GNU Affero General Public License v3.0
def __create_toplogy__(self, input_var_first=None, input_var_second=None):
    # define network topology
    if (self.conf.rep % 2 != 0):
        raise ValueError("Representation size should be divisible by two "
                         "as it's formed by combining two crossmodal "
                         "translations", self.conf.rep)

    # input layers
    l_in_first = InputLayer(shape=(self.conf.batch_size, self.conf.mod1size),
                            input_var=input_var_first)
    l_in_second = InputLayer(shape=(self.conf.batch_size, self.conf.mod2size),
                             input_var=input_var_second)

    # first -> second
    l_hidden1_first = DenseLayer(l_in_first, num_units=self.conf.hdn,
                                 nonlinearity=self.conf.act,
                                 W=GlorotUniform())  # enc1
    l_hidden2_first = DenseLayer(l_hidden1_first, num_units=self.conf.rep // 2,
                                 nonlinearity=self.conf.act,
                                 W=GlorotUniform())  # enc2
    l_hidden2_first_d = DropoutLayer(l_hidden2_first, p=self.conf.dropout)
    l_hidden3_first = DenseLayer(l_hidden2_first_d, num_units=self.conf.hdn,
                                 nonlinearity=self.conf.act,
                                 W=GlorotUniform())  # dec1
    l_out_first = DenseLayer(l_hidden3_first, num_units=self.conf.mod2size,
                             nonlinearity=self.conf.act,
                             W=GlorotUniform())      # dec2

    if self.conf.untied:
        # FREE
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn,
                                      nonlinearity=self.conf.act,
                                      W=GlorotUniform())  # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second,
                                      num_units=self.conf.rep // 2,
                                      nonlinearity=self.conf.act,
                                      W=GlorotUniform())  # enc2
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d,
                                      num_units=self.conf.hdn,
                                      nonlinearity=self.conf.act,
                                      W=GlorotUniform())  # dec1
        l_out_second = DenseLayer(l_hidden3_second,
                                  num_units=self.conf.mod1size,
                                  nonlinearity=self.conf.act,
                                  W=GlorotUniform())      # dec2
    else:
        # TIED middle
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn,
                                      nonlinearity=self.conf.act,
                                      W=GlorotUniform())        # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second,
                                      num_units=self.conf.rep // 2,
                                      nonlinearity=self.conf.act,
                                      W=l_hidden3_first.W.T)    # enc2
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d,
                                      num_units=self.conf.hdn,
                                      nonlinearity=self.conf.act,
                                      W=l_hidden2_first.W.T)    # dec1
        l_out_second = DenseLayer(l_hidden3_second,
                                  num_units=self.conf.mod1size,
                                  nonlinearity=self.conf.act,
                                  W=GlorotUniform())            # dec2

    l_out = concat([l_out_first, l_out_second])

    return l_out, l_hidden2_first, l_hidden2_second
Example #8
Source File: modules.py From Deep-SVDD with MIT License
def addConvModule(nnet, num_filters, filter_size, pad='same', W_init=None,
                  bias=True, pool_size=(2, 2), use_batch_norm=False,
                  dropout=False, p_dropout=0.5, upscale=False):
    """
    add a convolutional module (convolutional layer + (leaky) ReLU + MaxPool)
    to the network
    """

    if W_init is None:
        # gain adjusted for leaky ReLU with alpha=0.01
        W = GlorotUniform(gain=(2 / (1 + 0.01 ** 2)) ** 0.5)
    else:
        W = W_init

    if bias is True:
        b = Constant(0.)
    else:
        b = None

    # build module
    if dropout:
        nnet.addDropoutLayer(p=p_dropout)

    nnet.addConvLayer(use_batch_norm=use_batch_norm,
                      num_filters=num_filters,
                      filter_size=filter_size,
                      pad=pad, W=W, b=b)

    if Cfg.leaky_relu:
        nnet.addLeakyReLU()
    else:
        nnet.addReLU()

    if upscale:
        nnet.addUpscale(scale_factor=pool_size)
    else:
        nnet.addMaxPool(pool_size=pool_size)
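The gain above is the standard variance-scaling correction for leaky ReLU, gain = sqrt(2 / (1 + alpha**2)) with alpha = 0.01. A quick arithmetic check (a standalone sketch, not part of the Deep-SVDD code):

    import math

    alpha = 0.01
    gain = math.sqrt(2.0 / (1.0 + alpha ** 2))
    print(gain)  # ~1.41414, just below sqrt(2) ~= 1.41421 since alpha is small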
Example #9
Source File: rotconv.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def __init__(self, incoming, num_filters, num_rot, filter_size, stride=(1, 1),
             border_mode="valid", untie_biases=False,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(RotConv, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.num_rot = num_rot
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.border_mode = border_mode
    self.untie_biases = untie_biases
    self.convolution = convolution

    if self.border_mode not in ['valid', 'full', 'same']:
        raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example #10
Source File: layers.py From drmad with MIT License
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    self.num_units = num_units

    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes

    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
Example #11
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, logit_p=None, temp=0.1,
             shared_axes=(), noise_samples=None, **kwargs):
    super(DenseConcreteDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        p, shared_axes=(), noise_samples=None, **kwargs)
    self.temp = temp
    self.logit_p = logit_p
    self.init_params()
Example #12
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, logit_posterior_mean=None,
             logit_posterior_std=None, interval=[-4.0, 0.0],
             shared_axes=(), noise_samples=None, **kwargs):
    super(DenseLogNormalDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        shared_axes=(), noise_samples=None, **kwargs)
    self.logit_posterior_mean = logit_posterior_mean
    self.logit_posterior_std = logit_posterior_std
    self.interval = interval
    self.init_params()
Example #13
Source File: base.py From gelato with MIT License
def smart_init(shape):
    if len(shape) > 1:
        return init.GlorotUniform()(shape)
    else:
        return init.Normal()(shape)
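As a quick illustration of the shape-based dispatch above, with hypothetical shapes (these calls are not in the gelato sources): a weight matrix gets Glorot-uniform values, while a one-dimensional bias vector falls back to a normal distribution.

    W = smart_init((128, 64))  # len(shape) > 1 -> GlorotUniform sample
    b = smart_init((64,))      # 1-D shape      -> Normal sample
    print(W.shape, b.shape)    # (128, 64) (64,)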
Example #14
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, logit_alpha=None, shared_axes=(),
             noise_samples=None, max_alpha=0.5, **kwargs):
    super(DenseGaussianDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        p, shared_axes=(), noise_samples=None, **kwargs)
    self.max_alpha = max_alpha
    self.logit_alpha = logit_alpha
    self.p = p
    self.init_params()
Example #15
Source File: FaceAlignment.py From DeepAlignmentNetwork with MIT License
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(
        shape=(None, self.nChannels, self.imageHeight, self.imageWidth),
        input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    if self.confidenceLayer:
        net['s1_confidence'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=2, W=GlorotUniform('relu'), nonlinearity=lasagne.nonlinearities.softmax)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    if self.confidenceLayer:
        net['output'] = lasagne.layers.ConcatLayer([net['output'], net['s1_confidence']])

    return net
Example #16
Source File: FaceAlignmentTraining.py From DeepAlignmentNetwork with MIT License
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(
        shape=(None, self.nChannels, self.imageHeight, self.imageWidth),
        input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']

    return net
Example #17
Source File: layers_theano.py From visual_dynamics with MIT License
def __init__(self, incoming, num_filters, filter_size, stride=(2, 2),
             pad=0, untie_biases=False, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nl.rectify,
             flip_filters=False, **kwargs):
    super(Deconv2DLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nl.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.untie_biases = untie_biases
    self.flip_filters = flip_filters

    if pad == 'valid':
        self.pad = (0, 0)
    elif pad == 'full':
        self.pad = 'full'
    elif pad == 'same':
        if any(s % 2 == 0 for s in self.filter_size):
            raise NotImplementedError(
                '`same` padding requires odd filter size.')
        self.pad = (self.filter_size[0] // 2, self.filter_size[1] // 2)
    else:
        self.pad = as_tuple(pad, 2, int)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example #18
Source File: layers_theano.py From visual_dynamics with MIT License
def __init__(self, W_in=init.GlorotUniform(), W_hid=init.GlorotUniform(),
             W_cell=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.sigmoid):
    self.W_in = W_in
    self.W_hid = W_hid
    # Don't store a cell weight vector when cell is None
    if W_cell is not None:
        self.W_cell = W_cell
    self.b = b
    # For the nonlinearity, if None is supplied, use identity
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
Example #19
Source File: layers.py From drmad with MIT License
def __init__(self, args, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(Conv2DLayerWithReg, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = _as_tuple(filter_size, 2)
    self.stride = _as_tuple(stride, 2)
    self.untie_biases = untie_biases
    self.convolution = convolution

    if pad == 'valid':
        self.pad = (0, 0)
    elif pad in ('full', 'same'):
        self.pad = pad
    else:
        self.pad = _as_tuple(pad, 2, int)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 self.get_W_shape(), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 self.get_W_shape(), name="L2")
Example #20
Source File: layers.py From clip2frame with ISC License
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(Conv2DXLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.untie_biases = untie_biases
    self.convolution = convolution

    if pad == 'same':
        if any(s % 2 == 0 for s in self.filter_size):
            raise NotImplementedError(
                '`same` padding requires odd filter size.')
    if pad == 'strictsamex':
        if not (stride == 1 or stride == (1, 1)):
            raise NotImplementedError(
                '`strictsamex` padding requires stride=(1, 1) or 1')

    if pad == 'valid':
        self.pad = (0, 0)
    elif pad in ('full', 'same', 'strictsamex'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 2, int)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example #21
Source File: padded.py From reseg with GNU General Public License v3.0
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             flip_filters=True, convolution=theano.tensor.nnet.conv2d,
             centered=True, **kwargs):
    """A padded convolutional layer

    Note
    ----
    If used in place of a :class:`lasagne.layers.Conv2DLayer` be sure to
    specify `flip_filters=False`, which is the default for that layer

    Parameters
    ----------
    incoming : lasagne.layers.Layer
        The input layer
    num_filters : int
        The number of filters or kernels of the convolution
    filter_size : int or iterable of int
        The size of the filters
    stride : int or iterable of int
        The stride or subsampling of the convolution
    pad : int, iterable of int, ``full``, ``same`` or ``valid``
        **Ignored!** Kept for compatibility with the
        :class:`lasagne.layers.Conv2DLayer`
    untie_biases : bool
        See :class:`lasagne.layers.Conv2DLayer`
    W : Theano shared variable, expression, numpy array or callable
        See :class:`lasagne.layers.Conv2DLayer`
    b : Theano shared variable, expression, numpy array, callable or None
        See :class:`lasagne.layers.Conv2DLayer`
    nonlinearity : callable or None
        See :class:`lasagne.layers.Conv2DLayer`
    flip_filters : bool
        See :class:`lasagne.layers.Conv2DLayer`
    convolution : callable
        See :class:`lasagne.layers.Conv2DLayer`
    centered : bool
        If True, the padding will be added on both sides. If False the
        zero padding will be applied on the upper left side.
    **kwargs
        Any additional keyword arguments are passed to the
        :class:`lasagne.layers.Layer` superclass
    """
    self.centered = centered
    if pad not in [0, (0, 0), [0, 0]]:
        warnings.warn('The specified padding will be ignored',
                      RuntimeWarning)
    super(PaddedConv2DLayer, self).__init__(incoming, num_filters,
                                            filter_size, stride, pad,
                                            untie_biases, W, b,
                                            nonlinearity, flip_filters,
                                            **kwargs)
    if self.input_shape[2:] != (None, None):
        warnings.warn('This Layer should only be used when the size of '
                      'the image is not known', RuntimeWarning)
Example #22
Source File: FaceAlignment.py From DeepAlignmentNetwork with MIT License
def addDANStage(self, stageIdx, net):
    prevStage = 's' + str(stageIdx - 1)
    curStage = 's' + str(stageIdx)

    # CONNECTION LAYERS OF PREVIOUS STAGE
    net[prevStage + '_transform_params'] = TransformParamsLayer(net[prevStage + '_landmarks'], self.initLandmarks)
    net[prevStage + '_img_output'] = AffineTransformLayer(net['input'], net[prevStage + '_transform_params'])
    net[prevStage + '_landmarks_affine'] = LandmarkTransformLayer(net[prevStage + '_landmarks'], net[prevStage + '_transform_params'])
    net[prevStage + '_img_landmarks'] = LandmarkImageLayer(net[prevStage + '_landmarks_affine'], (self.imageHeight, self.imageWidth), self.landmarkPatchSize)

    net[prevStage + '_img_feature'] = lasagne.layers.DenseLayer(net[prevStage + '_fc1'], num_units=56 * 56, W=GlorotUniform('relu'))
    net[prevStage + '_img_feature'] = lasagne.layers.ReshapeLayer(net[prevStage + '_img_feature'], (-1, 1, 56, 56))
    net[prevStage + '_img_feature'] = lasagne.layers.Upscale2DLayer(net[prevStage + '_img_feature'], 2)

    # CURRENT STAGE
    net[curStage + '_input'] = batch_norm(lasagne.layers.ConcatLayer([net[prevStage + '_img_output'], net[prevStage + '_img_landmarks'], net[prevStage + '_img_feature']], 1))

    net[curStage + '_conv1_1'] = batch_norm(Conv2DLayer(net[curStage + '_input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_conv1_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_pool1'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv1_2'], 2)

    net[curStage + '_conv2_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv2_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool2'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv2_2'], 2)

    net[curStage + '_conv3_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv3_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool3'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv3_2'], 2)

    net[curStage + '_conv4_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv4_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool4'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv4_2'], 2)
    net[curStage + '_pool4'] = lasagne.layers.FlattenLayer(net[curStage + '_pool4'])

    net[curStage + '_fc1_dropout'] = lasagne.layers.DropoutLayer(net[curStage + '_pool4'], p=0.5)
    net[curStage + '_fc1'] = batch_norm(lasagne.layers.DenseLayer(net[curStage + '_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

    net[curStage + '_output'] = lasagne.layers.DenseLayer(net[curStage + '_fc1'], num_units=136, nonlinearity=None)
    net[curStage + '_landmarks'] = lasagne.layers.ElemwiseSumLayer([net[prevStage + '_landmarks_affine'], net[curStage + '_output']])
    net[curStage + '_landmarks'] = LandmarkTransformLayer(net[curStage + '_landmarks'], net[prevStage + '_transform_params'], True)
Example #23
Source File: FaceAlignmentTraining.py From DeepAlignmentNetwork with MIT License
def addDANStage(self, stageIdx, net):
    prevStage = 's' + str(stageIdx - 1)
    curStage = 's' + str(stageIdx)

    # CONNECTION LAYERS OF PREVIOUS STAGE
    net[prevStage + '_transform_params'] = TransformParamsLayer(net[prevStage + '_landmarks'], self.initLandmarks)
    net[prevStage + '_img_output'] = AffineTransformLayer(net['input'], net[prevStage + '_transform_params'])
    net[prevStage + '_landmarks_affine'] = LandmarkTransformLayer(net[prevStage + '_landmarks'], net[prevStage + '_transform_params'])
    net[prevStage + '_img_landmarks'] = LandmarkImageLayer(net[prevStage + '_landmarks_affine'], (self.imageHeight, self.imageWidth), self.landmarkPatchSize)

    net[prevStage + '_img_feature'] = lasagne.layers.DenseLayer(net[prevStage + '_fc1'], num_units=56 * 56, W=GlorotUniform('relu'))
    net[prevStage + '_img_feature'] = lasagne.layers.ReshapeLayer(net[prevStage + '_img_feature'], (-1, 1, 56, 56))
    net[prevStage + '_img_feature'] = lasagne.layers.Upscale2DLayer(net[prevStage + '_img_feature'], 2)

    # CURRENT STAGE
    net[curStage + '_input'] = batch_norm(lasagne.layers.ConcatLayer([net[prevStage + '_img_output'], net[prevStage + '_img_landmarks'], net[prevStage + '_img_feature']], 1))

    net[curStage + '_conv1_1'] = batch_norm(Conv2DLayer(net[curStage + '_input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_conv1_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_pool1'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv1_2'], 2)

    net[curStage + '_conv2_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv2_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool2'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv2_2'], 2)

    net[curStage + '_conv3_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv3_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool3'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv3_2'], 2)

    net[curStage + '_conv4_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv4_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool4'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv4_2'], 2)
    net[curStage + '_pool4'] = lasagne.layers.FlattenLayer(net[curStage + '_pool4'])

    net[curStage + '_fc1_dropout'] = lasagne.layers.DropoutLayer(net[curStage + '_pool4'], p=0.5)
    net[curStage + '_fc1'] = batch_norm(lasagne.layers.DenseLayer(net[curStage + '_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

    net[curStage + '_output'] = lasagne.layers.DenseLayer(net[curStage + '_fc1'], num_units=136, nonlinearity=None)
    net[curStage + '_landmarks'] = lasagne.layers.ElemwiseSumLayer([net[prevStage + '_landmarks_affine'], net[curStage + '_output']])
    net[curStage + '_landmarks'] = LandmarkTransformLayer(net[curStage + '_landmarks'], net[prevStage + '_transform_params'], True)