Python lasagne.init Examples
The following are 10 code examples of the lasagne.init module, collected from open-source projects. Each example lists its source file, project, and license.
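For orientation, here is a minimal usage sketch (the layer sizes are illustrative, not taken from any of the examples below): every class in lasagne.init can be passed as the W or b argument of a layer, or sampled directly for a NumPy array.

import lasagne
from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 100))
l_hid = DenseLayer(l_in, num_units=200,
                   W=lasagne.init.GlorotUniform(),  # weights drawn when the layer is built
                   b=lasagne.init.Constant(0.))     # biases start at zero

# An initializer can also be sampled directly for a NumPy array:
W0 = lasagne.init.Normal(std=0.05).sample((100, 200))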
Example #1
Source File: layers.py From Neural-Photo-Editor with MIT License
def mdclW(num_filters, num_channels, filter_size, winit, name, scales):
    # Coefficient Initializer
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))
    # Total filter size
    size = filter_size + (filter_size-1)*(scales[-1]-1)
    # Multiscale Dilated Filter
    W = T.zeros((num_filters, num_channels, size, size))
    # Undilated Base Filter
    baseW = theano.shared(lasagne.utils.floatX(winit.sample((num_filters, num_channels, filter_size, filter_size))), name=name+'.W')
    for scale in scales[::-1]:  # iterate backwards so that we place the main filter on top
        W = T.set_subtensor(W[:, :, scales[-1]-scale:size-scales[-1]+scale:scale, scales[-1]-scale:size-scales[-1]+scale:scale],
                            baseW*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'.coeff_'+str(scale)).dimshuffle(0, 'x', 'x', 'x'))
    return W

# Subpixel Upsample Layer from https://arxiv.org/abs/1609.05158
# This layer uses a set of r^2 set_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style
# as done in the ESPCN Magic Pony paper for super-resolution.
# r is the upscale factor.
# c is the number of output channels.
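A minimal sketch of calling the mdclW helper above; the sizes and scales are made up for illustration, Orthogonal() stands in for the repository's own initmethod helper, and it assumes the theano/lasagne imports used by the rest of layers.py.

import lasagne

# Hypothetical values: a 3x3 base filter shared across dilation scales 1 and 2.
W = mdclW(num_filters=64, num_channels=32, filter_size=3,
          winit=lasagne.init.Orthogonal(), name='mdc1', scales=[1, 2])
print(W.eval().shape)  # (64, 32, 5, 5): size = 3 + 2*(2-1)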
Example #2
Source File: layers.py From Neural-Photo-Editor with MIT License
def __init__(self, incoming, RMAX, DMAX, axes='auto', epsilon=1e-4, alpha=0.1,
             beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
             mean=lasagne.init.Constant(0), inv_std=lasagne.init.Constant(1),
             **kwargs):
    super(BatchReNormDNNLayer, self).__init__(
            incoming, axes, epsilon, alpha, beta, gamma, mean, inv_std, **kwargs)
    all_but_second_axis = (0,) + tuple(range(2, len(self.input_shape)))
    self.RMAX, self.DMAX = RMAX, DMAX
    if self.axes not in ((0,), all_but_second_axis):
        raise ValueError("BatchNormDNNLayer only supports normalization "
                         "across the first axis, or across all but the "
                         "second axis, got axes=%r" % (axes,))
Example #3
Source File: layers.py From Neural-Photo-Editor with MIT License
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1), crop=0,
             untie_biases=False, W=initmethod(), b=lasagne.init.Constant(0.),
             nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
             **kwargs):
    super(DeconvLayer, self).__init__(
            incoming, num_filters, filter_size, stride, crop, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)
    # rename self.pad to self.crop
    self.crop = self.pad
    del self.pad
Example #4
Source File: layers.py From Neural-Photo-Editor with MIT License
def __init__(self, incoming, num_kernels, dim_per_kernel=5,
             theta=lasagne.init.Normal(0.05),
             log_weight_scale=lasagne.init.Constant(0.),
             b=lasagne.init.Constant(-1.), **kwargs):
    super(MinibatchLayer, self).__init__(incoming, **kwargs)
    self.num_kernels = num_kernels
    num_inputs = int(np.prod(self.input_shape[1:]))
    self.theta = self.add_param(theta, (num_inputs, num_kernels, dim_per_kernel), name="theta")
    self.log_weight_scale = self.add_param(log_weight_scale, (num_kernels, dim_per_kernel), name="log_weight_scale")
    self.W = self.theta * (T.exp(self.log_weight_scale)/T.sqrt(T.sum(T.square(self.theta), axis=0))).dimshuffle('x', 0, 1)
    self.b = self.add_param(b, (num_kernels,), name="b")
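MinibatchLayer hands its lasagne.init specs straight to add_param, which accepts either an initializer instance or a ready-made array. A minimal sketch of that pattern in a toy custom layer (the layer itself is hypothetical, not part of the example):

import numpy as np
import lasagne

class FeatureScaleLayer(lasagne.layers.Layer):
    """Toy layer: multiplies each input feature by a learned scale."""
    def __init__(self, incoming, scales=lasagne.init.Constant(1.), **kwargs):
        super(FeatureScaleLayer, self).__init__(incoming, **kwargs)
        num_inputs = int(np.prod(self.input_shape[1:]))
        # add_param draws the initial values from the lasagne.init spec
        # (a concrete NumPy array would be accepted just as well).
        self.scales = self.add_param(scales, (num_inputs,), name='scales')

    def get_output_for(self, input, **kwargs):
        return input * self.scales.dimshuffle('x', 0)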
Example #5
Source File: layers.py From Neural-Photo-Editor with MIT License
def __init__(self, incoming, num_units, mask_generator, layerIdx,
             W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
             nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    super(MaskedLayer, self).__init__(incoming, num_units, W, b, nonlinearity, **kwargs)
    self.mask_generator = mask_generator
    num_inputs = int(np.prod(self.input_shape[1:]))
    self.weights_mask = self.add_param(spec=np.ones((num_inputs, num_units), dtype=np.float32),
                                       shape=(num_inputs, num_units),
                                       name='weights_mask',
                                       trainable=False,
                                       regularizable=False)
    self.layerIdx = layerIdx
    self.shuffle_update = [(self.weights_mask, mask_generator.get_mask_layer_UPDATE(self.layerIdx))]
Example #6
Source File: layers.py From Neural-Photo-Editor with MIT License
def __init__(self, incoming, num_units, mask_generator, layerIdx,
             W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
             nonlinearity=None, **kwargs):
    super(DIML, self).__init__(incoming, num_units, W, b, nonlinearity, **kwargs)
    self.mask_generator = mask_generator
    self.layerIdx = layerIdx
    num_inputs = int(np.prod(self.input_shape[1:]))
    self.weights_mask = self.add_param(spec=np.ones((num_inputs, num_units), dtype=np.float32),
                                       shape=(num_inputs, num_units),
                                       name='weights_mask',
                                       trainable=False,
                                       regularizable=False)
    self.shuffle_update = [(self.weights_mask,
                            self.mask_generator.get_direct_input_mask_layer_UPDATE(self.layerIdx + 1))]
Example #7
Source File: layers.py From Neural-Photo-Editor with MIT License
def get_output_for(self, input, **kwargs):
    if input.ndim > 2:
        input = input.flatten(2)
    activation = T.dot(input, self.W*self.weights_mask)
    if self.b is not None:
        activation = activation + self.b.dimshuffle('x', 0)
    return self.nonlinearity(activation)

# Conditioning Masked Layer
# Currently not used.
# class CML(MaskedLayer):
#     def __init__(self, incoming, num_units, mask_generator, use_cond_mask=False,
#                  U=lasagne.init.GlorotUniform(), W=lasagne.init.GlorotUniform(),
#                  b=init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
#         super(CML, self).__init__(incoming, num_units, mask_generator, W,
#                                   b, nonlinearity, **kwargs)
#         self.use_cond_mask = use_cond_mask
#         if use_cond_mask:
#             self.U = self.add_param(spec=U,
#                                     shape=(num_inputs, num_units),
#                                     name='U',
#                                     trainable=True,
#                                     regularizable=False)theano.shared(value=self.weights_initialization((self.n_in, self.n_out)), name=self.name+'U', borrow=True)
#             self.add_param(self.U,name =
#     def get_output_for(self, input, **kwargs):
#         lin = self.lin_output = T.dot(input, self.W * self.weights_mask) + self.b
#         if self.use_cond_mask:
#             lin = lin + T.dot(T.ones_like(input), self.U * self.weights_mask)
#         return lin if self._activation is None else self._activation(lin)

# MADE layer, adapted from M. Germain
Example #8
Source File: glove_select.py From neural-dep-srl with Apache License 2.0
def main():
    voc, full_embeddings = sys.argv[1], sys.argv[2]
    voc = set([line.strip() for line in open(voc, 'r')] + ['_UNK'])

    # get all embeddings from full embeddings
    embeddings = dict()
    for i, line in enumerate(open(full_embeddings, 'r')):
        parts = line.rstrip().split()
        word = parts[0]
        if word in voc:
            # print(parts[1:])
            try:
                embeddings[word] = list(map(float, parts[1:]))
            except Exception as e:
                print('cannot parse line %i' % i, file=sys.stderr)

    # estimate dim
    dim = len(list(embeddings.values())[0])
    if dim == 0:
        raise Exception('embedding dim is 0, probably parsing error')

    # init unk
    embeddings['_UNK'] = initializer((dim,))

    # handle missing embeddings
    for word in voc:
        if word not in embeddings:
            print("no embedding for %s, skipping it " % word, file=sys.stderr)
            emb = initializer((dim,))
        else:
            emb = embeddings[word]
        print(word + '\t' + ' '.join(map(str, emb)))
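The initializer used for '_UNK' and for missing words is defined elsewhere in glove_select.py; a plausible minimal stand-in, assuming it is a lasagne.init instance (the class and std value are guesses, not taken from the project), works because lasagne initializers are callable with a shape and return a NumPy array.

from lasagne.init import Normal

# Hypothetical stand-in for the module-level initializer used in main().
initializer = Normal(std=0.1)     # calling it with a shape returns a NumPy array
unk_vector = initializer((300,))  # e.g. a 300-dimensional random _UNK embedding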
Example #9
Source File: generate.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def _sample_trained_minibatch_gan(params_file, n, batch_size, rs):
    import lasagne
    from lasagne.init import Normal
    import lasagne.layers as ll
    import theano as th
    from theano.sandbox.rng_mrg import MRG_RandomStreams
    import theano.tensor as T

    import nn

    theano_rng = MRG_RandomStreams(rs.randint(2 ** 15))
    lasagne.random.set_rng(np.random.RandomState(rs.randint(2 ** 15)))

    noise_dim = (batch_size, 100)
    noise = theano_rng.uniform(size=noise_dim)
    ls = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    ls.append(nn.batch_norm(
        ll.DenseLayer(ls[-1], num_units=4*4*512, W=Normal(0.05),
                      nonlinearity=nn.relu),
        g=None))
    ls.append(ll.ReshapeLayer(ls[-1], (batch_size, 512, 4, 4)))
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 256, 8, 8), (5, 5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None))  # 4 -> 8
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 128, 16, 16), (5, 5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None))  # 8 -> 16
    ls.append(nn.weight_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 3, 32, 32), (5, 5), W=Normal(0.05),
                         nonlinearity=T.tanh),
        train_g=True, init_stdv=0.1))  # 16 -> 32
    gen_dat = ll.get_output(ls[-1])

    with np.load(params_file) as d:
        params = [d['arr_{}'.format(i)] for i in range(9)]
    ll.set_all_param_values(ls[-1], params, trainable=True)

    sample_batch = th.function(inputs=[], outputs=gen_dat)
    samps = []
    while len(samps) < n:
        samps.extend(sample_batch())
    samps = np.array(samps[:n])
    return samps
Example #10
Source File: layers.py From Neural-Photo-Editor with MIT License
def MDCL(incoming, num_filters, scales, name, dnn=True):
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D

    # W initialization method--this should also work as Orthogonal('relu'),
    # but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialization method for the coefficients
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))

    # Number of incoming channels
    ni = lasagne.layers.get_output_shape(incoming)[1]

    # Weight parameter--the primary parameter for this block
    W = theano.shared(lasagne.utils.floatX(winit.sample((num_filters, lasagne.layers.get_output_shape(incoming)[1], 3, 3))), name=name+'W')

    # Primary Convolution Layer--No Dilation
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[1, 1],
            pad=(1, 1),
            # Note the broadcasting dimshuffle for the num_filter scalars.
            W=W*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name+'base')

    # List of remaining layers. This should probably just all be concatenated
    # into a single list rather than being a separate deal.
    nd = []
    for i, scale in enumerate(scales):
        # I don't think 0 dilation is technically defined (or if it is it's just
        # the regular filter), but I use it here as a convenient keyword to grab
        # the 1x1 mean conv.
        if scale == 0:
            nd.append(C2D(incoming=incoming,
                          num_filters=num_filters,
                          filter_size=[1, 1],
                          stride=[1, 1],
                          pad=(0, 0),
                          W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x')*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                          b=None,
                          nonlinearity=None,
                          name=name+str(scale)))
        # Note the dimshuffles in this layer--these are critical as the current
        # DilatedConv2D implementation uses a backward pass.
        else:
            nd.append(lasagne.layers.DilatedConv2DLayer(incoming=lasagne.layers.PadLayer(incoming=incoming, width=(scale, scale)),
                                                        num_filters=num_filters,
                                                        filter_size=[3, 3],
                                                        dilation=(scale, scale),
                                                        W=W.dimshuffle(1, 0, 2, 3)*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_'+str(scale)).dimshuffle('x', 0, 'x', 'x'),
                                                        b=None,
                                                        nonlinearity=None,
                                                        name=name+str(scale)))
    return ESL(nd+[n])

# MDC-based Upsample Layer.
# This is a prototype I don't make use of extensively. It's operational but it
# doesn't seem to improve results yet.