Python lasagne.nonlinearities.rectify() Examples
The following are 30 code examples of lasagne.nonlinearities.rectify(), collected from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out the other functions and classes available in the lasagne.nonlinearities module.
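For orientation: rectify is Lasagne's rectified linear unit (ReLU), an elementwise max(0, x) on a Theano expression. It is usually passed to a layer through its nonlinearity argument rather than called directly. A minimal sketch of both uses (the names here are illustrative, not from any of the projects below):

import numpy as np
import theano.tensor as T
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import rectify

# called directly on a symbolic expression
x = T.matrix('x')
y = rectify(x)
print(y.eval({x: np.array([[-1.0, 2.0]], dtype='float32')}))  # [[0. 2.]]

# the common pattern: as a layer's activation
l_in = InputLayer(shape=(None, 10))
l_hid = DenseLayer(l_in, num_units=32, nonlinearity=rectify)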
Example #1
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, shared_axes=(),
             noise_samples=None, **kwargs):
    super(DenseDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity,
        num_leading_axes, **kwargs)
    self.p = p
    self.shared_axes = shared_axes

    # init random number generator
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))

    # initialize noise samples
    self.noise = self.init_noise(noise_samples)
Example #2
Source File: custom_layers.py From acnn with GNU General Public License v3.0
def __init__(self, incomings, nfilters, nrings=5, nrays=16,
             W=LI.GlorotNormal(), b=LI.Constant(0.0),
             normalize_rings=False, normalize_input=False,
             take_max=True, nonlinearity=LN.rectify, **kwargs):
    super(GCNNLayer, self).__init__(incomings, **kwargs)

    # patch operator sizes
    self.nfilters = nfilters
    self.nrings = nrings
    self.nrays = nrays
    self.filter_shape = (nfilters, self.input_shapes[0][1], nrings, nrays)
    self.biases_shape = (nfilters, )

    # patch operator parameters
    self.normalize_rings = normalize_rings
    self.normalize_input = normalize_input
    self.take_max = take_max
    self.nonlinearity = nonlinearity

    # layer parameters:
    # y = Wx + b, where x are the input features and y are the output features
    self.W = self.add_param(W, self.filter_shape, name="W")
    self.b = self.add_param(b, self.biases_shape, name="b", regularizable=False)
Example #3
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    Input = InputLayer(shape=(None, 2), input_var=image)
    print("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print("Dis fc1:", dis1.output_shape)
    if GP_norm is True:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    else:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    print("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", disout.output_shape)
    return disout
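The builder above only constructs the layer graph. A hedged usage sketch (the variable names are illustrative, not from the project) of how the discriminator's output could be compiled into a callable:

import theano
import theano.tensor as T
import lasagne

X = T.matrix('X')  # toy samples live in R^2, matching the (None, 2) input shape
disout = build_discriminator_toy(image=X, nd=512, GP_norm=False)
discriminate = theano.function([X], lasagne.layers.get_output(disout))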
Example #4
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_generator_32(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf*4*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*4, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*2, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, 3, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet4.output_shape)
    return gnet4
Example #5
Source File: models_uncond.py From EvolutionaryGAN with MIT License
def build_generator_64(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    # FC Layer
    gnet0 = DenseLayer(InputNoise, ngf*8*4*4, W=Normal(0.02), nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    # Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf*8, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv2:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf*4, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv3:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv4:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf*2, (4, 4), (2, 2), crop=1,
                          W=Normal(0.02), nonlinearity=relu)
    print("Gen deconv5:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, 3, (3, 3), (1, 1), crop='same',
                          W=Normal(0.02), nonlinearity=tanh)
    print("Gen output:", gnet6.output_shape)
    return gnet6
Example #6
Source File: adda_network.py From adda_mnist64 with MIT License
def network_classifier(self, input_var):
    network = {}
    network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64),
                                             input_var=input_var,
                                             name='classifier/input')
    network['classifier/conv1'] = Conv2DLayer(network['classifier/input'],
                                              num_filters=32, filter_size=3,
                                              stride=1, pad='valid',
                                              nonlinearity=rectify,
                                              name='classifier/conv1')
    network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'],
                                                 pool_size=2, stride=2, pad=0,
                                                 name='classifier/pool1')
    network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'],
                                              num_filters=32, filter_size=3,
                                              stride=1, pad='valid',
                                              nonlinearity=rectify,
                                              name='classifier/conv2')
    network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'],
                                                 pool_size=2, stride=2, pad=0,
                                                 name='classifier/pool2')
    network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'],
                                              num_filters=32, filter_size=3,
                                              stride=1, pad='valid',
                                              nonlinearity=rectify,
                                              name='classifier/conv3')
    network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'],
                                                 pool_size=2, stride=2, pad=0,
                                                 name='classifier/pool3')
    network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'],
                                              num_filters=32, filter_size=3,
                                              stride=1, pad='valid',
                                              nonlinearity=rectify,
                                              name='classifier/conv4')
    network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'],
                                                 pool_size=2, stride=2, pad=0,
                                                 name='classifier/pool4')
    network['classifier/dense1'] = DenseLayer(network['classifier/pool4'],
                                              num_units=64,
                                              nonlinearity=rectify,
                                              name='classifier/dense1')
    network['classifier/output'] = DenseLayer(network['classifier/dense1'],
                                              num_units=10,
                                              nonlinearity=softmax,
                                              name='classifier/output')
    return network
Example #7
Source File: conv_sup_cc_mllsll.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model/conv_ae.pkl', 'rb'))

    # Python 2 idiom: map() returns a list here, so .index() works directly
    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=100, nonlinearity=rectify)
    # two sigmoid heads (network_mll, network_sll), concatenated into one output
    network_mll = layers.DenseLayer(incoming=hidden_layer, num_units=12, nonlinearity=sigmoid)
    network_sll = layers.DenseLayer(incoming=hidden_layer, num_units=7, nonlinearity=sigmoid)
    network = lasagne.layers.ConcatLayer([network_mll, network_sll], axis=1)

    return network, encode_layer, input_var, aug_var, target_var
Example #8
Source File: highway.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.),
             W_t=init.GlorotUniform(), b_t=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    num_inputs = int(np.prod(self.input_shape[1:]))

    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h",
                                  regularizable=False)

    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t",
                                  regularizable=False)
Example #9
Source File: layers_theano.py From visual_dynamics with MIT License
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1), pad=0,
             untie_biases=False, groups=1, W=init.Uniform(),
             b=init.Constant(0.), nonlinearity=nl.rectify, flip_filters=True,
             convolution=T.nnet.conv2d, filter_dilation=(1, 1), **kwargs):
    assert num_filters % groups == 0
    self.groups = groups
    super(GroupConv2DLayer, self).__init__(incoming, num_filters, filter_size,
                                           stride=stride, pad=pad,
                                           untie_biases=untie_biases,
                                           W=W, b=b,
                                           nonlinearity=nonlinearity,
                                           flip_filters=flip_filters,
                                           convolution=convolution,
                                           filter_dilation=filter_dilation,
                                           **kwargs)
Example #10
Source File: lasagne_net.py From BirdCLEF-Baseline with MIT License
def initialization(name):
    initializations = {
        'sigmoid': init.HeNormal(gain=1.0),
        'softmax': init.HeNormal(gain=1.0),
        'elu': init.HeNormal(gain=1.0),
        'relu': init.HeNormal(gain=math.sqrt(2)),
        'lrelu': init.HeNormal(gain=math.sqrt(2/(1+0.01**2))),
        'vlrelu': init.HeNormal(gain=math.sqrt(2/(1+0.33**2))),
        'rectify': init.HeNormal(gain=math.sqrt(2)),
        'identity': init.HeNormal(gain=math.sqrt(2)),
    }
    return initializations[name]

#################### BASELINE MODEL #####################
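The table pairs each activation with a matching He-style gain, e.g. sqrt(2) for the ReLU family, so an initializer can be picked by the name of the nonlinearity it feeds. A hedged usage sketch (the layer names and sizes are illustrative):

from lasagne.layers import InputLayer, Conv2DLayer
from lasagne.nonlinearities import rectify

l_in = InputLayer(shape=(None, 1, 64, 64))
# He initialization with gain sqrt(2), matching the rectify activation
l_conv = Conv2DLayer(l_in, num_filters=32, filter_size=3,
                     W=initialization('rectify'), nonlinearity=rectify)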
Example #11
Source File: conv_sup_cc_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=100, nonlinearity=rectify)
    network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=sigmoid)

    return network, encode_layer, input_var, aug_var, target_var
Example #12
Source File: graph.py From LasagneNLP with Apache License 2.0
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape

    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)

    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b",
                                regularizable=False)
Example #13
Source File: conv_sup_regression_syn.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=encode_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=encode_layer, num_units=200, nonlinearity=linear)
    smth_act_layer = SmthAct2Layer(incoming=hidden_layer, x_start=-10.0, x_end=10.0, num_segs=20)
    network = layers.DenseLayer(incoming=smth_act_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, smth_act_layer.W]

    return (encode_layer, hidden_layer, smth_act_layer, network), input_var, target_var, stack_params
Example #14
Source File: conv_sup_regression_hseg_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_fullsize_nopool_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 50, 50), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    smth_act_layer = SmthAct2Layer(incoming=hidden_layer, x_start=-10.0, x_end=10.0, num_segs=20)
    network = SumLayer(incoming=smth_act_layer)
    stack_params = [network.b, hidden_layer.W, hidden_layer.b, smth_act_layer.W]

    return (encode_layer, hidden_layer, smth_act_layer, network), input_var, aug_var, target_var, stack_params
Example #15
Source File: conv_sup_regression_hseg_4ch_he.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_fullsize_nopool_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 50, 50), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    he_layer = HeLayer(incoming=hidden_layer)
    network = layers.DenseLayer(incoming=he_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, he_layer.W]

    return (encode_layer, hidden_layer, he_layer, network), input_var, aug_var, target_var, stack_params
Example #16
Source File: conv_sup_regression_4ch_he.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    he_layer = HeLayer(incoming=hidden_layer)
    network = layers.DenseLayer(incoming=he_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, he_layer.W]

    return (encode_layer, hidden_layer, he_layer, network), input_var, aug_var, target_var, stack_params
Example #17
Source File: conv_sup_regression_baseline.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    #smth_act_layer = SmthAct2Layer(incoming=hidden_layer, x_start=-10.0, x_end=10.0, num_segs=20)
    #network = layers.DenseLayer(incoming=smth_act_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, smth_act_layer.W]

    return (encode_layer, hidden_layer, network), input_var, aug_var, target_var, stack_params
Example #18
Source File: custom_layers.py From acnn with GNU General Public License v3.0
def __init__(self, incomings, nfilters, nrings=5, nrays=16,
             W=LI.GlorotNormal(), b=LI.Constant(0.0),
             normalize_rings=False, normalize_input=False,
             take_max=True, nonlinearity=LN.rectify, **kwargs):
    super(ACNNLayer, self).__init__(incomings, nfilters, nrings, nrays, W, b,
                                    normalize_rings, normalize_input,
                                    take_max, nonlinearity, **kwargs)

# def get_output_shape_for(self, input_shapes):
#     super(ACNNLayer, self).get_output_shape_for(input_shapes)

# def get_output_for(self, inputs, **kwargs):
#     super(ACNNLayer, self).get_output_for(inputs, **kwargs)


# Covariance layer
Example #19
Source File: nn_rmsprop_features.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden / 2, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #20
Source File: nn_adagrad.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
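For context, a hedged sketch of how a model built this way is typically trained in Lasagne. The Adagrad choice follows the file name; the estimator instance clf, the 93 input features, and the learning rate are illustrative assumptions, not taken from the project:

import theano
import theano.tensor as T
import lasagne

X, y = T.matrix('X'), T.ivector('y')
l_out = clf.build_model(input_dim=93)        # hypothetical estimator instance
probs = lasagne.layers.get_output(l_out, X)  # feed X in place of the InputLayer
loss = lasagne.objectives.categorical_crossentropy(probs, y).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.adagrad(loss, params, learning_rate=0.01)
train_fn = theano.function([X, y], loss, updates=updates)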
Example #21
Source File: conv_sup_regression_hseg_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_fullsize_nopool_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 50, 50), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    ago_layer = AgoLayer(incoming=hidden_layer, num_segs=5)
    network = layers.DenseLayer(incoming=ago_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, ago_layer.W]

    return (encode_layer, hidden_layer, ago_layer, network), input_var, aug_var, target_var, stack_params
Example #22
Source File: conv_sup_regression_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')

    ae = pickle.load(open('model_4ch/conv_ae.pkl', 'rb'))

    input_layer_index = map(lambda pair: pair[0], ae.layers).index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 4, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer

    encode_layer_index = map(lambda pair: pair[0], ae.layers).index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]

    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)

    # conventional rectified linear units
    #hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=rectify)
    #network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=linear)
    #stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b]

    # smooth activation function
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=200, nonlinearity=linear)
    ago_layer = AgoLayer(incoming=hidden_layer, num_segs=20)
    network = layers.DenseLayer(incoming=ago_layer, num_units=classn, nonlinearity=linear)
    stack_params = [network.W, network.b, hidden_layer.W, hidden_layer.b, ago_layer.W]

    return (encode_layer, hidden_layer, ago_layer, network), input_var, aug_var, target_var, stack_params
Example #23
Source File: bagging_nn_nesterov.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #24
Source File: densenet_fast.py From Recipes with MIT License
def affine_relu_conv(network, channels, filter_size, dropout, name_prefix):
    network = ScaleLayer(network, name=name_prefix + '_scale')
    network = BiasLayer(network, name=name_prefix + '_shift')
    network = NonlinearityLayer(network, nonlinearity=rectify,
                                name=name_prefix + '_relu')
    network = Conv2DLayer(network, channels, filter_size, pad='same',
                          W=lasagne.init.HeNormal(gain='relu'),
                          b=None, nonlinearity=None,
                          name=name_prefix + '_conv')
    if dropout:
        network = DropoutLayer(network, dropout)
    return network
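The helper applies a learned per-channel scale and shift, a ReLU, and then a same-padded convolution with He initialization and no bias: a pre-activation ordering common in densely connected networks. A hedged usage sketch (the growth rate and name prefix are illustrative, not from the recipe):

# one composite step inside a dense block, growth rate 12
network = affine_relu_conv(network, channels=12, filter_size=3,
                           dropout=0.2, name_prefix='block1_l1')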
Example #25
Source File: nn_adagrad_pca.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #26
Source File: nn_adagrad_log.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden / 4, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #27
Source File: nn_adagrad_pca.py From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example #28
Source File: layers.py From drmad with MIT License
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    self.num_units = num_units

    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes

    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
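The two extra matrices registered via add_param turn the per-weight L1/L2 regularization strengths into layer parameters. One plausible way to fold them into a training loss (a sketch only; layer and data_loss are assumptions, not code from the project):

import theano.tensor as T

# per-weight penalties, weighted elementwise by the coefficient matrices
l1_penalty = T.sum(abs(layer.W) * layer.L1)
l2_penalty = T.sum(T.sqr(layer.W) * layer.L2)
loss = data_loss + l1_penalty + l2_penalty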
Example #29
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, logit_posterior_mean=None,
             logit_posterior_std=None, interval=[-4.0, 0.0],
             shared_axes=(), noise_samples=None, **kwargs):
    super(DenseLogNormalDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        shared_axes=(), noise_samples=None, **kwargs)
    self.logit_posterior_mean = logit_posterior_mean
    self.logit_posterior_std = logit_posterior_std
    self.interval = interval
    self.init_params()
Example #30
Source File: layers.py From kusanagi with MIT License
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, log_sigma2=None,
             shared_axes=(), noise_samples=None, **kwargs):
    super(DenseGaussianDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity, num_leading_axes,
        p, shared_axes=(), noise_samples=None, **kwargs)
    self.p = p
    self.log_sigma2 = log_sigma2
    self.init_params()