Python lasagne.layers.get_all_layers() Examples

The following are 23 code examples of lasagne.layers.get_all_layers(), collected from open-source projects. Each example notes the project and source file it comes from.
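A quick orientation before the examples: lasagne.layers.get_all_layers() collects every layer that feeds into the given output layer (or list of layers) and returns them in topological order, with the InputLayer first and the layer you passed in last. A minimal sketch (the network here is illustrative, not from any project below):

import lasagne.layers as l

net = l.InputLayer((None, 1, 28, 28))
net = l.Conv2DLayer(net, num_filters=8, filter_size=3)
net = l.DenseLayer(net, num_units=10)

layers = l.get_all_layers(net)
print(layers)               # [InputLayer, Conv2DLayer, DenseLayer]
print(layers[0].input_var)  # the Theano input variable; this
                            # get_all_layers(net)[0].input_var idiom
                            # recurs throughout the examples below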
Example #1
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def train_function(net):

    # We use dynamic learning rates which change after some epochs
    lr_dynamic = T.scalar(name='learning_rate')

    # Theano variable for the class targets
    targets = T.matrix('targets', dtype=theano.config.floatX)

    # Get the network output
    prediction = l.get_output(net)
    
    # The Theano train function takes images and class targets as input
    log.i("COMPILING TRAIN FUNCTION...", new_line=False)
    start = time.time()
    loss = loss_function(net, prediction, targets)
    updates = net_updates(net, loss, lr_dynamic)
    train_net = theano.function([l.get_all_layers(net)[0].input_var, targets, lr_dynamic], loss, updates=updates, allow_input_downcast=True)
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return train_net

################# PREDICTION FUNCTION #################### 
Example #2
Source File: nn.py    From kaggle_diabetic with MIT License
def get_objective(l1=0.0, l2=0.0005):
    class RegularizedObjective(Objective):

        def get_loss(self, input=None, target=None, aggregation=None,
                     deterministic=False, **kwargs):

            l1_layer = get_all_layers(self.input_layer)[1]

            loss = super(RegularizedObjective, self).get_loss(
                input=input, target=target, aggregation=aggregation,
                deterministic=deterministic, **kwargs)
            if not deterministic:
                return loss \
                    + l1 * lasagne.regularization.regularize_layer_params(
                        l1_layer, lasagne.regularization.l1) \
                    + l2 * lasagne.regularization.regularize_network_params(
                        self.input_layer, lasagne.regularization.l2)
            else:
                return loss
    return RegularizedObjective 
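For reference, the same penalty can be assembled without nolearn's Objective subclass. A minimal plain-Lasagne sketch, assuming output_layer and loss are already defined (both hypothetical names):

import lasagne
from lasagne.regularization import (regularize_layer_params,
                                    regularize_network_params, l1, l2)

# L1 on the first layer after the input, L2 on the whole network,
# mirroring the coefficients above (l1=0.0, l2=0.0005)
l1_layer = lasagne.layers.get_all_layers(output_layer)[1]
loss = (loss
        + 0.0 * regularize_layer_params(l1_layer, l1)
        + 0.0005 * regularize_network_params(output_layer, l2))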
Example #3
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def test_function(net, hasTargets=True, layer_index=-1):    

    # We need the prediction function to calculate the validation accuracy
    # this way we can test the net during/after training
    # We need a version with targets and one without
    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)

    log.i("COMPILING TEST FUNCTION...", new_line=False)
    start = time.time()
    if hasTargets:
        # Theano variable for the class targets
        targets = T.matrix('targets', dtype=theano.config.floatX)
        
        loss = loss_function(net, prediction, targets)
        accuracy = accuracy_function(net, prediction, targets)
        
        test_net = theano.function([l.get_all_layers(net)[0].input_var, targets], [prediction, loss, accuracy], allow_input_downcast=True)

    else:
        test_net = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)
        
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return test_net 
Example #4
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def loadPretrained(net):

    if cfg.MODEL_NAME:

        # Load saved model
        n, c = io.loadModel(cfg.MODEL_NAME)

        # Set params
        params = l.get_all_param_values(n)
        if cfg.LOAD_OUTPUT_LAYER:
            l.set_all_param_values(net, params)
        else:
            l.set_all_param_values(l.get_all_layers(net)[:-1], params[:-2])

    return net

#################### LOSS FUNCTION ###################### 
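A note on the params[:-2] idiom above (also used in Examples #5, #8 and #11): get_all_param_values() returns a flat list of arrays, and a plain DenseLayer output contributes exactly two of them (W and b), so dropping the last layer on one side and the last two arrays on the other keeps both lists aligned. A hedged sketch, where net and params are hypothetical and the output layer is assumed to be a DenseLayer with bias (a different output layer would need a different slice):

import lasagne.layers as l

body_layers = l.get_all_layers(net)[:-1]          # everything but the output layer
assert len(l.get_all_params(body_layers)) == len(params) - 2
l.set_all_param_values(body_layers, params[:-2])  # skip the output W and b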
Example #5
Source File: AED_train.py    From AcousticEventDetection with MIT License
def loadModel(filename):
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename

    with open(net_filename, 'rb') as f:
        data = pickle.load(f)

    #for training, we only want to load the model params
    net = data['net']
    params = l.get_all_param_values(net)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])    

    print "DONE!" 
Example #6
Source File: network_repr.py    From Recipes with MIT License
def example2():
    """ Two branches"""
    # Input
    l_in = lasagne.layers.InputLayer((100, 1, 20, 20))
    # Branch one
    l_conv1 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
    l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
    l_dense1 = lasagne.layers.DenseLayer(l_pool1, num_units=20)
    # Branch two
    l_conv2 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
    l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))
    l_dense2 = lasagne.layers.DenseLayer(l_pool2, num_units=20)
    # Merge
    l_concat = lasagne.layers.ConcatLayer((l_dense1, l_dense2))
    # Output
    l_out = lasagne.layers.DenseLayer(l_concat, num_units=10)
    layers = get_all_layers(l_out)
    print(get_network_str(layers, get_network=False, incomings=True, outgoings=True))
    return None 
Example #7
Source File: model.py    From BirdNET with MIT License
def test_function(net, layer_index=-1):

    log.p('COMPILING THEANO TEST FUNCTION...', new_line=False)

    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)    
    test_function = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)        

    log.p('DONE!')

    return test_function 
Example #8
Source File: birdCLEF_train.py    From BirdCLEF2017 with MIT License
def loadParams(epoch, filename=None):
    print "IMPORTING MODEL PARAMS...",
    if filename is None:
        net_filename = MODEL_PATH + "birdCLEF_" + RUN_NAME + "_model_params_epoch_" + str(epoch) + ".pkl"
    else:
        net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!" 
Example #9
Source File: birdCLEF_test.py    From BirdCLEF2017 with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net 
Example #10
Source File: birdCLEF_evaluate.py    From BirdCLEF2017 with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #11
Source File: birdCLEF_evaluate.py    From BirdCLEF2017 with MIT License
def loadParams(epoch, filename=None):
    print "IMPORTING MODEL PARAMS...",
    net_filename = MODEL_PATH + filename
    with open(net_filename, 'rb') as f:
        params = pickle.load(f)
    if LOAD_OUTPUT_LAYER:
        l.set_all_param_values(NET, params)
    else:
        l.set_all_param_values(l.get_all_layers(NET)[:-1], params[:-2])
    print "DONE!"

################  PREDICTION SAVE/LOAD  ################## 
Example #12
Source File: padded.py    From reseg with GNU General Public License v3.0
def get_equivalent_input_padding(layer, layers_args=None):
    """Compute the equivalent padding in the input layer

    Computes the equivalent padding of a sequence of convolutional and
    pooling layers: it collects the padding of all the layers up to the
    first InputLayer, then derives the padding that would have the same
    effect if applied directly in the layer immediately before that
    chain of layers.
    """
    # Avoid the mutable-default pitfall: start from a fresh list per call
    if layers_args is None:
        layers_args = []
    # Initialize the DynamicPadding layers
    lasagne.layers.get_output(layer)
    # Loop through conv and pool layers to collect their padding and stride
    all_layers = get_all_layers(layer)
    for layer in all_layers:
        # Note: stride is numerical, but pad *could* be symbolic
        try:
            pad, stride = (layer.pad, layer.stride)
            if isinstance(pad, int):
                pad = pad, pad
            if isinstance(stride, int):
                stride = stride, stride
            layers_args.append((pad, stride))
        except AttributeError:
            pass

    # Loop backward to compute the equivalent padding in the input
    # layer
    tot_pad = T.zeros(2)
    pad_factor = T.ones(2)
    while layers_args:
        pad, stride = layers_args.pop()
        tot_pad += pad * pad_factor
        pad_factor *= stride

    return tot_pad 
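A hedged usage sketch for the function above: build a small conv/pool stack and evaluate the symbolic padding it induces at the input. The layer classes are standard Lasagne; the shapes are illustrative.

from lasagne.layers import InputLayer, Conv2DLayer, MaxPool2DLayer

net = InputLayer((None, 3, 64, 64))
net = Conv2DLayer(net, num_filters=8, filter_size=3, pad=1)
net = MaxPool2DLayer(net, pool_size=2)
net = Conv2DLayer(net, num_filters=8, filter_size=3, pad=1)

tot_pad = get_equivalent_input_padding(net)
print(tot_pad.eval())  # equivalent (pad_rows, pad_cols) at the input;
                       # [ 3.  3.] for this stack: 1 + 0 + 1 * stride 2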
Example #13
Source File: AED_test.py    From AcousticEventDetection with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #14
Source File: model.py    From BirdNET with MIT License
def classificationBranch(net, kernel_size):

    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=int(FILTERS[-1] * RESNET_K),
                        filter_size=kernel_size,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST  CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                        filter_size=1,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                        num_filters=len(cfg.CLASSES),
                        filter_size=1,
                        nonlinearity=None)
    return branch 
Example #15
Source File: sequence_encoder.py    From daps with MIT License
def _build(self, forget_bias=5.0, grad_clip=10.0):
        """Build architecture
        """
        network = InputLayer(shape=(None, self.seq_length, self.input_size),
                             name='input')
        self.input_var = network.input_var

        # Hidden layers
        tanh = lasagne.nonlinearities.tanh
        gate, constant = lasagne.layers.Gate, lasagne.init.Constant
        for _ in range(self.depth):
            network = LSTMLayer(network, self.width, nonlinearity=tanh,
                                grad_clipping=grad_clip,
                                forgetgate=gate(b=constant(forget_bias)))

        # Retain last-output state
        network = SliceLayer(network, -1, 1)

        # Output layer
        sigmoid = lasagne.nonlinearities.sigmoid
        loc_layer = DenseLayer(network, self.num_outputs * 2)
        conf_layer = DenseLayer(network, self.num_outputs,
                                nonlinearity=sigmoid)

        # Grab all layers into DAPs instance
        self.network = get_all_layers([loc_layer, conf_layer])

        # Get theano expression for outputs of DAPs model
        self.loc_var, self.conf_var = get_output([loc_layer, conf_layer],
                                                 deterministic=True) 
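Note how get_all_layers() above takes a list of output layers: it returns the union of all layers feeding into any of them, each listed once, in topological order, which is how this encoder stores its two-headed network. A minimal sketch with hypothetical layer names:

from lasagne.layers import InputLayer, DenseLayer, get_all_layers

inp = InputLayer((None, 16))
shared = DenseLayer(inp, num_units=32)
loc = DenseLayer(shared, num_units=8)
conf = DenseLayer(shared, num_units=4)

# The shared trunk appears once; both heads are included.
print(get_all_layers([loc, conf]))  # [inp, shared, loc, conf]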
Example #16
Source File: AED_eval.py    From AcousticEventDetection with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #17
Source File: network_repr.py    From Recipes with MIT License
def get_network_str(layer, get_network=True, incomings=False, outgoings=False):
    """ Returns a string representation of the entire network contained under this layer.

        Parameters
        ----------
        layer : Layer or list
            the :class:`Layer` instance for which to gather all layers feeding
            into it, or a list of :class:`Layer` instances.

        get_network : boolean
            if True, calls `get_all_layers` on `layer`
            if False, assumes `layer` already contains all `Layer` instances intended for representation

        incomings : boolean
            if True, representation includes a list of all incomings for each `Layer` instance

        outgoings : boolean
            if True, representation includes a list of all outgoings for each `Layer` instance

        Returns
        -------
        str
            A string representation of `layer`. Each layer is assigned an ID which is its corresponding index
            in the list obtained from `get_all_layers`.
        """

    # `layer` can either be a single `Layer` instance or a list of `Layer` instances.
    # If list, it can already be the result from `get_all_layers` or not, indicated by the `get_network` flag
    # Get network using get_all_layers if required:
    if get_network:
        network = get_all_layers(layer)
    else:
        network = layer

    # Initialize a list of lists to (temporarily) hold the str representation of each component, insert header
    network_str = deque([])
    network_str = _insert_header(network_str, incomings=incomings, outgoings=outgoings)

    # The representation can optionally display incoming and outgoing layers for each layer, similar to adjacency lists.
    # If requested (using the incomings and outgoings flags), build the adjacency lists.
    # The numbers/ids in the adjacency lists correspond to the layer's index in `network`
    if incomings or outgoings:
        ins, outs = _get_adjacency_lists(network)

    # For each layer in the network, build a representation and append to `network_str`
    for i, current_layer in enumerate(network):

        # Initialize list to (temporarily) hold str of layer
        layer_str = deque([])

        # First column for incomings, second for the layer itself, third for outgoings, fourth for layer description
        if incomings:
            layer_str.append(ins[i])
        layer_str.append(i)
        if outgoings:
            layer_str.append(outs[i])
        layer_str.append(str(current_layer))    # default representation can be changed by overriding __str__
        network_str.append(layer_str)
    return _get_table_str(network_str) 
Example #18
Source File: birdCLEF_evaluate.py    From BirdCLEF2017 with MIT License
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net 
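The model stats at the end count "weighted layers" by probing for a W attribute: Conv2DLayer and DenseLayer expose their kernel as .W, while input, pooling, dropout, and batch-norm layers do not and are skipped. The same idea as a standalone helper (a sketch, not from the original project):

import lasagne.layers as l

def count_weighted_layers(output_layer):
    # hasattr() keeps only layers that carry a weight matrix/kernel
    return sum(hasattr(layer, 'W') for layer in l.get_all_layers(output_layer))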
Example #19
Source File: solver.py    From visual_dynamics with MIT License
def standarize(self, net, aggregating_batch_size=100, check=False):
        start_time = time.time()
        print("Standarizing outputs...")

        # training data (one pass)
        train_data_once_gen = DataGenerator(self.train_data_fnames,
                                            data_name_offset_pairs=self.data_name_offset_pairs,
                                            transformers=net.transformers,
                                            once=True,
                                            batch_size=aggregating_batch_size,
                                            shuffle=False,
                                            dtype=theano.config.floatX)
        train_data_once_gen = ParallelGenerator(train_data_once_gen, nb_worker=1)

        standarize_layers = set()
        for pred_layer in net.pred_layers.values():
            for layer in L.get_all_layers(pred_layer):
                if isinstance(layer, LT.StandarizeLayer):
                    standarize_layers.add(layer)
        standarize_layers = list(standarize_layers)
        standarize_layer_names = [standarize_layer.name for standarize_layer in standarize_layers]
        online_stats = [OnlineStatistics(axis=standarize_layer.shared_axes) for standarize_layer in standarize_layers]

        for batch_data in train_data_once_gen:
            X = batch_data[0]
            outputs = net.predict(standarize_layer_names, [X], preprocessed=True)  # assume the only input is X
            for output, online_stat in zip(outputs, online_stats):
                online_stat.add_data(output)

        for online_stat, standarize_layer in zip(online_stats, standarize_layers):
            standarize_layer.offset.set_value(online_stat.mean.astype(theano.config.floatX))
            standarize_layer.scale.set_value(online_stat.std.astype(theano.config.floatX))

        print("... finished in %.2f s" % (time.time() - start_time))

        if check:
            train_data_once_gen = DataGenerator(self.train_data_fnames,
                                                data_name_offset_pairs=self.data_name_offset_pairs,
                                                transformers=net.transformers,
                                                once=True,
                                                batch_size=aggregating_batch_size,
                                                shuffle=False,
                                                dtype=theano.config.floatX)
            train_data_once_gen = ParallelGenerator(train_data_once_gen, nb_worker=1)

            check_online_stats = [OnlineStatistics(axis=standarize_layer.shared_axes) for standarize_layer in standarize_layers]

            for batch_data in train_data_once_gen:
                X = batch_data[0]
                outputs = net.predict(standarize_layer_names, [X], preprocessed=True)  # assume the only input is X
                for output, online_stat in zip(outputs, check_online_stats):
                    online_stat.add_data(output)

            # check that, after standardization, the mean is ~0 and the standard deviation is ~1
            for online_stat in check_online_stats:
                assert np.allclose(online_stat.mean, 0, atol=1e-5)
                assert np.allclose(online_stat.std, 1) 
Example #20
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def build_pi_model():

    log.i('BUILDING RASPBERRY PI MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # Convolutional layer groups
    for i in range(len(cfg.FILTERS)):
        
        # 3x3 Convolution + Stride
        net = batch_norm(l.Conv2DLayer(net,
                                       num_filters=cfg.FILTERS[i],
                                       filter_size=cfg.KERNEL_SIZES[i],
                                       num_groups=cfg.NUM_OF_GROUPS[i],
                                       pad='same',
                                       stride=2,
                                       W=initialization(cfg.NONLINEARITY),
                                       nonlinearity=nonlinearity(cfg.NONLINEARITY)))
        
        log.i(('\tGROUP', i + 1, 'OUT SHAPE:', l.get_output_shape(net)))
        
    # Fully connected layers + dropout layers
    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))    
    net = l.DropoutLayer(net, p=cfg.DROPOUT)
    
    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))        
    net = l.DropoutLayer(net, p=cfg.DROPOUT)
    
    # Classification Layer (Softmax)
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('softmax'), W=initialization('softmax'))
    
    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net)))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net

################## BUILDING THE MODEL ################### 
Example #21
Source File: birdCLEF_test.py    From BirdCLEF2017 with MIT License
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net 
Example #22
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def build_resnet_model():

    log.i('BUILDING RESNET MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # First Convolution
    net = l.Conv2DLayer(net,
                        num_filters=cfg.FILTERS[0],
                        filter_size=cfg.KERNEL_SIZES[0],
                        pad='same',
                        W=initialization(cfg.NONLINEARITY),
                        nonlinearity=None)
    
    log.i(("\tFIRST CONV OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Residual Stacks
    for i in range(0, len(cfg.FILTERS)):
        net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], stride=2, num_groups=cfg.NUM_OF_GROUPS[i])
        for _ in range(1, cfg.RESNET_N):
            net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], num_groups=cfg.NUM_OF_GROUPS[i], preactivated=False)
        log.i(("\tRES STACK", i + 1, "OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))
        
    # Post Activation
    net = batch_norm(net)
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity(cfg.NONLINEARITY))
        
    # Pooling
    net = l.GlobalPoolLayer(net)
    log.i(("\tFINAL POOLING SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Classification Layer    
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('identity'), W=initialization('identity'))
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity('softmax'))

    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net))))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net

################## RASPBERRY PI NET #####################
Example #23
Source File: AED_train.py    From AcousticEventDetection with MIT License
def buildModel():

    print "BUILDING MODEL TYPE..."

    #default settings
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net