Python lasagne.layers.FlattenLayer() Examples
The following are 9 code examples of lasagne.layers.FlattenLayer(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module lasagne.layers, or try the search function.
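Before the project examples, here is a minimal, self-contained sketch of what FlattenLayer does (the layer sizes below are purely illustrative): with the default outdim=2 it collapses all trailing dimensions into one, turning a (batch, channels, height, width) feature map into a (batch, features) matrix that can feed a DenseLayer.

from lasagne.layers import InputLayer, FlattenLayer, get_output_shape

# Illustrative shapes only: a 16-channel 4x4 feature map per sample.
l_in = InputLayer(shape=(None, 16, 4, 4))
l_flat = FlattenLayer(l_in)          # default outdim=2 -> (batch, 16*4*4)
print(get_output_shape(l_flat))      # (None, 256)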
Example #1
Source File: Deopen_classification.py From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network

# random search to initialize the weights
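The function above only defines the layer graph; to use it you still have to compile a Theano function against the returned output layer. The following is a minimal sketch of that step, not code from Deopen itself (the variable names and the use of deterministic=True to disable dropout at prediction time are assumptions):

import theano
import theano.tensor as T
import lasagne

network = create_network()
X = T.tensor4('X')   # input of shape (batch, 1, 4, 2024) for l = 1000
probs = lasagne.layers.get_output(network, X, deterministic=True)
predict_fn = theano.function([X], probs)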
Example #2
Source File: Deopen_regression.py From Deopen with MIT License
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network

# random search to initialize the weights
Example #3
Source File: eeg_cnn_lib.py From EEGLearn with GNU General Public License v2.0
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
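Note that build_convpool_conv1d only wires up layers; input_vars has to be something that yields one 4-D image tensor per time window when indexed, and build_cnn from the same eeg_cnn_lib.py module must be available. A minimal calling sketch follows; the 5-D symbolic input and the value nb_classes=4 are assumptions for illustration, not taken from the file above.

import theano.tensor as T

# One 4-D image tensor (batch, n_colors, imsize, imsize) per time window,
# stacked along the first axis so that input_vars[i] selects window i.
input_vars = T.TensorType('floatX', (False,) * 5)('inputs')
network = build_convpool_conv1d(input_vars, nb_classes=4)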
Example #4
Source File: eeg_cnn_lib.py From EEGLearn with GNU General Public License v2.0
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: the gradient messages are clipped to the given value during the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)  # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Example #5
Source File: util.py From neural-dep-srl with Apache License 2.0
def mask_loss(loss, mask):
    return loss * lo(LL.FlattenLayer(mask, 1))
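The names lo and LL are module-level aliases defined elsewhere in util.py. A plausible reading, which is an assumption here rather than something shown in the snippet, is that they stand for lasagne.layers.get_output and lasagne.layers. Under that reading, FlattenLayer(mask, 1) uses outdim=1 to collapse the mask layer into a single vector, whose symbolic output is then multiplied elementwise with the loss:

# Assumed aliases (not shown in the snippet above):
import lasagne.layers as LL
from lasagne.layers import get_output as lo

# mask is a layer whose output is e.g. (batch, seq_len); FlattenLayer(mask, 1)
# reshapes that output to a flat vector of batch*seq_len entries, matching a
# flattened per-token loss vector so invalid positions are zeroed out.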
Example #6
Source File: cifar10_nin.py From Recipes with MIT License
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'], num_filters=192, filter_size=5,
                             pad=2, flip_filters=False)
    net['cccp1'] = ConvLayer(net['conv1'], num_filters=160, filter_size=1,
                             flip_filters=False)
    net['cccp2'] = ConvLayer(net['cccp1'], num_filters=96, filter_size=1,
                             flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'], pool_size=3, stride=2, mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'], num_filters=192, filter_size=5,
                             pad=2, flip_filters=False)
    net['cccp3'] = ConvLayer(net['conv2'], num_filters=192, filter_size=1,
                             flip_filters=False)
    net['cccp4'] = ConvLayer(net['cccp3'], num_filters=192, filter_size=1,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'], pool_size=3, stride=2,
                             mode='average_exc_pad', ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'], num_filters=192, filter_size=3,
                             pad=1, flip_filters=False)
    net['cccp5'] = ConvLayer(net['conv3'], num_filters=192, filter_size=1,
                             flip_filters=False)
    net['cccp6'] = ConvLayer(net['cccp5'], num_filters=10, filter_size=1,
                             flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'], pool_size=8,
                             mode='average_exc_pad', ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    return net
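As defined above, net['output'] is just the flattened 10-way global average pool; no softmax is applied in this model definition. A minimal sketch of compiling class scores from it (the variable names are assumptions, not part of the recipe):

import theano
import theano.tensor as T
import lasagne

net = build_model()
X = T.tensor4('X')   # CIFAR-10 images, (batch, 3, 32, 32)
scores = lasagne.layers.get_output(net['output'], X, deterministic=True)
predict_fn = theano.function([X], scores)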
Example #7
Source File: eeg_cnn_lib.py From EEGLearn with GNU General Public License v2.0
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: the gradient messages are clipped to the given value during the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                     nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)  # Selecting the last prediction
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer (no dropout on its inputs here):
    convpool = DenseLayer(convpool, num_units=nb_classes,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Example #8
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, InputLayer, FlattenLayer, DenseLayer
    from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    network_out = []
    if self.convnet_blueprint is not None:
        convnet_layers = [self._network_in]
        layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "64@3x3(valid) -> 64@3x3(full)"
            nb_filters, rest = layer_blueprint.split("@")
            filter_shape, rest = rest.split("(")
            nb_filters = int(nb_filters)
            filter_shape = tuple(map(int, filter_shape.split("x")))
            pad = rest[:-1]

            preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                               stride=(1, 1), nonlinearity=None, pad=pad,
                               W=lasagne.init.HeNormal(gain='relu'),
                               name="layer_{}_conv".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            convnet_layers.append(layer)

        network_out.append(FlattenLayer(preact))

    if self.fullnet_blueprint is not None:
        fullnet_layers = [FlattenLayer(self._network_in)]
        layer_blueprints = list(map(str.strip, self.fullnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "500 -> 500 -> 784"
            hidden_size = int(layer_blueprint)

            preact = DenseLayer(fullnet_layers[-1], num_units=hidden_size, nonlinearity=None,
                                W=lasagne.init.HeNormal(gain='relu'),
                                name="layer_{}_dense".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)

            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            fullnet_layers.append(layer)

        network_out.append(preact)

    self._network = ElemwiseSumLayer(network_out)
    # TODO: sigmoid should be applied here instead of within loss function.

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
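The blueprint string format used above (filters@HxW(pad) entries joined by "->") is easiest to see in isolation. The blueprint value below is purely illustrative; the parsing mirrors the loop in network():

blueprint = "64@3x3(valid) -> 128@5x5(full)"
for spec in map(str.strip, blueprint.split("->")):
    nb_filters, rest = spec.split("@")
    filter_shape, rest = rest.split("(")
    print(int(nb_filters), tuple(map(int, filter_shape.split("x"))), rest[:-1])
# prints: 64 (3, 3) valid
#         128 (5, 5) full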
Example #9
Source File: convnade.py From NADE with BSD 3-Clause "New" or "Revised" License
def network(self):
    if self._network is not None:
        return self._network

    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, ExpressionLayer, PadLayer, InputLayer, FlattenLayer, SliceLayer
    # from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify

    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)

    convnet_layers = [self._network_in]
    convnet_layers_preact = [self._network_in]
    layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
    for i, layer_blueprint in enumerate(layer_blueprints, start=1):
        # e.g. "64@3x3(valid) -> 64@3x3(full)"
        nb_filters, rest = layer_blueprint.split("@")
        filter_shape, rest = rest.split("(")
        nb_filters = int(nb_filters)
        filter_shape = tuple(map(int, filter_shape.split("x")))
        pad = rest[:-1]

        preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                           stride=(1, 1), nonlinearity=None, pad=pad,
                           W=lasagne.init.HeNormal(gain='relu'))

        if i > len(layer_blueprints) // 2 and i != len(layer_blueprints):
            shortcut = convnet_layers_preact[len(layer_blueprints) - i]
            if i == len(layer_blueprints):
                if preact.output_shape[1] != shortcut.output_shape[1]:
                    shortcut = SliceLayer(shortcut, slice(0, 1), axis=1)
                else:
                    raise NameError("Something is wrong.")
            print("Shortcut from {} to {}".format(len(layer_blueprints) - i, i))
            preact = ElemwiseSumLayer([preact, shortcut])

        convnet_layers_preact.append(preact)

        layer = NonlinearityLayer(preact, nonlinearity=rectify)
        convnet_layers.append(layer)

    self._network = FlattenLayer(preact)
    # network = DenseLayer(l, num_units=int(np.prod(self.image_shape)),
    #                      W=lasagne.init.HeNormal(),
    #                      nonlinearity=None)

    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network