Python keras.layers.ELU Examples

The following are 14 code examples of keras.layers.ELU(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the keras.layers module.
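For orientation, ELU (Exponential Linear Unit) computes f(x) = x for x > 0 and f(x) = alpha * (exp(x) - 1) for x <= 0, so negative inputs saturate smoothly toward -alpha. A minimal standalone sketch (the alpha value and input shape are illustrative, not taken from any project below):

import numpy as np
from keras.models import Sequential
from keras.layers import ELU

# A one-layer model that just applies ELU elementwise.
model = Sequential()
model.add(ELU(alpha=1.0, input_shape=(3,)))

x = np.array([[-2.0, 0.0, 2.0]], dtype="float32")
print(model.predict(x))  # ~[[-0.865, 0.0, 2.0]]: negatives saturate toward -alpha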
Example #1
Source File: train_steering_model.py    From research with BSD 3-Clause "New" or "Revised" License
from keras.models import Sequential
from keras.layers import Convolution2D, Dense, Dropout, ELU, Flatten, Lambda


def get_model(time_len=1):
  ch, row, col = 3, 160, 320  # camera format (channels, rows, cols)

  model = Sequential()
  model.add(Lambda(lambda x: x/127.5 - 1.,
            input_shape=(ch, row, col),
            output_shape=(ch, row, col)))
  model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
  model.add(ELU())
  model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(ELU())
  model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(Flatten())
  model.add(Dropout(.2))
  model.add(ELU())
  model.add(Dense(512))
  model.add(Dropout(.5))
  model.add(ELU())
  model.add(Dense(1))

  model.compile(optimizer="adam", loss="mse")

  return model 
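This excerpt uses the Keras 1 convolution API (Convolution2D with subsample and border_mode). Under Keras 2 the equivalent layers would look roughly like the sketch below; Keras 2 also defaults to channels-last input, so the shape here assumes that setting (this rewrite is an assumption, not code from the research project):

from keras.models import Sequential
from keras.layers import Conv2D, ELU

model = Sequential()
# Keras 1: Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same")
model.add(Conv2D(16, (8, 8), strides=(4, 4), padding="same",
                 input_shape=(160, 320, 3)))  # channels-last
model.add(ELU())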
Example #2
Source File: advanced_activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
from keras import layers
from keras.utils.test_utils import layer_test


def test_elu():
    for alpha in [0., .5, -1.]:
        layer_test(layers.ELU, kwargs={'alpha': alpha},
                   input_shape=(2, 3, 4)) 
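layer_test (available as keras.utils.test_utils.layer_test in older Keras releases) wraps the layer in a model, runs data through it, and round-trips its config. The numerical behaviour it exercises can also be checked directly; a small sanity-check sketch:

import numpy as np
from keras.models import Sequential
from keras.layers import ELU

for alpha in [0., .5, -1.]:
    model = Sequential([ELU(alpha=alpha, input_shape=(3, 4))])
    x = -np.ones((2, 3, 4), dtype="float32")
    # For x = -1, ELU returns alpha * (exp(-1) - 1).
    expected = alpha * (np.exp(-1.) - 1.)
    assert np.allclose(model.predict(x), expected, atol=1e-5)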
Example #3
Source File: malwaresnet.py    From youarespecial with MIT License
from keras.layers import Add, AveragePooling1D, Conv1D, ELU
from keras.regularizers import l2


def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            #basic = BatchNormalization()( basic ) # triggers known keras bug w/ TimeDistributed: https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)  
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)

        # pool_size=1 means this subsamples by `final_stride` without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))

    return f 
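A hypothetical usage of the helper above with the functional API (the sequence length and filter count are made up; note that Add() requires _input to already have `filters` channels, so the channel counts must match):

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(100, 64))                       # (timesteps, channels)
block = ResidualBlock1D_helper(layers=2, kernel_size=3, filters=64)
model = Model(inputs, block(inputs))
model.summary()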
Example #4
Source File: simple_multilayer.py    From youarespecial with MIT License
from keras.models import Sequential
from keras.layers import BatchNormalization, Dense, Dropout, ELU


def create_model(input_shape, hidden_layers=[1024, 512, 256], input_dropout=0.1, hidden_dropout=0.5):
    '''Define a simple multilayer perceptron.

    Args:
        input_shape (tuple): input shape to the model. For this model, should be of shape (dim,)
        hidden_layers (tuple): a tuple/list with the number of hidden units in each hidden layer
        input_dropout (float): fraction of input features to drop out during training
        hidden_dropout (float): fraction of hidden units to drop out during training

    Returns:
        keras.models.Sequential: a model to train
    '''
    model = Sequential()

    # drop out the input to prevent overfitting to any one feature
    # (similar in spirit to feature randomization in random forests,
    #  but with less severe feature sampling)
    model.add(Dropout(input_dropout, input_shape=input_shape))

    # set up hidden layers
    for n_hidden_units in hidden_layers:
        # the layer...activation will come later
        model.add(Dense(n_hidden_units))
        # dropout to prevent overfitting
        model.add(Dropout(hidden_dropout))
        # batch normalization helps training
        model.add(BatchNormalization())
        # ...the activation!
        model.add(ELU())

    # the output layer
    model.add(Dense(1, activation='sigmoid'))

    # we'll optimize with plain old sgd
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd', metrics=['accuracy'])

    return model 
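A usage sketch, assuming 1024-dimensional feature vectors (the dimensions and dummy data are illustrative):

import numpy as np

model = create_model(input_shape=(1024,))
X = np.random.rand(64, 1024).astype("float32")
y = np.random.randint(0, 2, size=(64, 1))
model.fit(X, y, epochs=1, batch_size=16)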
Example #5
Source File: mnist_swwae.py    From pCVR with Apache License 2.0
from keras import layers
from keras.layers import Activation, BatchNormalization, Conv2D, ELU


def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
    """The proposed residual block from [4].

    Running with elu=True uses an ELU nonlinearity; elu=False uses
    BatchNorm + ReLU. ELUs are faster because they avoid the BatchNorm
    overhead, but they may overfit more, since they lack the stochastic
    element of BatchNorm's batch formation, which acts as a good regularizer.

    # Arguments
        x: 4D tensor, the tensor to feed through the block
        nfeats: Integer, number of feature maps for conv layers.
        ksize: Integer, width and height of conv kernels in first convolution.
        nskipped: Integer, number of conv layers for the residual function.
        elu: Boolean, whether to use ELU or BN+RELU.

    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)`

    # Output shape
        4D tensor with shape:
        `(batch, filters, rows, cols)`
    """
    y0 = Conv2D(nfeats, ksize, padding='same')(x)
    y = y0
    for i in range(nskipped):
        if elu:
            y = ELU()(y)
        else:
            y = BatchNormalization(axis=1)(y)
            y = Activation('relu')(y)
        y = Conv2D(nfeats, 1, padding='same')(y)
    return layers.add([y0, y]) 
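A sketch of chaining such blocks with the functional API. The shapes below assume channels-first data (image_data_format set to 'channels_first'), matching the docstring and the axis=1 BatchNormalization:

from keras.layers import Input
from keras.models import Model

x_in = Input(shape=(1, 28, 28))                 # (channels, rows, cols)
y = convresblock(x_in, nfeats=8, ksize=3, nskipped=2, elu=True)
y = convresblock(y, nfeats=8, ksize=3, nskipped=2, elu=False)
model = Model(x_in, y)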
Example #6
Source File: train_agent_kerasrl.py    From gym-malware with MIT License
from keras.models import Sequential
from keras.layers import Activation, BatchNormalization, Dense, Dropout, ELU, Flatten


def generate_dense_model(input_shape, layers, nb_actions):
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dropout(0.1))  # drop out the input to make the model less sensitive to any one feature

    for layer in layers:
        model.add(Dense(layer))
        model.add(BatchNormalization())
        model.add(ELU(alpha=1.0))

    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    model.summary()  # summary() prints directly and returns None

    return model 
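A hypothetical call for a flat observation space (the window length, feature count, and action count below are illustrative, not taken from gym-malware):

# keras-rl passes observations of shape (window_length, n_features);
# Flatten in the model above collapses them to a single vector.
model = generate_dense_model(input_shape=(1, 2350), layers=[1024, 256], nb_actions=10)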
Example #7
Source File: test_layers.py    From nn-transfer with MIT License
def test_elu(self):
        keras_model = Sequential()
        keras_model.add(ELU(input_shape=(3, 32, 32), name='elu'))
        keras_model.compile(loss=keras.losses.categorical_crossentropy,
                            optimizer=keras.optimizers.SGD())

        pytorch_model = ELUNet()

        self.transfer(keras_model, pytorch_model)
        self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)

Example #8
Source File: test_layers.py    From nn-transfer with MIT License
def __init__(self):
        super(ELUNet, self).__init__()
        self.elu = nn.ELU() 
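The excerpt shows only the constructor; for the Keras-to-PyTorch comparison in the previous example to run, the module also needs a forward pass. A plausible reconstruction (the forward method is an assumption, not shown in the excerpt):

import torch.nn as nn

class ELUNet(nn.Module):
    def __init__(self):
        super(ELUNet, self).__init__()
        self.elu = nn.ELU()

    def forward(self, x):
        # Assumed: apply ELU elementwise, mirroring the Keras ELU layer.
        return self.elu(x)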
Example #9
Source File: sequence_blocks.py    From Neural-Chatbot with GNU General Public License v3.0
def Decoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _decoder 
Example #10
Source File: sequence_blocks.py    From Neural-Chatbot with GNU General Public License v3.0
def AttentionDecoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(GRU(hidden_size, activation='linear',
                                         return_sequences=return_sequences), attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=False), attention, single_attention_param=True)(x)
                # width matches branch_1 so concatenation yields hidden_size features
                branch_2 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(LSTM(hidden_size, activation='linear', return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x

    return _decoder 
Example #11
Source File: sequence_blocks.py    From Neural-Chatbot with GNU General Public License v3.0
def Encoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder 
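A sketch wiring the Encoder and Decoder blocks together (sequence length and sizes are illustrative). Reusing the default ELU() instance across calls is safe here because the layer holds no trainable weights:

from keras.layers import Input
from keras.models import Model

seq_in = Input(shape=(20, 128))                       # (timesteps, features)
encoded = Encoder(hidden_size=64, bidirectional=True)(seq_in)
decoded = Decoder(hidden_size=64)(encoded)
model = Model(seq_in, decoded)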
Example #12
Source File: cnn_prediction.py    From self-driving with MIT License
def buildModel(cameraFormat=(3, 480, 640)):
  """
  Build and return a CNN; details in the comments.
  The intent is a scaled down version of the model from "End to End Learning
  for Self-Driving Cars": https://arxiv.org/abs/1604.07316.

  Args:
    cameraFormat: (3-tuple) Ints to specify the input dimensions (color
        channels, rows, columns).
  Returns:
    A compiled Keras model.
  """
  print "Building model..."
  ch, row, col = cameraFormat

  model = Sequential()

  # Use a lambda layer to normalize the input data
  model.add(Lambda(
      lambda x: x/127.5 - 1.,
      input_shape=(ch, row, col),
      output_shape=(ch, row, col))
  )

  # Several convolutional layers, each followed by ELU activation
  # 8x8 convolution (kernel) with 4x4 stride over 16 output filters
  model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
  model.add(ELU())
  # 5x5 convolution (kernel) with 2x2 stride over 32 output filters
  model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
  model.add(ELU())
  # 5x5 convolution (kernel) with 2x2 stride over 64 output filters
  model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
  # Flatten the input to the next layer
  model.add(Flatten())
  # Apply dropout to reduce overfitting
  model.add(Dropout(.2))
  model.add(ELU())
  # Fully connected layer
  model.add(Dense(512))
  # More dropout
  model.add(Dropout(.5))
  model.add(ELU())
  # Fully connected layer with one output dimension (representing the speed).
  model.add(Dense(1))

  # Adam optimizer is a standard, efficient SGD optimization method
  # Loss function is mean squared error, standard for regression problems
  model.compile(optimizer="adam", loss="mse")

  return model 
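A hypothetical training call with dummy data, using the channels-first layout of cameraFormat (batch size and epoch count are illustrative):

import numpy as np

model = buildModel()
X = np.random.rand(8, 3, 480, 640).astype("float32")
y = np.random.rand(8, 1).astype("float32")          # target speeds
model.fit(X, y, batch_size=4, nb_epoch=1)           # Keras 1 spelling; use epochs=1 in Keras 2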
Example #13
Source File: train.py    From Deep-Music-Tagger with MIT License
def build_model(output_size):
    channel_axis = 3
    freq_axis = 1
    padding = 37

    input_shape = (img_height, img_width, channels)
    print('Building model...')

    model = Sequential()
    model.add(ZeroPadding2D(padding=(0, padding), data_format='channels_last', input_shape=input_shape))
    model.add(BatchNormalization(axis=freq_axis, name='bn_0_freq'))

    model.add(Conv2D(64, (3, 3), padding='same', name='conv1'))
    model.add(BatchNormalization(axis=channel_axis, name='bn1'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1'))
    model.add(Dropout(0.1, name='dropout1'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv2'))
    model.add(BatchNormalization(axis=channel_axis, name='bn2'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2'))
    model.add(Dropout(0.1, name='dropout2'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv3'))
    model.add(BatchNormalization(axis=channel_axis, name='bn3'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3'))
    model.add(Dropout(0.1, name='dropout3'))

    model.add(Conv2D(128, (3, 3), padding='same', name='conv4'))
    model.add(BatchNormalization(axis=channel_axis, name='bn4'))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4'))
    model.add(Dropout(0.1, name='dropout4'))

    model.add(Reshape(target_shape=(15, 128)))

    model.add(GRU(32, return_sequences=True, name='gru1'))
    model.add(GRU(32, return_sequences=False, name='gru2'))

    model.add(Dropout(0.3, name='dropout_final'))

    model.add(Dense(output_size, activation='softmax', name='output'))

    return model 
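build_model returns an uncompiled network; a typical compile step for the softmax output would be something like the following (the optimizer and loss are assumptions, not shown in the excerpt, and img_height, img_width, channels must be defined at module level):

model = build_model(output_size=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()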
Example #14
Source File: train.py    From Deep-Music-Tagger with MIT License
def build_model(output_size):
    input_shape = (img_height, img_width, channels)
    print('Building model...')

    model = Sequential()
    # The convolutional/recurrent stack from the previous example is disabled
    # in this variant: the spectrogram is flattened and fed directly to the
    # softmax classifier. The Reshape assumes channels == 1 so the element
    # counts match.
    model.add(Reshape(target_shape=(img_height * img_width,), input_shape=input_shape))
    model.add(Dense(output_size, activation='softmax', name='output'))

    return model