Python keras.layers.pooling.GlobalAveragePooling2D() Examples

The following are 13 code examples of keras.layers.pooling.GlobalAveragePooling2D(), drawn from open-source projects. The source file and project for each example are listed above it. You may also want to check out all available functions/classes of the module keras.layers.pooling.
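
As a minimal sketch of the layer itself (assuming the default channels-last image format), GlobalAveragePooling2D averages each feature map over its spatial dimensions, so a 4D tensor of shape (batch, rows, cols, channels) collapses to a 2D tensor of shape (batch, channels):

from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model

inputs = Input(shape=(32, 32, 64))         # a 32x32 feature map with 64 channels
pooled = GlobalAveragePooling2D()(inputs)  # average over the 32x32 spatial grid
model = Model(inputs, pooled)
print(model.output_shape)                  # (None, 64)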
Example #1
Source File: resnet.py    From convnet-study with MIT License
def resnet_model(nb_blocks, bottleneck=True, l2_reg=1e-4):
    nb_channels = [16, 32, 64]
    inputs = Input((32, 32, 3))
    x = Convolution2D(16, 3, 3, border_mode='same', init='he_normal',
                      W_regularizer=l2(l2_reg), bias=False)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for n, f in zip(nb_channels, [True, False, False]):
        x = block_stack(x, n, nb_blocks, bottleneck=bottleneck, l2_reg=l2_reg,
                        first=f)
    # Last BN-Relu
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10)(x)
    x = Activation('softmax')(x)

    model = Model(input=inputs, output=x)
    return model 
Example #2
Source File: densenet.py    From convnet-study with MIT License
def densenet_model(nb_blocks, nb_layers, growth_rate, dropout=0., l2_reg=1e-4,
                   init_channels=16):
    n_channels = init_channels
    inputs = Input(shape=(32, 32, 3))
    x = Convolution2D(init_channels, 3, 3, border_mode='same',
                      init='he_normal', W_regularizer=l2(l2_reg),
                      bias=False)(inputs)
    for i in range(nb_blocks - 1):
        # Create a dense block
        x = dense_block(x, nb_layers, growth_rate,
                        dropout=dropout, l2_reg=l2_reg)
        # Update the number of channels
        n_channels += nb_layers*growth_rate
        # Transition layer
        x = transition_block(x, n_channels, dropout=dropout, l2_reg=l2_reg)

    # Add last dense_block
    x = dense_block(x, nb_layers, growth_rate, dropout=dropout, l2_reg=l2_reg)
    # Add final BN-Relu
    x = BatchNormalization(gamma_regularizer=l2(l2_reg),
                             beta_regularizer=l2(l2_reg))(x)
    x = Activation('relu')(x)
    # Global average pooling
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, W_regularizer=l2(l2_reg))(x)
    x = Activation('softmax')(x)

    model = Model(input=inputs, output=x)
    return model

# Apply preprocessing as described in the paper: normalize each channel
# individually. We use the values from fb.resnet.torch, but computing them
# from the data gives a very close answer.
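
As an illustrative sketch (not the project's code; the helper name is hypothetical), the per-channel statistics can be computed directly from the training set, which, as the comment above notes, gives values very close to the fb.resnet.torch ones:

import numpy as np

def normalize_per_channel(train_x, test_x):
    # Compute per-channel mean and std on the training set only
    # (axes 0, 1, 2 = samples, rows, cols), then apply the same
    # statistics to both splits.
    mean = train_x.mean(axis=(0, 1, 2), keepdims=True)
    std = train_x.std(axis=(0, 1, 2), keepdims=True)
    return (train_x - mean) / std, (test_x - mean) / std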
Example #3
Source File: inceptionv3.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):

    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    inception_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
    inception_model.summary()

    x = inception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(input=inception_model.input, output=predictions) 
Example #4
Source File: xception.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):

    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    xception_model = Xception(include_top=False, weights=None, input_tensor=input_tensor)
    xception_model.summary()

    x = xception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(input=xception_model.input, output=predictions) 
Example #5
Source File: resnet.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):

    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    resnet_model = ResNet50(include_top=False, weights=None, input_tensor=input_tensor)
    resnet_model.summary()

    x = resnet_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(input=resnet_model.input, output=predictions) 
Example #6
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_globalpooling_2d():
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'data_format': 'channels_last'},
               input_shape=(3, 5, 6, 4))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'data_format': 'channels_last'},
               input_shape=(3, 5, 6, 4)) 
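
These layer_test calls cover both data formats: with data_format='channels_first' the (3, 4, 5, 6) input is averaged over its trailing 5x6 grid, while with data_format='channels_last' the (3, 5, 6, 4) input is averaged over its middle 5x6 grid, so both produce an output of shape (3, 4). A quick stand-alone check (a sketch, independent of the layer_test helper):

import numpy as np
from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model

x = np.random.rand(3, 5, 6, 4).astype('float32')
inp = Input(shape=(5, 6, 4))
model = Model(inp, GlobalAveragePooling2D(data_format='channels_last')(inp))
out = model.predict(x)
print(out.shape)                              # (3, 4)
print(np.allclose(out, x.mean(axis=(1, 2))))  # True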
Example #7
Source File: models_WGAN.py    From DeepLearningImplementations with MIT License
def discriminator(img_dim, bn_mode, model_name="discriminator"):
    """DCGAN discriminator

    Args:
        img_dim: dimension of the image output
        bn_mode: keras batchnorm mode
        model_name: model name (default: {"discriminator"})

    Returns:
        keras model
    """

    if K.image_dim_ordering() == "th":
        bn_axis = 1
        min_s = min(img_dim[1:])
    else:
        bn_axis = -1
        min_s = min(img_dim[:-1])

    disc_input = Input(shape=img_dim, name="discriminator_input")

    # Get the list of number of conv filters
    # (first layer starts with 64), filters are subsequently doubled
    nb_conv = int(np.floor(np.log(min_s // 4) / np.log(2)))
    list_f = [64 * min(8, (2 ** i)) for i in range(nb_conv)]

    # First conv with 2x2 strides
    x = Conv2D(list_f[0], (3, 3), strides=(2, 2), name="disc_conv2d_1",
               padding="same", use_bias=False,
               kernel_initializer=RandomNormal(stddev=0.02))(disc_input)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(0.2)(x)

    # Conv blocks: Conv2D(2x2 strides)->BN->LReLU
    for i, f in enumerate(list_f[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same", use_bias=False,
                   kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = LeakyReLU(0.2)(x)

    # Last convolution
    x = Conv2D(1, (3, 3), name="last_conv", padding="same", use_bias=False,
               kernel_initializer=RandomNormal(stddev=0.02))(x)
    # Average pooling
    x = GlobalAveragePooling2D()(x)

    discriminator_model = Model(inputs=[disc_input], outputs=[x], name=model_name)
    visualize_model(discriminator_model)

    return discriminator_model 
Example #8
Source File: densenet_fast.py    From semantic-embeddings with MIT License
def create_dense_net(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=16, dropout_rate=None,
                     weight_decay=1E-4, verbose=True):
    ''' Build the create_dense_net model

    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay

    Returns: the assembled DenseNet as a keras Model

    '''

    model_input = Input(shape=img_dim)

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"

    # layers in each dense block
    nb_layers = int((depth - 4) / 3)

    # Initial convolution
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", name="initial_conv2D", bias=False,
                      W_regularizer=l2(weight_decay))(model_input)

    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                            beta_regularizer=l2(weight_decay))(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        # add transition_block
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)

    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax', W_regularizer=l2(weight_decay), b_regularizer=l2(weight_decay))(x)

    densenet = Model(input=model_input, output=x, name="create_dense_net")

    if verbose: print("DenseNet-%d-%d created." % (depth, growth_rate))

    return densenet 
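
As a hedged usage sketch (the argument values below are illustrative, not taken from the project), the builder above could be invoked for CIFAR-10-sized inputs roughly like this:

# Illustrative only: assumes a TensorFlow backend with channels-last ordering,
# so img_dim is (rows, columns, channels).
model = create_dense_net(nb_classes=10, img_dim=(32, 32, 3), depth=40,
                         nb_dense_block=3, growth_rate=12, nb_filter=16,
                         dropout_rate=0.2, weight_decay=1e-4)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])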
Example #9
Source File: pretrain_image_gan.py    From costar_plan with Apache License 2.0
def _makeImageDiscriminator(self, img_shape):
        '''
        create image-only encoder to extract keypoints from the scene.

        Params:
        -------
        img_shape: shape of the image to encode
        '''
        img = Input(img_shape,name="img_encoder_in")
        img0 = Input(img_shape,name="img0_encoder_in")
        ins = [img, img0]
        dr = self.dropout_rate

        if self.use_wasserstein:
            loss = wasserstein_loss
            activation = "linear"
        else:
            loss = "binary_crossentropy"
            activation = "sigmoid"

        # common arguments
        kwargs = { "dropout_rate" : dr,
                   "padding" : "same",
                   "lrelu" : True,
                   "bn" : False,
                   "perm_drop" : True,
                 }

        x  = AddConv2D(img,  64, [4,4], 1, **kwargs)
        x0 = AddConv2D(img0, 64, [4,4], 1, **kwargs)
        x  = Add()([x, x0])
        x  = AddConv2D(x,    64, [4,4], 2, **kwargs)
        x  = AddConv2D(x,   128, [4,4], 2, **kwargs)
        x  = AddConv2D(x,   256, [4,4], 2, **kwargs)

        if self.use_wasserstein:
            x = Flatten()(x)
            x = AddDense(x, 1, "linear", 0., output=True, bn=False, perm_drop=True)
        else:
            x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
                bn=False, perm_drop=True)
            x = GlobalAveragePooling2D()(x)

        discrim = Model(ins, x, name="image_discriminator")
        self.lr *= 2.
        discrim.compile(loss=loss, loss_weights=[1.],
                optimizer=self.getOptimizer())
        self.lr *= 0.5
        self.image_discriminator = discrim
        return discrim 
Example #10
Source File: Densenet.py    From CNNArt with Apache License 2.0
def DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate,
             nb_filter, dropout_rate=None, weight_decay=1E-4):
    """ Build the DenseNet model
    :param nb_classes: int -- number of classes
    :param img_dim: tuple -- (channels, rows, columns)
    :param depth: int -- how many layers
    :param nb_dense_block: int -- number of dense blocks to add to end
    :param growth_rate: int -- number of filters to add
    :param nb_filter: int -- number of filters
    :param dropout_rate: float -- dropout rate
    :param weight_decay: float -- weight decay
    :returns: keras model with nb_layers of conv_factory appended
    :rtype: keras model
    """

    model_input = Input(shape=img_dim)

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"

    # layers in each dense block
    nb_layers = int((depth - 4) / 3)

    # Initial convolution
    x = Convolution2D(nb_filter, 3, 3,
                      init="he_uniform",
                      border_mode="same",
                      name="initial_conv2D",
                      bias=False,
                      W_regularizer=l2(weight_decay))(model_input)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = denseblock(x, nb_layers, nb_filter, growth_rate,
                                  dropout_rate=dropout_rate,
                                  weight_decay=weight_decay)
        # add transition
        x = transition(x, nb_filter, dropout_rate=dropout_rate,
                       weight_decay=weight_decay)

    # The last denseblock does not have a transition
    x, nb_filter = denseblock(x, nb_layers, nb_filter, growth_rate,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)


    x = Activation('relu')(x)
    x = GlobalAveragePooling2D(dim_ordering="th")(x)
    x = Dense(nb_classes,
              activation='softmax',
              W_regularizer=l2(weight_decay),
              b_regularizer=l2(weight_decay))(x)

    densenet = Model(input=[model_input], output=[x], name="DenseNet")

    return densenet 
Example #11
Source File: models.py    From music-auto_tagging-keras with MIT License
def raw_vgg(args, input_length=12000 * 29, tf='melgram', normalize=None,
            decibel=False, last_layer=True, sr=None):
    ''' when length = 12000*29 and 512/256 dft/hop, 
    melgram size: (n_mels, 1360)
    '''
    assert tf in ('stft', 'melgram')
    assert normalize in (None, False, 'no', 0, 0.0, 'batch', 'data_sample', 'time', 'freq', 'channel')
    assert isinstance(decibel, bool)

    if sr is None:
        sr = SR  # assumes 12000

    conv_until = args.conv_until
    trainable_kernel = args.trainable_kernel
    model = Sequential()
    # decode args
    fmin = args.fmin
    fmax = args.fmax
    if fmax == 0.0:
        fmax = sr / 2
    n_mels = args.n_mels
    trainable_fb = args.trainable_fb
    model.add(Melspectrogram(n_dft=512, n_hop=256, power_melgram=2.0,
                             input_shape=(1, input_length),
                             trainable_kernel=trainable_kernel,
                             trainable_fb=trainable_fb,
                             return_decibel_melgram=decibel,
                             sr=sr, n_mels=n_mels,
                             fmin=fmin, fmax=fmax,
                             name='melgram'))

    poolings = [(2, 4), (3, 4), (2, 5), (2, 4), (4, 4)]

    if normalize in ('batch', 'data_sample', 'time', 'freq', 'channel'):
        model.add(Normalization2D(normalize))
    model.add(get_convBNeluMPdrop(5, [32, 32, 32, 32, 32],
                                  [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3)],
                                  poolings, model.output_shape[1:], conv_until=conv_until))
    if conv_until != 4:
        model.add(GlobalAveragePooling2D())
    else:
        model.add(Flatten())

    if last_layer:
        model.add(Dense(50, activation='sigmoid'))
    return model 
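
A rough check of the docstring's melgram width (the exact frame count depends on kapre's padding): a clip of 12000 * 29 = 348000 samples with a hop of 256 gives 348000 / 256 = 1359.375, i.e. about 1360 frames, consistent with the quoted (n_mels, 1360) shape.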
Example #12
Source File: wide_resnet.py    From keras-contrib with MIT License
def __create_wide_residual_network(nb_classes, img_input, include_top, depth=28,
                                   width=8, dropout=0.0, activation='softmax'):
    ''' Creates a Wide Residual Network with specified parameters

    Args:
        nb_classes: Number of output classes
        img_input: Input tensor or layer
        include_top: Flag to include the last dense layer
        depth: Depth of the network. Compute N = (n - 4) / 6.
               For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
               For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
               For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
        width: Width of the network.
        dropout: Adds dropout if value is greater than 0.0

    Returns:a Keras Model
    '''

    N = (depth - 4) // 6

    x = __conv1_block(img_input)
    nb_conv = 4

    for i in range(N):
        x = __conv2_block(x, width, dropout)
        nb_conv += 2

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        x = __conv3_block(x, width, dropout)
        nb_conv += 2

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        x = ___conv4_block(x, width, dropout)
        nb_conv += 2

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(nb_classes, activation=activation)(x)

    return x 
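
As an illustrative sketch only (assuming the private helpers above are in scope, as in keras-contrib's wide_resnet.py), the returned tensor can be wrapped into a model for CIFAR-10-sized inputs roughly like this:

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(32, 32, 3))
x = __create_wide_residual_network(10, img_input, include_top=True,
                                   depth=28, width=8, dropout=0.0)
model = Model(img_input, x)   # a WRN-28-8 classifier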
Example #13
Source File: DenseNet.py    From DenseNet-Cifar10 with MIT License
def createDenseNet(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=16, dropout_rate=None,
                     weight_decay=1E-4, verbose=True):
    ''' Build the create_dense_net model
    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay
    Returns: the assembled DenseNet as a keras Model
    '''

    model_input = Input(shape=img_dim)

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"

    # layers in each dense block
    nb_layers = int((depth - 4) / 3)

    # Initial convolution
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same", name="initial_conv2D", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(model_input)

    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                            beta_regularizer=l2(weight_decay))(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        # add transition_block
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)

    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)

    densenet = Model(inputs=model_input, outputs=x)

    if verbose: 
        print("DenseNet-%d-%d created." % (depth, growth_rate))

    return densenet