Python keras.layers.GlobalMaxPooling2D() Examples

The following are 15 code examples of keras.layers.GlobalMaxPooling2D(), drawn from open-source projects. The source project, file, and license are noted above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
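As a quick orientation before the examples: GlobalMaxPooling2D collapses each feature map to its single maximum value, reducing a (batch, height, width, channels) tensor to (batch, channels). A minimal sketch:

from keras.models import Sequential
from keras.layers import GlobalMaxPooling2D

model = Sequential()
model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
print(model.output_shape)  # (None, 3): one max value per channel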
Example #1
Source File: PredictOneClassifier.py    From kaggle-rsna18 with MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained=None): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

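For context, a hedged sketch of calling get_model, assuming the usual keras imports that the excerpt omits. The stub below stands in for the project's base-model factory, which is not stock Keras (it accepts a custom channels keyword):

from keras.layers import Input, Conv2D
from keras.models import Model

def stub_base(input_shape, include_top, weights, channels):
    # Minimal stand-in for the project's base-model constructor.
    inp = Input(shape=input_shape)
    out = Conv2D(8, (3, 3), activation="relu")(inp)
    return Model(inputs=inp, outputs=out)

model = get_model(stub_base, layer=0, input_shape=(32, 32, 1),
                  classes=2, pooling="max")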
Example #2
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
        # Global Pooling 1D
        model = Sequential()
        model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Global Pooling 2D
        model = Sequential()
        model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 1D
        model = Sequential()
        model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Pooling 2D
        model = Sequential()
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 3D
        model = Sequential()
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                               input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 11)


# ********** Locally-connected Layers ********** 
Example #3
Source File: layers.py    From deephar with MIT License
def global_max_min_pooling(x, name=None):
    if 'global_max_min_pool_cnt' not in globals():
        global global_max_min_pool_cnt
        global_max_min_pool_cnt = 0

    if name is None:
        name = 'GlobalMaxMinPooling2D_%d' % global_max_min_pool_cnt
        global_max_min_pool_cnt += 1

    def _global_max_plus_min(x):
        x1 = GlobalMaxPooling2D()(x)
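        # GlobalMaxPooling2D applied to -x yields -min(x), so the
        # difference below works out to max(x) + min(x) per channel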
        x2 = GlobalMaxPooling2D()(-x)
        return x1 - x2

    return Lambda(_global_max_plus_min, name=name)(x) 
Example #4
Source File: layers.py    From deephar with MIT License
def keypoint_confidence(x, name=None):
    """Implements the keypoint (body joint) confidence, given a set of
    probability maps as input. No parameters required.
    """
    def _keypoint_confidence(x):
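        # 4 * the average over a 2x2 window equals the sum over that
        # window, i.e. the local probability mass; the global max of
        # these sums is then taken as the joint's confidence score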
        x = 4 * AveragePooling2D((2, 2), strides=(1, 1))(x)
        x = K.expand_dims(GlobalMaxPooling2D()(x), axis=-1)

        return x

    f = Lambda(_keypoint_confidence, name=name)

    return TimeDistributed(f, name=name)(x) if K.ndim(x) == 5 else f(x) 
Example #5
Source File: test_tf_keras_layers.py    From tf-coreml with Apache License 2.0
def test_global_max_pooling(self):
      model = Sequential()
      model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
      self._test_keras_model(model, has_variables = False) 
Example #6
Source File: PredictClassifierEnsemble.py    From kaggle-rsna18 with MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained=None): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Example #7
Source File: TrainOneClassifier.py    From kaggle-rsna18 with MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Example #8
Source File: TrainClassifierEnsemble.py    From kaggle-rsna18 with MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Example #9
Source File: attention_module-checkpoint.py    From CBAM-keras with MIT License
def channel_attention(input_feature, ratio=8):
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]
	
	shared_layer_one = Dense(channel//ratio,
							 activation='relu',
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	shared_layer_two = Dense(channel,
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	
	avg_pool = GlobalAveragePooling2D()(input_feature)    
	avg_pool = Reshape((1,1,channel))(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	avg_pool = shared_layer_one(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
	avg_pool = shared_layer_two(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	
	max_pool = GlobalMaxPooling2D()(input_feature)
	max_pool = Reshape((1,1,channel))(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	max_pool = shared_layer_one(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
	max_pool = shared_layer_two(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	
	cbam_feature = Add()([avg_pool,max_pool])
	cbam_feature = Activation('sigmoid')(cbam_feature)
	
	if K.image_data_format() == "channels_first":
		cbam_feature = Permute((3, 1, 2))(cbam_feature)
	
	return multiply([input_feature, cbam_feature]) 
Example #10
Source File: attention_module.py    From CBAM-keras with MIT License
def channel_attention(input_feature, ratio=8):
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]
	
	shared_layer_one = Dense(channel//ratio,
							 activation='relu',
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	shared_layer_two = Dense(channel,
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	
	avg_pool = GlobalAveragePooling2D()(input_feature)    
	avg_pool = Reshape((1,1,channel))(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	avg_pool = shared_layer_one(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
	avg_pool = shared_layer_two(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	
	max_pool = GlobalMaxPooling2D()(input_feature)
	max_pool = Reshape((1,1,channel))(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	max_pool = shared_layer_one(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
	max_pool = shared_layer_two(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	
	cbam_feature = Add()([avg_pool,max_pool])
	cbam_feature = Activation('sigmoid')(cbam_feature)
	
	if K.image_data_format() == "channels_first":
		cbam_feature = Permute((3, 1, 2))(cbam_feature)
	
	return multiply([input_feature, cbam_feature]) 
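For orientation, a hedged sketch of dropping this attention block into a network; the surrounding layers are illustrative, not from CBAM-keras:

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))
feat = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
feat = channel_attention(feat, ratio=8)  # reweights the 64 channels
model = Model(inputs=inp, outputs=feat)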
Example #11
Source File: dl_image_toolbox_utils.py    From dataiku-contrib with Apache License 2.0
def enrich_model(base_model, pooling, dropout, reg, n_classes, params, verbose):

    # Init params if not done before
    params = {} if params is None else params
    
    # Loading appropriate params
    params["pooling"] = select_param("pooling", pooling, params)
    params["n_classes"] = select_param("n_classes", n_classes, params)

    x = base_model.layers[-1].output
        
    if params["pooling"] == 'None' :
        x = Flatten()(x)
    elif params["pooling"] == 'avg' :
        x = GlobalAveragePooling2D()(x)
    elif params["pooling"] == 'max' :
        x = GlobalMaxPooling2D()(x)

    if dropout is not None and dropout != 0.0 :
        x = Dropout(dropout)(x)
        if verbose:
            print("Adding dropout to model with rate: {}".format(dropout))

    regularizer = None
    if reg is not None:
        reg_l2 = reg["l2"]
        reg_l1 = reg["l1"]
        if (reg_l1 != 0.0) and (reg_l2 != 0.0) :
            regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2)
        if (reg_l1 == 0.0) and (reg_l2 != 0.0) :
            regularizer = regularizers.l2(reg_l2)
        if (reg_l1 != 0.0) and (reg_l2 == 0.0) :
            regularizer = regularizers.l1(reg_l1)
        if verbose:
            print("Using regularizer for model: {}".format(reg))
    
    predictions = Dense(params["n_classes"], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    return model, params

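select_param is a project helper not shown in the excerpt; a rough stand-in and a hedged call might look like the following sketch, where all names and values are illustrative:

def select_param(name, value, params):
    # Simplified stand-in: prefer the explicit argument, fall back to
    # whatever was stored in params earlier (an assumption about the
    # project's own helper, which is not shown above).
    return value if value is not None else params.get(name)

# model, params = enrich_model(base_model, pooling='max', dropout=0.5,
#                              reg={"l1": 0.0, "l2": 1e-4},
#                              n_classes=10, params=None, verbose=1)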
Example #12
Source File: nasnet.py    From Keras-NASNet with MIT License
def _add_auxilary_head(x, classes, weight_decay, pooling, include_top):
    '''Adds an auxilary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxilary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    weights = load_auxilary_branch()

    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('auxilary_branch'):
        auxilary_x = Activation('relu')(x)
        auxilary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxilary_x)
        auxilary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
                            weights=[weights['conv1']])(auxilary_x)
        auxilary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_projection',
                                        weights=weights['bn1'])(auxilary_x)
        auxilary_x = Activation('relu')(auxilary_x)

        auxilary_x = Conv2D(768, (auxilary_x._keras_shape[img_height], auxilary_x._keras_shape[img_width]),
                            padding='valid', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay), name='aux_conv_reduction',
                            weights=[weights['conv2']])(auxilary_x)
        auxilary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_reduction',
                                        weights=weights['bn2'])(auxilary_x)
        auxilary_x = Activation('relu')(auxilary_x)

        if include_top:
            auxilary_x = GlobalAveragePooling2D()(auxilary_x)
            auxilary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                                name='aux_predictions', weights=weights['fc'])(auxilary_x)
        else:
            if pooling == 'avg':
                auxilary_x = GlobalAveragePooling2D()(auxilary_x)
            elif pooling == 'max':
                auxilary_x = GlobalMaxPooling2D()(auxilary_x)

    return auxilary_x 
Example #13
Source File: nasnet.py    From Keras-NASNet with MIT License
def _add_auxiliary_head(x, classes, weight_decay, pooling, include_top):
    '''Adds an auxiliary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxiliary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('auxiliary_branch'):
        auxiliary_x = Activation('relu')(x)
        auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxiliary_x)
        auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_projection')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height], auxiliary_x._keras_shape[img_width]),
                            padding='valid', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay), name='aux_conv_reduction')(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                        name='aux_bn_reduction')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        if include_top:
            auxiliary_x = Flatten()(auxiliary_x)
            auxiliary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                                name='aux_predictions')(auxiliary_x)
        else:
            if pooling == 'avg':
                auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
            elif pooling == 'max':
                auxiliary_x = GlobalMaxPooling2D()(auxiliary_x)

    return auxiliary_x 
Example #14
Source File: hypertree_model.py    From costar_plan with Apache License 2.0
def classifier_block(input_tensor, include_top=True, top='classification',
                     classes=1, activation='sigmoid',
                     input_shape=None, final_pooling=None, name='', verbose=1):
    """ Performs the final Activation for the classification of a given problem.

    # Arguments

        include_top: Whether to include the fully-connected
            layer at the top of the network. Also maps to require_flatten
            option in `keras.applications.imagenet_utils._obtain_input_shape()`.
    """
    x = input_tensor
    if include_top and top == 'classification':
        if verbose:
            print("    classification of x: " + str(x))
        x = Dense(units=classes, activation=activation,
                  kernel_initializer="he_normal", name=name + 'fc' + str(classes))(x)

    elif include_top and top == 'segmentation':
        if verbose > 0:
            print("    segmentation of x: " + str(x))
        x = Conv2D(classes, (1, 1), activation='linear', padding='same')(x)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, classes))(x)
    elif include_top and top == 'quaternion':
        x = Dense(units=classes, activation='linear',
                  kernel_initializer="he_normal", name=name + 'fc' + str(classes))(x)
        # normalize the output so we have a unit quaternion
        x = Lambda(lambda x: K.l2_normalize(x, axis=1))(x)
    elif final_pooling == 'avg':
        if verbose:
            print("    GlobalAveragePooling2D")
        x = GlobalAveragePooling2D()(x)

    elif final_pooling == 'max':
        if verbose:
            print("    GlobalMaxPooling2D")
        x = GlobalMaxPooling2D()(x)
    else:
        raise ValueError('hypertree_model.py::classifier_block() unsupported top: ' + str(top))
    return x 
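A hedged usage sketch for the classification top; the shapes and the name prefix are illustrative, not from costar_plan:

from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model

inp = Input(shape=(64, 64, 3))
x = GlobalAveragePooling2D()(inp)  # pooled features, shape (None, 3)
out = classifier_block(x, include_top=True, top='classification',
                       classes=1, activation='sigmoid', name='demo_')
model = Model(inputs=inp, outputs=out)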
Example #15
Source File: model.py    From DeepLearn with MIT License
def cnn(embedding_matrix, dimx=50, dimy=50, nb_filter = 120, 
        embedding_dim = 50,filter_length = (50,4), vocab_size = 8000, depth = 1):

    print('Model Uses Basic CNN......')
    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')   
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    
    x = word2vec_embedding_layer(embedding_matrix,train=False)(inpx)
    y = word2vec_embedding_layer(embedding_matrix,train=False)(inpy)
    
    x = Permute((2,1))(x)
    y = Permute((2,1))(y)

    conv1 = Reshape((embedding_dim,dimx,1))(x)
    conv2 = Reshape((embedding_dim,dimy,1))(y)   
       
    channel_1, channel_2 = [], []
    
    for dep in range(depth):
        
        #conv1 = ZeroPadding2D((filter_width - 1, 0))(conv1)
        #conv2 = ZeroPadding2D((filter_width - 1, 0))(conv2)
        

        ques = Conv2D(filters=nb_filter, kernel_size=filter_length, activation='relu',
                      data_format='channels_last', padding='valid')(conv1)
        ans = Conv2D(filters=nb_filter, kernel_size=filter_length, activation='relu',
                     data_format='channels_last', padding='valid')(conv2)
                    
            
        #conv1 = GlobalMaxPooling2D()(ques)
        #conv2 = GlobalMaxPooling2D()(ans)
        #conv1 = MaxPooling2D()(ques)
        #conv2 = MaxPooling2D()(ans)
        
        channel_1.append(GlobalMaxPooling2D()(ques))
        channel_2.append(GlobalMaxPooling2D()(ans))
        
        #channel_1.append(GlobalAveragePooling2D()(ques))
        #channel_2.append(GlobalAveragePooling2D()(ans))
    
    h1 = channel_1.pop(-1)
    if channel_1:
        h1 = concatenate([h1] + channel_1)

    h2 = channel_2.pop(-1)
    if channel_2:
        h2 = concatenate([h2] + channel_2)

    h = concatenate([h1, h2], name='h')
    #h = Dropout(0.2)(h)
    #h = Dense(50, kernel_regularizer=regularizers.l2(reg2),activation='relu')(h)
    #wrap = Dropout(0.5)(h)
    #wrap = Dense(64, activation='tanh')(h)   
    
    score = Dense(2,activation='softmax',name='score')(h)
    model = Model([inpx, inpy],[score])
    model.compile( loss='categorical_crossentropy',optimizer='adam')
    
    return model 
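word2vec_embedding_layer is a project helper the excerpt omits; a hedged stand-in plus a call might look like this sketch (the random matrix, the Embedding-based helper, and the sizes are all illustrative assumptions):

import numpy as np
from keras.layers import Embedding

def word2vec_embedding_layer(embedding_matrix, train=False):
    # Stand-in: an Embedding layer initialized from a pretrained matrix
    # (an assumption; the project's own helper is not shown above).
    return Embedding(input_dim=embedding_matrix.shape[0],
                     output_dim=embedding_matrix.shape[1],
                     weights=[embedding_matrix], trainable=train)

emb = np.random.rand(8000, 50).astype('float32')
model = cnn(emb, dimx=50, dimy=50)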