Python keras.backend.spatial_2d_padding() Examples

The following are 6 code examples of keras.backend.spatial_2d_padding(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
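As a quick orientation before the examples: spatial_2d_padding() zero-pads the two spatial axes (rows and columns) of a 4D tensor. The snippet below is a minimal sketch against the Keras 2 signature, where padding is a pair of (before, after) pairs; several of the older projects below instead pass a flat (rows, cols) tuple from the Keras 1 API. Shapes in the comments assume the channels-last data format.

import numpy as np
from keras import backend as K

# One 4x4 single-channel image, channels-last: (batch, rows, cols, channels)
x = K.constant(np.ones((1, 4, 4, 1)))

# Keras 2 signature: padding=((top, bottom), (left, right))
padded = K.spatial_2d_padding(x, padding=((1, 1), (2, 2)))

print(K.int_shape(padded))  # (1, 6, 8, 1): rows grew by 1+1, cols by 2+2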
Example #1
Source File: customlayers.py    From convnets-keras with MIT License 6 votes
from keras import backend as K
from keras.layers.core import Lambda


def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """

    def f(X):
        # X is channels-first: (batch, channels, rows, cols)
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        # Move channels last, then zero-pad them by `half` on each side.
        # This uses the Keras 1 signature padding=(rows, cols); with the
        # Theano dim ordering this project assumes, the padding lands on
        # the last two axes of the permuted tensor, i.e. on the channels.
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        # Sum the squared activations over a sliding window of n channels.
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
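For context, a usage sketch (the layer and tensor names here are illustrative, not from the project): the factory returns a Keras Lambda layer, so it can be dropped into a channels-first model like any other layer.

from keras.layers import Input
from keras.models import Model

# Hypothetical: normalize a channels-first feature map of shape (96, 27, 27)
inp = Input(shape=(96, 27, 27))
out = crosschannelnormalization(name='lrn_1')(inp)
model = Model(inp, out)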
Example #2
Source File: customlayers.py    From deep-mil-for-whole-mammogram-classification with MIT License 6 votes
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #3
Source File: customlayers.py    From cnn_evaluation_smoke with GNU General Public License v3.0 6 votes
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        # The padding amount (0, half) is required here: without it the
        # sliding-window loop below asks for ch + 2*half channels, but the
        # default padding only adds one on each side.
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #4
Source File: model.py    From 2018DSB with MIT License 6 votes
def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    # Sobel-style finite differences of the four direction maps; each
    # 3x3 stencil trims one pixel from every border.
    dy = output_y[:, 2:, 2:] - output_y[:, :-2, 2:] + \
         2 * (output_y[:, 2:, 1:-1] - output_y[:, :-2, 1:-1]) + \
         output_y[:, 2:, :-2] - output_y[:, :-2, :-2]
    dx = output_x[:, 2:, 2:] - output_x[:, 2:, :-2] + \
         2 * (output_x[:, 1:-1, 2:] - output_x[:, 1:-1, :-2]) + \
         output_x[:, :-2, 2:] - output_x[:, :-2, :-2]
    ddr = (output_dr[:, 2:, 2:] - output_dr[:, :-2, :-2] +
           output_dr[:, 1:-1, 2:] - output_dr[:, :-2, 1:-1] +
           output_dr[:, 2:, 1:-1] - output_dr[:, 1:-1, :-2]) * K.constant(2)
    ddl = (output_dl[:, 2:, :-2] - output_dl[:, :-2, 2:] +
           output_dl[:, 2:, 1:-1] - output_dl[:, 1:-1, 2:] +
           output_dl[:, 1:-1, :-2] - output_dl[:, :-2, 1:-1]) * K.constant(2)
    dpred = K.concatenate([dy, dx, ddr, ddl], axis=-1)
    # Default padding ((1, 1), (1, 1)) restores the one-pixel border lost
    # to the stencils above.
    dpred = K.spatial_2d_padding(dpred)
    # Foreground mask: pixels where every gradient channel clears the threshold.
    weight_fg = K.cast(K.all(dpred > K.constant(config.GRADIENT_THRES), axis=3,
                             keepdims=True), K.floatx())

    weight = K.clip(K.sqrt(weight_fg * K.prod(dpred, axis=3, keepdims=True)),
                    config.WEIGHT_AREA / config.CLIP_AREA_HIGH,
                    config.WEIGHT_AREA / config.CLIP_AREA_LOW)
    weight += (1 - weight_fg) * config.WEIGHT_AREA / config.BG_AREA
    # Smooth the weight map with a fixed Gaussian kernel; no gradient flows back.
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    return K.stop_gradient(weight)
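A small shape check of the padding step above (illustrative, channels-last shapes): each 3x3 stencil trims one pixel from every border, and spatial_2d_padding with its default padding of ((1, 1), (1, 1)) puts that border back as zeros.

import numpy as np
from keras import backend as K

y = K.constant(np.random.rand(1, 8, 8, 1))
dy = y[:, 2:, 2:] - y[:, :-2, :-2]   # stencil-style slice: (1, 6, 6, 1)
restored = K.spatial_2d_padding(dy)  # default ((1, 1), (1, 1))
print(K.int_shape(restored))         # (1, 8, 8, 1)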
Example #5
Source File: BMM_attention_model.py    From BMM_attentional_CNN with GNU General Public License v3.0 6 votes
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example #6
Source File: KerasDeconv.py    From DeepLearningImplementations with MIT License 4 votes
def _deconv(self, X, lname, d_switch, feat_map=None):
    # Method of a deconv-visualization class; assumes `import numpy as np`
    # and `from keras import backend as K` with the Theano backend
    # (K.T.nnet is Theano-only).
    o_width, o_height = self[lname].output_shape[-2:]

    # Get filter size
    f_width = self[lname].W_shape[2]
    f_height = self[lname].W_shape[3]

    # Compute padding needed: a 'valid' conv maps width i to i - f + 1,
    # so each side needs (o - i + f - 1) / 2 pixels of padding. Use floor
    # division plus a parity check so this also runs under Python 3, where
    # `/` would return a float.
    i_width, i_height = X.shape[-2:]
    assert (o_width - i_width + f_width - 1) % 2 == 0, \
        "Pad width size issue at layer %s" % lname
    assert (o_height - i_height + f_height - 1) % 2 == 0, \
        "Pad height size issue at layer %s" % lname
    pad_width = (o_width - i_width + f_width - 1) // 2
    pad_height = (o_height - i_height + f_height - 1) // 2

    # Set to zero based on switch values
    X[d_switch[lname]] = 0
    # Get activation function
    activation = self[lname].activation
    X = activation(X)
    if feat_map is not None:
        print("Setting other feat map to zero")
        for i in range(X.shape[1]):
            if i != feat_map:
                X[:, i, :, :] = 0
        print("Setting non max activations to zero")
        for i in range(X.shape[0]):
            iw, ih = np.unravel_index(
                X[i, feat_map, :, :].argmax(), X[i, feat_map, :, :].shape)
            m = np.max(X[i, feat_map, :, :])
            X[i, feat_map, :, :] = 0
            X[i, feat_map, iw, ih] = m
    # Get filters. No bias for now
    W = self[lname].W
    # Transpose and flip the filters for the deconvolution
    W = W.transpose([1, 0, 2, 3])
    W = W[:, :, ::-1, ::-1]
    # CUDNN for conv2d ?
    conv_out = K.T.nnet.conv2d(
        input=self.x, filters=W, border_mode='valid')
    # Add padding to get correct size (Keras 1 signature, Theano ordering)
    pad = K.function([self.x], K.spatial_2d_padding(
        self.x, padding=(pad_width, pad_height), dim_ordering="th"))
    X_pad = pad([X])
    # Get Deconv output
    deconv_func = K.function([self.x], conv_out)
    X_deconv = deconv_func([X_pad])
    assert X_deconv.shape[-2:] == (o_width, o_height), \
        "Deconv output at %s has wrong size" % lname
    return X_deconv
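A quick check of the padding arithmetic used above (illustrative numbers, not from the project): a 'valid' convolution maps an input of width i to o = i - f + 1, so recovering a target output width requires padding the input by (o - i + f - 1) / 2 on each side.

# Illustrative: recover a 28-wide output with a 5-wide filter
o_width, i_width, f_width = 28, 28, 5
pad_width = (o_width - i_width + f_width - 1) // 2  # = 2
padded = i_width + 2 * pad_width                    # = 32
assert padded - f_width + 1 == o_width              # 'valid' conv: o = i - f + 1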