Python keras.backend.set_image_data_format() Examples

The following are 21 code examples of keras.backend.set_image_data_format(), drawn from open-source projects. The source file, project, and license for each snippet are noted above the example. You may also want to check out the other available functions and classes of the keras.backend module.
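For context, set_image_data_format() sets Keras's global default image layout: 'channels_last' puts the channel axis last, while 'channels_first' puts it immediately after the batch axis, and the choice determines the input shapes convolutional layers expect. A minimal sketch, assuming a standard Keras 2.x installation:

from keras import backend as K

K.set_image_data_format('channels_first')
assert K.image_data_format() == 'channels_first'
# A batch of four 32x32 RGB images would now be shaped (4, 3, 32, 32).

K.set_image_data_format('channels_last')
assert K.image_data_format() == 'channels_last'
# The same batch would be shaped (4, 32, 32, 3).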
Example #1
Source File: util.py    From keras-transfer-learning-for-oxford102 with MIT License
def set_img_format():
    try:
        if K.backend() == 'theano':
            K.set_image_data_format('channels_first')
        else:
            K.set_image_data_format('channels_last')
    except AttributeError:
        if K._BACKEND == 'theano':
            K.set_image_dim_ordering('th')
        else:
            K.set_image_dim_ordering('tf') 
Example #2
Source File: test_deepseg_sc.py    From spinalcordtoolbox with MIT License
def test_segment_2d():
    from keras import backend as K
    K.set_image_data_format("channels_last")  # Set at channels_first in test_deepseg_lesion.test_segment()

    contrast_test = 't2'
    model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_sc_models', '{}_sc.h5'.format(contrast_test))   

    fname_t2 = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2.nii.gz')  # install: sct_download_data -d sct_testing_data
    fname_t2_seg = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2_seg.nii.gz')  # install: sct_download_data -d sct_testing_data

    img, gt = _preprocess_segment(fname_t2, fname_t2_seg, contrast_test)

    seg = deepseg_sc.segment_2d(model_fname=model_path, contrast_type=contrast_test, input_size=(64,64), im_in=img)
    assert seg.dtype == np.dtype('float32')

    seg_im = img.copy()
    seg_im.data = (seg > 0.5).astype(np.uint8)
    assert msct_image.compute_dice(seg_im, gt) > 0.80 
Example #3
Source File: test_deepseg_sc.py    From spinalcordtoolbox with MIT License
def test_segment_3d():
    from keras import backend as K
    K.set_image_data_format("channels_last")  # Set at channels_first in test_deepseg_lesion.test_segment()

    contrast_test = 't2'
    model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_sc_models', '{}_sc_3D.h5'.format(contrast_test))   

    fname_t2 = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2.nii.gz')  # install: sct_download_data -d sct_testing_data
    fname_t2_seg = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2_seg.nii.gz')  # install: sct_download_data -d sct_testing_data

    img, gt = _preprocess_segment(fname_t2, fname_t2_seg, contrast_test, dim_3=True)

    seg = deepseg_sc.segment_3d(model_fname=model_path, contrast_type=contrast_test, im_in=img)
    assert seg.dtype == np.dtype('float32')

    seg_im = img.copy()
    seg_im.data = (seg > 0.5).astype(np.uint8)
    assert msct_image.compute_dice(seg_im, gt) > 0.80 
Example #4
Source File: test_utils.py    From keras-vis with MIT License
def across_data_formats(func):
    """Function wrapper to run tests on multiple keras data_format and clean up after TensorFlow tests.

    Args:
        func: test function to clean up after.

    Returns:
        A function wrapping the input function.
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        for data_format in {'channels_first', 'channels_last'}:
            K.set_image_data_format(data_format)
            func(*args, **kwargs)
            if K.backend() == 'tensorflow':
                K.clear_session()
                tf.reset_default_graph()
    return wrapper 
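A hedged usage sketch of the decorator above (the test name, model, and shapes are hypothetical, not taken from keras-vis): wrapping a test runs it once per data format and clears the TensorFlow session between runs.

@across_data_formats
def test_tiny_model_builds():
    # Hypothetical test body: build a tiny model under whichever data format is active.
    from keras import backend as K
    from keras.models import Sequential
    from keras.layers import Flatten, Dense
    shape = (8, 8, 3) if K.image_data_format() == 'channels_last' else (3, 8, 8)
    model = Sequential([Flatten(input_shape=shape), Dense(2, activation='softmax')])
    assert model.output_shape == (None, 2)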
Example #5
Source File: test_utils.py    From keras-vis with MIT License
def test_get_img_shape_on_3d_image():
    n = 5
    channels = 4
    dim1 = 1
    dim2 = 2
    dim3 = 3

    K.set_image_data_format('channels_first')
    assert (n, channels, dim1, dim2, dim3) == utils.get_img_shape(K.ones(shape=(n, channels, dim1, dim2, dim3)))

    K.set_image_data_format('channels_last')
    assert (n, channels, dim1, dim2, dim3) == utils.get_img_shape(K.ones(shape=(n, dim1, dim2, dim3, channels))) 
Example #6
Source File: p2b1_baseline_keras2.py    From Benchmarks with MIT License
def initialize_parameters(default_model = 'p2b1_default_model.txt'):

    # Build benchmark object
    p2b1Bmk = p2b1.BenchmarkP2B1(p2b1.file_path, default_model, 'keras',
                                 prog='p2b1_baseline',
                                 desc='Train Molecular Frame Autoencoder - Pilot 2 Benchmark 1')

    # Initialize parameters
    GP = candle.finalize_parameters(p2b1Bmk)
    #p2b1.logger.info('Params: {}'.format(gParameters))

    print ('\nTraining parameters:')
    for key in sorted(GP):
        print ("\t%s: %s" % (key, GP[key]))

    # print json.dumps(GP, indent=4, skipkeys=True, sort_keys=True)

    if GP['backend'] != 'theano' and GP['backend'] != 'tensorflow':
        sys.exit('Invalid backend selected: %s' % GP['backend'])

    os.environ['KERAS_BACKEND'] = GP['backend']
    reload(K)
    '''
    if GP['backend'] == 'theano':
        K.set_image_dim_ordering('th')
    elif GP['backend'] == 'tensorflow':
        K.set_image_dim_ordering('tf')
    '''
    K.set_image_data_format('channels_last')
    # "th" format means that the convolutional kernels will have the shape (depth, input_depth, rows, cols)
    # "tf" format means that the convolutional kernels will have the shape (rows, cols, input_depth, depth)
    print("Image data format: ", K.image_data_format())
    # print "Image ordering: ", K.image_dim_ordering()
    return GP
Example #7
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_get_block():
    """Test get residual block."""
    K.set_image_data_format('channels_last')
    Resnet3DBuilder.build((224, 224, 224, 1), 2, 'bottleneck',
                          [2, 2, 2, 2], reg_factor=1e-4)
    assert True
    with pytest.raises(ValueError):
        Resnet3DBuilder.build((224, 224, 224, 1), 2, 'nullblock',
                              [2, 2, 2, 2], reg_factor=1e-4) 
Example #8
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_bad_shape():
    """Input shape need to be 4."""
    K.set_image_data_format('channels_last')
    with pytest.raises(ValueError):
        Resnet3DBuilder.build_resnet_152((224, 224, 224), 2) 
Example #9
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_resnet3d_101(resnet3d_test):
    """Test 101."""
    K.set_image_data_format('channels_last')
    model = Resnet3DBuilder.build_resnet_101((224, 224, 224, 1), 2)
    resnet3d_test(model)
    K.set_image_data_format('channels_first')
    model = Resnet3DBuilder.build_resnet_101((1, 512, 512, 256), 2)
    resnet3d_test(model) 
Example #10
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_resnet3d_50(resnet3d_test):
    """Test 50."""
    K.set_image_data_format('channels_last')
    model = Resnet3DBuilder.build_resnet_50((224, 224, 224, 1), 1, 1e-2)
    resnet3d_test(model)
    K.set_image_data_format('channels_first')
    model = Resnet3DBuilder.build_resnet_50((1, 512, 512, 256), 1, 1e-2)
    resnet3d_test(model) 
Example #11
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_resnet3d_34(resnet3d_test):
    """Test 34."""
    K.set_image_data_format('channels_last')
    model = Resnet3DBuilder.build_resnet_34((224, 224, 224, 1), 2)
    resnet3d_test(model)
    K.set_image_data_format('channels_first')
    model = Resnet3DBuilder.build_resnet_34((1, 512, 512, 256), 2)
    resnet3d_test(model) 
Example #12
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def test_resnet3d_18(resnet3d_test):
    """Test 18."""
    K.set_image_data_format('channels_last')
    model = Resnet3DBuilder.build_resnet_18((224, 224, 224, 1), 2)
    resnet3d_test(model)
    K.set_image_data_format('channels_first')
    model = Resnet3DBuilder.build_resnet_18((1, 512, 512, 256), 2)
    resnet3d_test(model) 
Example #13
Source File: test_resnet3d.py    From keras-resnet3d with MIT License
def resnet3d_test():
    """resnet3d test helper."""
    def f(model):
        K.set_image_data_format('channels_last')
        model.compile(loss="categorical_crossentropy", optimizer="sgd")
        assert True, "Failed to build with {}".format(K.image_data_format())
    return f 
Example #14
Source File: models.py    From panotti with MIT License
def Panotti_CNN(X_shape, nb_classes, nb_layers=4):
    # Inputs:
    #    X_shape = [ # spectrograms per batch, # audio channels, # spectrogram freq bins, # spectrogram time bins ]
    #    nb_classes = number of output n_classes
    #    nb_layers = number of conv-pooling sets in the CNN
    from keras import backend as K
    K.set_image_data_format('channels_last')                   # SHH changed on 3/1/2018 b/c tensorflow prefers channels_last

    nb_filters = 32  # number of convolutional filters = "feature maps"
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)  # size of pooling area for max pooling
    cl_dropout = 0.5    # conv. layer dropout
    dl_dropout = 0.6    # dense layer dropout

    print(" MyCNN_Keras2: X_shape = ",X_shape,", channels = ",X_shape[3])
    input_shape = (X_shape[1], X_shape[2], X_shape[3])
    model = Sequential()
    model.add(Conv2D(nb_filters, kernel_size, padding='same', input_shape=input_shape, name="Input"))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Activation('relu'))        # Leave this relu & BN here.  ELU is not good here (my experience)
    model.add(BatchNormalization(axis=-1))  # axis=1 for 'channels_first'; but tensorflow prefers channels_last (axis=-1)

    for layer in range(nb_layers-1):   # add more layers than just the first
        model.add(Conv2D(nb_filters, kernel_size, padding='same'))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Activation('elu'))
        model.add(Dropout(cl_dropout))
        #model.add(BatchNormalization(axis=-1))  # ELU authors recommend no BatchNorm. I confirm.

    model.add(Flatten())
    model.add(Dense(128))            # 128 is 'arbitrary' for now
    #model.add(Activation('relu'))   # relu (no BN) works ok here, however ELU works a bit better...
    model.add(Activation('elu'))
    model.add(Dropout(dl_dropout))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax",name="Output"))
    return model


# Used for when you want to use weights from a previously-trained model,
# with a different set/number of output classes 
Example #15
Source File: losses_test.py    From faceswap with GNU General Public License v3.0
def test_dssim_channels_last(dummy):  # pylint:disable=unused-argument
    """ Basic test for DSSIM Loss """
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        var_x = np.random.random_sample(4 * input_dim * input_dim * 3)
        var_x = var_x.reshape([4] + input_shape)
        var_y = np.random.random_sample(4 * input_dim * input_dim * 3)
        var_y = var_y.reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=losses.DSSIMObjective(kernel_size=kernel_size),
                      metrics=['mse'],
                      optimizer=adam)
        model.fit(var_x, var_y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x_1 = K.constant(var_x, 'float32')
        x_2 = K.constant(var_x, 'float32')
        dssim = losses.DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x_1, x_2)), atol=1e-4)

        # Test opposite
        x_1 = K.zeros([4] + input_shape)
        x_2 = K.ones([4] + input_shape)
        dssim = losses.DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x_1, x_2)), atol=1e-4)

    K.set_image_data_format(prev_data) 
Example #16
Source File: test_utils.py    From keras-vis with MIT License
def test_get_img_shape_on_2d_image():
    n = 5
    channels = 4
    dim1 = 1
    dim2 = 2

    K.set_image_data_format('channels_first')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(K.ones(shape=(n, channels, dim1, dim2)))

    K.set_image_data_format('channels_last')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(K.ones(shape=(n, dim1, dim2, channels))) 
Example #17
Source File: dssim_test.py    From keras-contrib with MIT License
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3)
        X = X.reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3)
        y = y.reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'],
                      optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data) 
Example #18
Source File: dssim_test.py    From keras-contrib with MIT License
def test_DSSIM_channels_last():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3)
        X = X.reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3)
        y = y.reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size),
                      metrics=['mse'],
                      optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data) 
Example #19
Source File: models.py    From keras_BEGAN with MIT License
def build_model(config: BEGANConfig):
    K.set_image_data_format('channels_last')

    autoencoder = build_autoencoder(config)
    generator = build_generator(config)
    discriminator = build_discriminator(config, autoencoder)

    return autoencoder, generator, discriminator 
Example #20
Source File: test_encoders.py    From keras-fcn with MIT License
def test_vgg16():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG16(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG16(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight) 
Example #21
Source File: test_encoders.py    From keras-fcn with MIT License
def test_vgg19():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG19(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight)