Python keras.applications.densenet.DenseNet121() Examples
The following are 4 code examples of keras.applications.densenet.DenseNet121(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.applications.densenet, or try the search function.
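Before the project examples, here is a minimal usage sketch of DenseNet121() itself: load the ImageNet-trained classifier and run a single image through it. The image path is a placeholder, and the preprocessing/decoding helpers come from the same keras.applications.densenet module.

import numpy as np
from keras.preprocessing import image
from keras.applications.densenet import DenseNet121, preprocess_input, decode_predictions

# Build DenseNet-121 with the 1000-class ImageNet head.
model = DenseNet121(weights='imagenet', include_top=True)

# 'elephant.jpg' is a placeholder path; the default input size is 224x224 RGB.
img = image.load_img('elephant.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])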
Example #1
Source File: pretrain_imagenet_cnn.py From hyperspectral_deeplearning_review with GNU General Public License v3.0 | 5 votes |
def get_model_pretrain(arch):
    # keras.applications submodules used below; imported here to keep the
    # excerpt self-contained (the original source imports them at module level).
    from keras.applications import vgg16, vgg19, resnet50, densenet, mobilenet

    modlrate = 1  # learning-rate multiplier returned alongside the backbone
    if "VGG16" in arch:
        base_model = vgg16.VGG16
    elif "VGG19" in arch:
        base_model = vgg19.VGG19
    elif "RESNET50" in arch:
        base_model = resnet50.ResNet50
    elif "DENSENET121" in arch:
        base_model = densenet.DenseNet121
    elif "MOBILENET" in arch:
        base_model = mobilenet.MobileNet
        modlrate = 10
    else:
        print("model not available")
        exit()
    # Instantiate the chosen backbone with ImageNet weights and no classifier head.
    base_model = base_model(weights='imagenet', include_top=False)
    return base_model, modlrate
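A hypothetical call for context (the architecture string is an assumption; any string containing one of the keys above works):

# Hypothetical usage of the excerpt above: headless DenseNet-121 backbone plus
# the learning-rate multiplier (1 for every architecture except MobileNet).
base_model, lr_mult = get_model_pretrain("DENSENET121")
print(base_model.name, lr_mult)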
Example #2
Source File: test_keras_applications.py From keras-onnx with MIT License | 5 votes |
def test_DenseNet121(self):
    from keras.applications.densenet import DenseNet121
    model = DenseNet121(include_top=True, weights='imagenet')
    # run_image, self.model_files and img_path are keras-onnx test helpers.
    res = run_image(model, self.model_files, img_path)
    self.assertTrue(*res)
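run_image, self.model_files and img_path belong to the keras-onnx test harness, which appears to convert the Keras model to ONNX and compare predictions on a test image. As a standalone sanity check of the Keras side only (an assumption, not the project's helper), one can verify the model yields a 1000-class probability vector:

import numpy as np
from keras.applications.densenet import DenseNet121, preprocess_input

# Random 224x224 input; expect a (1, 1000) softmax output that sums to ~1.
model = DenseNet121(include_top=True, weights='imagenet')
x = preprocess_input(np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32'))
preds = model.predict(x)
assert preds.shape == (1, 1000)
assert abs(float(preds.sum()) - 1.0) < 1e-3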
Example #3
Source File: models.py From dsb2018_topcoders with MIT License | 4 votes |
def get_densenet121_unet_softmax(input_shape, weights='imagenet'):
    # Keras layers (Input, Conv2D, ...) and the DenseNet building blocks
    # (dense_block, transition_block, conv_block, bn_axis) are imported or
    # defined elsewhere in models.py.
    blocks = [6, 12, 24, 16]  # dense-block sizes of DenseNet-121

    # Encoder: DenseNet-121 stem and dense blocks over a 4-channel input.
    img_input = Input(input_shape + (4,))
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
    x = Activation('relu', name='conv1/relu')(x)
    conv1 = x
    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(3, strides=2, name='pool1')(x)
    x = dense_block(x, blocks[0], name='conv2')
    conv2 = x
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    conv3 = x
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    conv4 = x
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    conv5 = x

    # Decoder: U-Net upsampling path with skip connections to the encoder.
    conv6 = conv_block(UpSampling2D()(conv5), 320)
    conv6 = concatenate([conv6, conv4], axis=-1)
    conv6 = conv_block(conv6, 320)
    conv7 = conv_block(UpSampling2D()(conv6), 256)
    conv7 = concatenate([conv7, conv3], axis=-1)
    conv7 = conv_block(conv7, 256)
    conv8 = conv_block(UpSampling2D()(conv7), 128)
    conv8 = concatenate([conv8, conv2], axis=-1)
    conv8 = conv_block(conv8, 128)
    conv9 = conv_block(UpSampling2D()(conv8), 96)
    conv9 = concatenate([conv9, conv1], axis=-1)
    conv9 = conv_block(conv9, 96)
    conv10 = conv_block(UpSampling2D()(conv9), 64)
    conv10 = conv_block(conv10, 64)
    res = Conv2D(3, (1, 1), activation='softmax')(conv10)
    model = Model(img_input, res)

    if weights == 'imagenet':
        # Transfer ImageNet weights from a 3-channel DenseNet-121. The first
        # conv is adapted to 4 channels: the RGB filters are copied (scaled by
        # 0.9) and the extra channel reuses the green-channel filters (scaled by 0.1).
        densenet = DenseNet121(input_shape=input_shape + (3,), weights=weights, include_top=False)
        w0 = densenet.layers[2].get_weights()
        w = model.layers[2].get_weights()
        w[0][:, :, [0, 1, 2], :] = 0.9 * w0[0][:, :, :3, :]
        w[0][:, :, 3, :] = 0.1 * w0[0][:, :, 1, :]
        model.layers[2].set_weights(w)
        # Remaining encoder layers get pretrained weights and are frozen.
        for i in range(3, len(densenet.layers)):
            model.layers[i].set_weights(densenet.layers[i].get_weights())
            model.layers[i].trainable = False
    return model
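A hypothetical call for scale (the crop size is an assumption; the function appends the 4th channel itself and returns a 3-class softmax map at the input resolution):

# Hypothetical usage of the builder above: 256x256 crops, the 4 input channels
# are added internally, output is a 256x256x3 softmax segmentation map.
model = get_densenet121_unet_softmax((256, 256), weights='imagenet')
model.summary()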
Example #4
Source File: test_bench.py From Keras-inference-time-optimizer with MIT License | 4 votes |
def get_tst_neural_net(type):
    # get_custom_multi_io_model, get_RetinaNet_model and the other custom
    # builders are defined or imported elsewhere in test_bench.py.
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    return model, custom_objects
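A hypothetical call, using one of the strings from the chain above:

# Hypothetical usage: build the DenseNet-121 benchmark model and inspect it.
model, custom_objects = get_tst_neural_net('densenet121')
print(model.name, len(model.layers), custom_objects)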