Python keras.applications.VGG16 Examples
The following are 22 code examples of keras.applications.VGG16(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.applications, or try the search function.
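Before turning to the project examples, here is a minimal, self-contained sketch of the typical call, assuming the standard keras.applications API; the image path 'elephant.jpg' is a placeholder, not something any example below uses:

import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing import image

# Load VGG16 with ImageNet weights and the classification head included
model = VGG16(weights='imagenet', include_top=True)

# 'elephant.jpg' is a placeholder path; any RGB image resizable to 224x224 works
img = image.load_img('elephant.jpg', target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img), axis=0)  # shape (1, 224, 224, 3)
x = preprocess_input(x)  # VGG-specific channel-wise mean subtraction

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # [(class_id, class_name, score), ...]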
Example #1
Source File: network.py From Unified-Gesture-and-Fingertip-Detection with MIT License | 6 votes |
def model():
    model = VGG16(include_top=False, input_shape=(128, 128, 3))
    x = model.output
    y = x

    # Classification branch
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    probability = Dense(5, activation='sigmoid', name='probabilistic_output')(x)

    # Positional branch
    y = UpSampling2D((3, 3))(y)
    y = Activation('relu')(y)
    y = Conv2D(1, (3, 3), activation='linear')(y)
    position = Reshape(target_shape=(10, 10), name='positional_output')(y)

    model = Model(inputs=model.input, outputs=[probability, position])
    return model
Example #2
Source File: vgg.py From inpainting-gmcnn-keras with MIT License | 6 votes |
def build_vgg_img_shape(y_pred, vgg_layers):
    input_shape = y_pred.shape.as_list()[1:4]
    img = Input(shape=input_shape)
    img_norm = _norm_inputs(img)

    vgg = VGG16(weights="imagenet", include_top=False)

    # Output the first three pooling layers
    vgg.outputs = [vgg.layers[i].output for i in vgg_layers]

    # Create model and compile
    model = Model(inputs=img, outputs=vgg(img_norm))
    model.trainable = False
    model.compile(loss='mse', optimizer='adam')
    return model
Example #3
Source File: vgg.py From inpainting-gmcnn-keras with MIT License | 6 votes |
def build_vgg_original_shape(y_pred, vgg_layers):
    input_shape = y_pred.shape.as_list()[1:4]
    img = Input(shape=input_shape)
    img_reshaped = Lambda(
        lambda x: tf.image.resize_nearest_neighbor(x, size=ORIGINAL_VGG_16_SHAPE))(img)
    img_norm = _norm_inputs(img_reshaped)

    vgg = VGG16(weights="imagenet", include_top=False)

    # Output the first three pooling layers
    vgg.outputs = [vgg.layers[i].output for i in vgg_layers]

    # Create model and compile
    model = Model(inputs=img, outputs=vgg(img_norm))
    model.trainable = False
    model.compile(loss='mse', optimizer='adam')
    return model
Example #4
Source File: feature_detection.py From deeposlandia with MIT License | 6 votes |
def vgg16(self):
    """Build the structure of a convolutional neural network from input image
    data to the last hidden layer, in a manner similar to VGG-net

    See: Simonyan & Zisserman, Very Deep Convolutional Networks for
    Large-Scale Image Recognition, arXiv technical report, 2014

    Returns
    -------
    tensor
        (batch_size, nb_labels)-shaped output predictions, that have to be
        compared with ground-truth values
    """
    vgg16_model = VGG16(input_tensor=self.X, include_top=False)
    y = self.flatten(vgg16_model.output, block_name="flatten")
    y = self.dense(y, 1024, block_name="fc1")
    y = self.dense(y, 1024, block_name="fc2")
    return self.output_layer(y, depth=self.nb_labels)
Example #5
Source File: applications_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_vgg():
    app = random.choice([applications.VGG16, applications.VGG19])
    last_dim = 512
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim)
Example #6
Source File: file_util.py From Mosaicer with MIT License | 5 votes |
def make_model(model, image_size):
    if model == "inceptionv3":
        base_model = InceptionV3(include_top=False, input_shape=image_size + (3,))
    elif model == "vgg16" or model is None:
        base_model = VGG16(include_top=False, input_shape=image_size + (3,))
    elif model == "mobilenet":
        base_model = MobileNet(include_top=False, input_shape=image_size + (3,))
    return base_model
Example #7
Source File: solo_net.py From Unified-Gesture-and-Fingertip-Detection with MIT License | 5 votes |
def model():
    model = VGG16(include_top=False, input_shape=(416, 416, 3))
    x = model.output
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    output = Reshape((13, 13), name='output')(x)
    model = Model(model.input, output)
    return model
Example #8
Source File: content_based_filtering.py From keras-recommender with MIT License | 5 votes |
def __init__(self):
    self.matrix_res = None
    self.similarity_deep = None
    self.model = VGG16(include_top=False, weights='imagenet')
    self.matrix_idx_to_item_id = None
    self.item_id_to_matrix_idx = None
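Example #8 only shows the constructor; as a hedged sketch of how such a headless VGG16 is typically used downstream (the recommender code itself is not shown here, and image_batch is a hypothetical preprocessed array), feature extraction reduces to a single predict call:

import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input

model = VGG16(include_top=False, weights='imagenet')

# image_batch is a hypothetical (n, 224, 224, 3) array of RGB pixel values
image_batch = np.random.rand(4, 224, 224, 3).astype('float32') * 255.0
features = model.predict(preprocess_input(image_batch))
print(features.shape)  # (4, 7, 7, 512): one spatial feature map per image
vectors = features.reshape(len(features), -1)  # flatten for similarity computations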
Example #9
Source File: vgg.py From inpainting-gmcnn-keras with MIT License | 5 votes |
def build_vgg16(y_pred, use_original_vgg_shape, vgg_layers):
    """ Load pre-trained VGG16 from keras applications """
    if use_original_vgg_shape:
        return build_vgg_original_shape(y_pred, vgg_layers)
    else:
        return build_vgg_img_shape(y_pred, vgg_layers)
Example #10
Source File: capgen.py From Image-to-Image-Search with MIT License | 5 votes |
def __init__(self):
    self.checkpoint = pickle.load(open(CHECKPOINT_PATH, 'rb'), encoding='latin1')
    self.checkpoint_params = self.checkpoint['params']
    self.language_model = self.checkpoint['model']
    self.ixtoword = self.checkpoint['ixtoword']
    model = VGG16(weights="imagenet")
    self.visual_model = Model(inputs=model.input, outputs=model.layers[21].output)
    self.visual_model._make_predict_function()
    self.graph = tf.get_default_graph()
    self.BEAM_SIZE = 2
Example #11
Source File: vgg16.py From plaidbench with Apache License 2.0 | 5 votes |
def build_model():
    import keras.applications as kapp
    from keras.layers import Input
    from keras.backend import floatx
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.VGG16(input_tensor=inputLayer)
Example #12
Source File: mlearn_for_image.py From easy12306 with Artistic License 2.0 | 5 votes |
def learn():
    (train_x, train_y, sample_weight), (test_x, test_y) = load_data()
    datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
    train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
    base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
    for layer in base.layers[:-4]:
        layer.trainable = False
    model = models.Sequential([
        base,
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dense(64, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.20),
        layers.Dense(80, activation='softmax')
    ])
    model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    reduce_lr = ReduceLROnPlateau(verbose=1)
    model.fit_generator(train_generator, epochs=400, steps_per_epoch=100,
                        validation_data=(test_x[:800], test_y[:800]),
                        callbacks=[reduce_lr])
    result = model.evaluate(test_x, test_y)
    print(result)
    model.save('12306.image.model.h5', include_optimizer=False)
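A hedged follow-up to Example #12: once learn() has saved the model, inference is a plain load-and-predict. The file name comes from the snippet above; the input batch and its 64x64 size are placeholders (the VGG16 base was built with input_shape=(None, None, 3), so any reasonable size works, and load_data()'s preprocessing is not shown here):

import numpy as np
from keras import models

# Reload the model saved by learn()
model = models.load_model('12306.image.model.h5')

# Hypothetical batch of one image; real inputs should be preprocessed
# the same way as the training data
imgs = np.random.rand(1, 64, 64, 3).astype('float32')
probs = model.predict(imgs)   # shape (1, 80): softmax over the 80 classes
print(probs.argmax(axis=-1))  # predicted class index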
Example #13
Source File: generate_features.py From show-attend-and-tell-keras with MIT License | 5 votes |
def setup_model(encoder, layer_name):
    image_input = Input(shape=(224, 224, 3))
    base_model = None
    if encoder == 'vgg16':
        base_model = VGG16(include_top=False, weights='imagenet',
                           input_tensor=image_input, input_shape=(224, 224, 3))
    elif encoder == 'vgg19':
        base_model = VGG19(include_top=False, weights='imagenet',
                           input_tensor=image_input, input_shape=(224, 224, 3))
    else:
        raise ValueError("not implemented encoder type")

    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer(layer_name).output)
    return model
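A short usage sketch for Example #13's setup_model; the layer name 'block5_conv3' is an assumption (the last convolutional layer of VGG16), not something the source file dictates:

# Extract convolutional feature maps, e.g. for an attention mechanism;
# 'block5_conv3' is an assumed layer name, not taken from the source file
model = setup_model('vgg16', 'block5_conv3')
print(model.output_shape)  # (None, 14, 14, 512) for 224x224 inputs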
Example #14
Source File: _validateSchema.py From nyoka with Apache License 2.0 | 5 votes |
def test_validate_keras_vgg(self):
    input_tensor = Input(shape=(224, 224, 3))
    model = VGG16(weights="imagenet", input_tensor=input_tensor)
    file_name = "keras" + model.name + ".pmml"
    pmml_obj = KerasToPmml(model, dataSet="image",
                           predictedClasses=[str(i) for i in range(1000)])
    pmml_obj.export(open(file_name, 'w'), 0)
    self.assertEqual(self.schema.is_valid(file_name), True)
Example #15
Source File: train_multi.py From DeepFashion with Apache License 2.0 | 5 votes |
def save_bottlebeck_features_btl():
    dataset_btl_path = 'dataset_btl/train'
    batch_size = 1

    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network, excluding the 3 FC layers on top
    model = applications.VGG16(include_top=False, weights='imagenet')

    score_iou_btl_g, nb_btl_samples = get_images_count_recursive(dataset_btl_path)
    logging.debug('score_iou_btl_g {}'.format(score_iou_btl_g))
    logging.debug('nb_btl_samples {}'.format(nb_btl_samples))

    ## Train
    generator = datagen.flow_from_directory(
        dataset_btl_path,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        classes=None,     # classes map to label indices in alphanumeric folder order
        class_mode=None,  # None: yield batches of data only, with no labels
        save_to_dir='temp',
        shuffle=False)    # Don't shuffle, else the [class index = alphabetical
                          # folder order] logic used below might become wrong
    logging.info('generator.class_indices {}'.format(generator.class_indices))

    bottleneck_features_btl = model.predict_generator(
        generator, nb_btl_samples // batch_size)
    logging.debug('bottleneck_features_btl {}'.format(bottleneck_features_btl.shape))

    # save the output as a Numpy array (np.save requires a binary-mode file)
    logging.debug('Saving bottleneck_features_btl...')
    np.save(open('output/bottleneck_features_btl.npy', 'wb'), bottleneck_features_btl)
Example #16
Source File: train_multi_v2.py From DeepFashion with Apache License 2.0 | 5 votes |
def save_bottlebeck_features_btl():
    dataset_btl_path = 'dataset_btl/train'
    batch_size = 1

    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network, excluding the 3 FC layers on top
    model = applications.VGG16(include_top=False, weights='imagenet')

    score_iou_btl_g, nb_btl_samples = get_images_count_recursive(dataset_btl_path)
    logging.debug('score_iou_btl_g {}'.format(score_iou_btl_g))
    logging.debug('nb_btl_samples {}'.format(nb_btl_samples))

    ## Train
    generator = datagen.flow_from_directory(
        dataset_btl_path,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        classes=None,     # classes map to label indices in alphanumeric folder order
        class_mode=None,  # None: yield batches of data only, with no labels
        save_to_dir='temp',
        shuffle=False)    # Don't shuffle, else the [class index = alphabetical
                          # folder order] logic used below might become wrong
    logging.info('generator.class_indices {}'.format(generator.class_indices))

    bottleneck_features_btl = model.predict_generator(
        generator, nb_btl_samples // batch_size)
    logging.debug('bottleneck_features_btl {}'.format(bottleneck_features_btl.shape))

    # save the output as a Numpy array (np.save requires a binary-mode file)
    logging.debug('Saving bottleneck_features_btl...')
    np.save(open('output/bottleneck_features_btl.npy', 'wb'), bottleneck_features_btl)
Example #17
Source File: mlearn_for_image.py From 12306 with MIT License | 5 votes |
def learn():
    (train_x, train_y, sample_weight), (test_x, test_y) = load_data()
    datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
    train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
    base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
    for layer in base.layers[:-4]:
        layer.trainable = False
    model = models.Sequential([
        base,
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dense(64, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.20),
        layers.Dense(80, activation='softmax')
    ])
    model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    reduce_lr = ReduceLROnPlateau(verbose=1)
    model.fit_generator(train_generator, epochs=400, steps_per_epoch=100,
                        validation_data=(test_x[:800], test_y[:800]),
                        callbacks=[reduce_lr])
    result = model.evaluate(test_x, test_y)
    print(result)
    model.save('12306.image.model.h5', include_optimizer=False)
Example #18
Source File: features.py From vergeml with MIT License | 4 votes |
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer,
                              include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model
Example #19
Source File: train_multi_v3.py From DeepFashion with Apache License 2.0 | 4 votes |
def create_model_predict(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0,
                         momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)

    input_shape = (img_width, img_height, 3)  # 224,224,3
    base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
    logging.debug('base_model inputs {}'.format(base_model.input))    # shape=(?, 224, 224, 3)
    logging.debug('base_model outputs {}'.format(base_model.output))  # shape=(?, 7, 7, 512)

    # TODO: Hardcoding
    input_shape_top_model_tensor = Input(shape=(7, 7, 512))

    ## Model Classification
    x = Flatten()(base_model.output)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)

    ## Model (Regression) IOU score
    x = Flatten()(base_model.output)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)

    # This creates a model that includes the Input layer and both prediction heads
    logging.debug('Creating model')
    model = Model(inputs=base_model.input, outputs=[predictions_class, predictions_iou])
    logging.debug('model summary {}'.format(model.summary()))

    # TODO: loads only top-model weights, as only those are present (also saved
    # by name) in the file. Test this assumption.
    model.load_weights(top_model_weights_path, by_name=True)

    # Compile
    model.compile(optimizer=optimizer,
                  loss={'predictions_class': 'sparse_categorical_crossentropy',
                        'predictions_iou': 'mean_squared_error'},
                  metrics=['accuracy'],
                  loss_weights={'predictions_class': 0.5, 'predictions_iou': 0.5})

    logging.info('optimizer:{} learn_rate:{} decay:{} momentum:{} activation:{} dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))
    return model
Example #20
Source File: train_multi_v3.py From DeepFashion with Apache License 2.0 | 4 votes |
def create_model_train(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0,
                       momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)

    # input_shape = (7, 7, 512)  # VGG bottleneck layer - block5_pool (MaxPooling2D)
    inputs = Input(shape=input_shape)
    x_common = Dense(256, activation='relu')(inputs)

    ## Model Classification
    x = Flatten()(x_common)
    predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)

    ## Model (Regression) IOU score
    x = Flatten()(x_common)
    predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)

    # This creates a model that includes the Input layer and both prediction heads
    model = Model(inputs=inputs, outputs=[predictions_class, predictions_iou])
    logging.debug('model summary {}'.format(model.summary()))

    # Compile
    model.compile(optimizer=optimizer,
                  loss={'predictions_class': 'sparse_categorical_crossentropy',
                        'predictions_iou': 'mean_squared_error'},
                  metrics=['accuracy'],
                  loss_weights={'predictions_class': 0.5, 'predictions_iou': 0.5})

    logging.info('optimizer:{} learn_rate:{} decay:{} momentum:{} activation:{} dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))
    return model

# INPUT:  Input Image (None, 224, 224, 3) [fed to VGG16]
# OUTPUT: Branch1 - Class Prediction
#         Branch2 - IOU Prediction
# NOTE: Both models in create_model_train() and create_model_predict() should be exactly the same
Example #21
Source File: train_multi_v3.py From DeepFashion with Apache License 2.0 | 4 votes |
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):
    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)
    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    return optimizer_mod, lr

# INPUT:  VGG16 - block5_pool (MaxPooling2D) (None, 7, 7, 512)
# OUTPUT: Branch1 - Class Prediction
#         Branch2 - IOU Prediction
# NOTE: Both models in create_model_train() and create_model_predict() should be exactly the same
Example #22
Source File: train_multi_v4.py From DeepFashion with Apache License 2.0 | 4 votes |
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):
    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)
    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    return optimizer_mod, lr

# INPUT:  VGG16 - block5_pool (MaxPooling2D) (None, 7, 7, 512)
# OUTPUT: Branch1 - Class Prediction
#         Branch2 - IOU Prediction
# NOTE: Both models in create_model_train() and create_model_predict() should be exactly the same