Python keras.applications.resnet50.ResNet50() Examples
The following are 30 code examples of keras.applications.resnet50.ResNet50(), each taken from an open-source project. The source file, project, and license are noted above each example.
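For orientation before the project examples, here is a minimal classification sketch using this constructor together with the usual Keras preprocessing helpers (the image path is a placeholder, not taken from any of the projects below):

from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np

model = ResNet50(weights='imagenet')                          # downloads the ImageNet weights on first use
img = image.load_img('example.jpg', target_size=(224, 224))   # placeholder path
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)                                       # channel reordering and mean-pixel subtraction
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])                    # [(class_id, class_name, probability), ...]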
Example #1
Source File: cluster_resnet.py From lost with MIT License
def main(self):
    self.logger.info('Will load keras model')
    model = ResNet50(weights='imagenet')
    self.logger.info('Keras model loaded')
    feature_list = []
    img_path_list = []
    for raw_file in self.inp.raw_files:
        media_path = raw_file.path
        file_list = os.listdir(media_path)
        total = float(len(file_list))
        for index, img_file in enumerate(file_list):
            img_path = os.path.join(media_path, img_file)
            img_path_list.append(img_path)
            img = image.load_img(img_path, target_size=(224, 224))
            x = keras_image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            # extract features
            scores = model.predict(x)
            sim_class = np.argmax(scores)
            print('Scores {}\nSimClass: {}'.format(scores, sim_class))
            self.outp.request_annos(img_path, img_sim_class=sim_class)
            self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
            self.update_progress(index * 100 / total)
Example #2
Source File: resnet50.py From transfer with MIT License
def get_resnet_model(img_dim):
    array_input = Input(shape=(img_dim, img_dim, 3))
    resnet = ResNet50(include_top=False,
                      weights='imagenet',
                      input_tensor=array_input,
                      pooling='avg')
    return resnet
Example #3
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(input=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #4
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(input=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #5
Source File: frcnn.py From maskrcnn with MIT License
def _model_backbone_headless(self):
    if self.config.backbone_nn_type == 'vgg':
        model = VGG16(weights='imagenet', include_top=False)
        # Remove the pooling layer that follows the last convolutional block.
        # https://github.com/keras-team/keras/issues/2371
        # https://github.com/keras-team/keras/issues/6229
        # http://forums.fast.ai/t/how-to-finetune-with-new-keras-api/2328/9
        model.layers.pop()
    else:
        model = ResNet50(weights='imagenet', include_top=False)
    # The backbone weights are not trained.
    for layer in model.layers:
        layer.trainable = False
    output = model.layers[-1].output
    _input = model.input
    return _input, output
Example #6
Source File: feature_detection.py From deeposlandia with MIT License
def resnet(self):
    """Build the structure of a convolutional neural network from input
    image data to the last hidden layer, in a similar manner to ResNet

    See: He, Zhang, Ren, Sun. Deep Residual Learning for Image Recognition.
    ArXiv technical report, 2015.

    Returns
    -------
    tensor
        (batch_size, nb_labels)-shaped output predictions, that have to be
        compared with ground-truth values
    """
    resnet_model = resnet50.ResNet50(include_top=False, input_tensor=self.X)
    y = self.flatten(resnet_model.output)
    return self.output_layer(y, depth=self.nb_labels)
Example #7
Source File: baseline_age.py From face_age_gender with MIT License
def get_model(n_classes=1):
    base_model = ResNet50(weights='imagenet', include_top=False)
    #for layer in base_model.layers:
    #    layer.trainable = False
    x = base_model.output
    x = GlobalMaxPooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.5)(x)
    if n_classes == 1:
        x = Dense(n_classes, activation="sigmoid")(x)
    else:
        x = Dense(n_classes, activation="softmax")(x)
    base_model = Model(base_model.input, x, name="base_model")
    if n_classes == 1:
        base_model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
    else:
        base_model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")
    return base_model
Example #8
Source File: baseline_gender.py From face_age_gender with MIT License
def get_model(n_classes=1):
    base_model = ResNet50(weights='imagenet', include_top=False)
    #for layer in base_model.layers:
    #    layer.trainable = False
    x = base_model.output
    x = GlobalMaxPooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.5)(x)
    if n_classes == 1:
        x = Dense(n_classes, activation="sigmoid")(x)
    else:
        x = Dense(n_classes, activation="softmax")(x)
    base_model = Model(base_model.input, x, name="base_model")
    if n_classes == 1:
        base_model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
    else:
        base_model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")
    return base_model
Example #9
Source File: cnn_models.py From SmooFaceEngine with Apache License 2.0
def ResNet50(input_shape, num_classes):
    # wrap ResNet50 from keras, because ResNet50 is so deep.
    from keras.applications.resnet50 import ResNet50
    input_tensor = Input(shape=input_shape, name="input")
    x = ResNet50(include_top=False,
                 weights=None,
                 input_tensor=input_tensor,
                 input_shape=None,
                 pooling="avg",
                 classes=num_classes)
    x = Dense(units=2048, name="feature")(x.output)
    return Model(inputs=input_tensor, outputs=x)

# implement ResNet's block.
# I implement two kinds of blocks:
# one is the basic block, the other is the bottleneck block.
Example #10
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(input=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 Model for transfer Learning
Example #11
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(input=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 Model for transfer Learning
Example #12
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(input=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 Model for transfer Learning
Example #13
Source File: neuralnets.py From EmoPy with GNU Affero General Public License v3.0
def _get_base_model(self):
    """
    :return: base model from Keras based on user-supplied model name
    """
    if self.model_name == 'inception_v3':
        return InceptionV3(weights='imagenet', include_top=False)
    elif self.model_name == 'xception':
        return Xception(weights='imagenet', include_top=False)
    elif self.model_name == 'vgg16':
        return VGG16(weights='imagenet', include_top=False)
    elif self.model_name == 'vgg19':
        return VGG19(weights='imagenet', include_top=False)
    elif self.model_name == 'resnet50':
        return ResNet50(weights='imagenet', include_top=False)
    else:
        raise ValueError('Cannot find base model %s' % self.model_name)
Example #14
Source File: keras_applications.py From spark-deep-learning with Apache License 2.0
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50, VGG models. For InceptionV3 and Xception it's okay to use the
    keras version (e.g. InceptionV3.preprocess_input) as the code path they hit
    works okay with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    It's a possibility to change the implementation in keras to look like the
    following and modified to work with BGR images (standard in Spark), but not
    doing it for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean
Example #15
Source File: fcn.py From lunania-ai with MIT License
def createResNetModel():
    input_shape = (config.img_height, config.img_width, 3)
    input_img = Input(shape=input_shape, name='input_1')
    model = ResNet50(include_top=False, input_tensor=input_img)
    x = model.layers[-2].output

    # FC layer
    x = Cropping2D(cropping=((0, 1), (0, 1)), name='cropping2d_2')(x)
    h_grid = int(round(config.img_height / 100))
    w_grid = int(round(config.img_width / 100))
    x = Conv2DTranspose(config.classes, (64, 64),
                        #output_shape=(None, h_grid * 102, w_grid * 102, config.classes),
                        strides=(34, 34),
                        padding='same',
                        name='deconvolution2d_1')(x)
    x = Cropping2D(cropping=((h_grid, h_grid), (w_grid, w_grid)), name='cropping2d_1')(x)
    x = Reshape((config.img_width * config.img_height, config.classes), name='reshape_1')(x)
    out = Activation("softmax", name='activation_fc')(x)

    model = Model(model.layers[0].input, out)
    return model
Example #16
Source File: cnn_architecture.py From Pix2Depth with GNU General Public License v3.0
def model_3():
    input_layer = Input(shape=(224, 224, 3))
    from keras.layers import Conv2DTranspose as DeConv
    resnet = ResNet50(include_top=False, weights="imagenet")
    resnet.trainable = False
    res_features = resnet(input_layer)
    conv = DeConv(1024, padding="valid", activation="relu", kernel_size=3)(res_features)
    conv = UpSampling2D((2, 2))(conv)
    conv = DeConv(512, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2, 2))(conv)
    conv = DeConv(128, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2, 2))(conv)
    conv = DeConv(32, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2, 2))(conv)
    conv = DeConv(8, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2, 2))(conv)
    conv = DeConv(4, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = DeConv(1, padding="valid", activation="sigmoid", kernel_size=5)(conv)
    model = Model(inputs=input_layer, outputs=conv)
    return model
Example #17
Source File: regnet.py From GaneratedHandsForReal_TIME with MIT License
def __init__(self, input_shape, heatmap_shape):
    self.min_loss = [10000.0, 10000., 100000., 100000., 100000., 100000., 100000.]
    self.heatmap_shape = input_shape
    input_layer = Input(input_shape)
    resnet = resnet50.ResNet50(input_tensor=input_layer, weights='imagenet', include_top=False)
    conv = RegNet.make_conv(resnet.output)
    flat = Flatten()(conv)

    fc_joints3d_1_before_proj = Dense(200, name='fc_joints3d_1_before_proj')(flat)
    joints3d_prediction_before_proj = Dense(63, name='joints3d_prediction_before_proj')(fc_joints3d_1_before_proj)
    reshape_joints3D_before_proj = Reshape((21, 1, 3), name='reshape_joints3D_before_proj')(joints3d_prediction_before_proj)

    temp = Reshape((21, 3))(reshape_joints3D_before_proj)
    projLayer = ProjLayer(heatmap_shape)(temp)
    heatmaps_pred3D = RenderingLayer(heatmap_shape, coeff=1, name='heatmaps_pred3D')(projLayer)
    heatmaps_pred3D_reshape = ReshapeChannelToLast(heatmap_shape)(heatmaps_pred3D)

    conv_rendered_2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same', activation='relu')(heatmaps_pred3D_reshape)
    conv_rendered_3 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same', activation='relu')(conv_rendered_2)
    concat_pred_rendered = concatenate([conv, conv_rendered_3])
    conv_rendered_4 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same', activation='relu')(concat_pred_rendered)

    heatmap_prefinal_small = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(conv_rendered_4)
    heatmap_prefinal = Deconv2D(filters=21, kernel_size=4, strides=2, padding='same', name='heatmap_prefinal')(heatmap_prefinal_small)
    heatmap_final = Deconv2D(filters=21, kernel_size=4, strides=2, padding='same', name='heatmap_final')(heatmap_prefinal)

    flat = Flatten()(conv_rendered_4)
    fc_joints3D_1_final = Dense(200, name='fc_joints3D_1_final')(flat)
    joints3D_final = Dense(63, name='joints3D_prediction_final')(fc_joints3D_1_final)
    joints3D_final_vec = Reshape((21, 1, 3), name='joint3d_final')(joints3D_final)

    self.model = Model(inputs=input_layer,
                       output=[reshape_joints3D_before_proj, joints3D_final_vec, heatmap_final])
    # self.model = Model(inputs=input_layer, output=projLayer)
    self.model.summary()
Example #18
Source File: ResNet_CAM.py From ResNetCAM-keras with MIT License
def get_ResNet():
    # define ResNet50 model
    model = ResNet50(weights='imagenet')
    # get AMP layer weights
    all_amp_layer_weights = model.layers[-1].get_weights()[0]
    # extract wanted output
    ResNet_model = Model(inputs=model.input,
                         outputs=(model.layers[-4].output, model.layers[-1].output))
    return ResNet_model, all_amp_layer_weights
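The two outputs returned above are the usual ingredients of a class activation map. A minimal sketch of that next step (the helper name and the zoom-based upsampling are illustrative assumptions, not necessarily how the repository implements it):

import numpy as np
import scipy.ndimage

def make_cam(last_conv_output, pred_class, all_amp_layer_weights):
    # last_conv_output: (7, 7, 2048) activations from model.layers[-4] for one image
    # upsample the feature maps to the 224x224 input resolution (assumed factor of 32)
    upsampled = scipy.ndimage.zoom(last_conv_output, (32, 32, 1), order=1)
    # weight the 2048 maps by the dense-layer weights of the predicted class and sum
    class_weights = all_amp_layer_weights[:, pred_class]                 # (2048,)
    return np.dot(upsampled.reshape((224 * 224, 2048)), class_weights).reshape((224, 224))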
Example #19
Source File: baseline.py From kinship_prediction with MIT License
def baseline_model():
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = ResNet50(weights='imagenet', include_top=False)

    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])
    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)
    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))
    model.summary()

    return model
Example #20
Source File: resnet.py From Elphas with Apache License 2.0
def load_model():
    global model
    model = ResNet50(weights="imagenet")
    global graph
    graph = tf.get_default_graph()
Example #21
Source File: 05_print_resnet.py From Practical-Computer-Vision with MIT License
def get_model():
    """
    Loads Resnet and prints model structure
    """
    # create model
    model = ResNet50(weights='imagenet')

    # To print our model loaded
    model.summary()

    return model
Example #22
Source File: custom_layers.py From vehicle-ReId with Apache License 2.0
def resnet50_model(input_shape):
    model = resnet50.ResNet50(weights=None, include_top=False, input_shape=input_shape)
    x = Flatten()(model.output)
    baseModel = Model(inputs=model.input, outputs=x)
    #for layer in baseModel.layers:
    #    layer.trainable = False
    return baseModel
Example #23
Source File: models.py From ICIAR2018 with MIT License
def __init__(self, batch_size=32):
    self.model = ResNet50(include_top=False, weights='imagenet', pooling="avg")
    self.batch_size = batch_size
    self.data_format = K.image_data_format()
Example #24
Source File: cats_and_dogs.py From uncertainty-adversarial-paper with MIT License
def define_model_resnet():
    K.set_learning_phase(True)
    # note: include_top takes a boolean; the original source passed the string 'False',
    # which Python treats as truthy and therefore keeps the 1000-class top.
    rn50 = ResNet50(weights='imagenet', include_top=False)
    a = Dropout(rate=0.5)(rn50.output)
    a = Dense(2, activation='softmax')(a)

    model = keras.models.Model(inputs=rn50.input, outputs=a)

    # freeze resnet layers
    for layer in rn50.layers:
        layer.trainable = False
    return model
Example #25
Source File: resnet.py From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):
    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    resnet_model = ResNet50(include_top=False, weights=None, input_tensor=input_tensor)
    print(resnet_model.summary())

    x = resnet_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(input=resnet_model.input, output=predictions)
Example #26
Source File: test_keras_applications.py From keras-onnx with MIT License
def test_ResNet50(self):
    from keras.applications.resnet50 import ResNet50
    model = ResNet50(include_top=True, weights='imagenet')
    res = run_image(model, self.model_files, img_path)
    self.assertTrue(*res)
Example #27
Source File: pspnet.py From eye-in-the-sky with Apache License 2.0
def PSPNet(n_classes=3, input_shape=(128, 128, 4)):
    # Input to the model
    inputs = Input(input_shape)
    '''in_shape = inputs.shape
    out_shape = (in_shape[1], in_shape[2], 3)'''

    # Converting the 4 channel input to a 3 channel map using an encoder-decoder network
    # so it can be given as an input to ResNet50 with pretrained weights
    res_input = encoder_decoder(inputs)
    res_input_shape = K.int_shape(res_input)
    res_input_shape = (res_input_shape[1], res_input_shape[2], res_input_shape[3])

    # Passing the 3 channel map into ResNet50 followed by 2 upsampling layers
    # to get an output of shape exactly 1/8th of the input map shape
    res = resnet(res_input, input_shape=res_input_shape)

    # Pyramid Pooling Module
    ppmodule_out = pyramid_pooling_module(res)

    # Final Conv layers and output
    x = Conv2D(512, 3, activation='relu', padding='same')(ppmodule_out)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Conv2D(n_classes, 1)(x)
    #x = interpolation(x, shape = (input_shape[0], input_shape[1]))
    x = Lambda(interpolation, arguments={'shape': (input_shape[0], input_shape[1])})(x)
    out = Activation('softmax')(x)

    model = Model(inputs=inputs, outputs=out)

    adam = Adam(lr=0.00001)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()

    return model
Example #28
Source File: resnet50.py From keras-transfer-learning-for-oxford102 with MIT License
def _create(self):
    base_model = KerasResNet50(include_top=False, input_tensor=self.get_input_tensor())
    self.make_net_layers_non_trainable(base_model)

    x = base_model.output
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    # we could achieve almost the same accuracy without this layer, but this one helps later
    # for the novelty detection part and brings much more useful features.
    x = Dense(self.noveltyDetectionLayerSize, activation='elu', name=self.noveltyDetectionLayerName)(x)
    x = Dropout(0.5)(x)
    predictions = Dense(len(config.classes), activation='softmax', name='predictions')(x)

    self.model = Model(input=base_model.input, output=predictions)
Example #29
Source File: pspnet.py From eye-in-the-sky with Apache License 2.0
def resnet(x, input_shape):
    # Decreases the dimensions of the input image by a factor of 32
    x = ResNet50(include_top=False, weights=None, input_tensor=x, input_shape=(512, 512, 3)).output

    # Upsampling by 2
    x = UpSampling2D(size=(2, 2))(x)
    ##x = BatchNormalization()(x)

    # Again Upsampling by 2 so that we get an output feature map of size 1/8th of the initial image
    x = UpSampling2D(size=(2, 2))(x)
    ##res = BatchNormalization()(x)

    x = UpSampling2D(size=(2, 2))(x)

    return x
Example #30
Source File: keras_applications.py From spark-deep-learning with Apache License 2.0
def model(self, preprocessed, featurize):
    # Model provided by Keras. All contributions by Keras are provided subject to the
    # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
    # and subject to the below additional copyrights and licenses.
    #
    # The MIT License (MIT)
    #
    # Copyright (c) 2016 Shaoqing Ren
    #
    # Permission is hereby granted, free of charge, to any person obtaining a copy
    # of this software and associated documentation files (the "Software"), to deal
    # in the Software without restriction, including without limitation the rights
    # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    # copies of the Software, and to permit persons to whom the Software is
    # furnished to do so, subject to the following conditions:
    #
    # The above copyright notice and this permission notice shall be included in all
    # copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    # SOFTWARE.
    return resnet50.ResNet50(input_tensor=preprocessed,
                             weights="imagenet",
                             include_top=(not featurize))