Python keras.applications.inception_v3.InceptionV3() Examples

The following are 30 code examples of keras.applications.inception_v3.InceptionV3(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the keras.applications.inception_v3 module.
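
Before the project-specific examples, here is a minimal, self-contained sketch of the most common use of InceptionV3: classifying a single image with pretrained ImageNet weights. The file name elephant.jpg is a placeholder; note that InceptionV3 expects 299x299 inputs and ships its own preprocess_input, which scales pixels to [-1, 1].

import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions

# Build the full network, including the ImageNet classification head
model = InceptionV3(weights='imagenet', include_top=True)

# InceptionV3 expects 299x299 RGB inputs ('elephant.jpg' is a placeholder path)
img = image.load_img('elephant.jpg', target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add a batch dimension: (1, 299, 299, 3)
x = preprocess_input(x)        # scale pixel values to [-1, 1]

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # top-3 (class_id, class_name, probability) tuples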
Example #1
Source File: model.py    From Image-Caption-Generator with MIT License
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
	# InceptionV3 outputs a 2048-dimensional feature vector for each image, which we feed to the RNN model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
	# VGG16 outputs a 4096-dimensional feature vector for each image, which we feed to the RNN model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero=True: inputs are zero-padded to a common length, and the mask tells downstream layers to ignore the padded timesteps (an efficiency measure)
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
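
For context, a minimal sketch of how this builder might be invoked, assuming the Keras layers used above are imported; the rnnConfig values below are illustrative placeholders, not taken from the source project:

rnnConfig = {
    'embedding_size': 300,  # illustrative hyperparameters
    'LSTM_units': 256,
    'dense_units': 256,
    'dropout': 0.3,
}
model = RNNModel(vocab_size=7500, max_len=34, rnnConfig=rnnConfig, model_type='inceptionv3')
model.summary()  # two inputs: a 2048-d image vector and a max_len-long caption sequence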
Example #2
Source File: retrain.py    From 3d-dl with MIT License
def unfreeze(self,layers):
        """
        Unfreeze a specified number of InceptionV3 layers and recompile the model.
        """
        inception_layers = 311  # number of layers in InceptionV3 for this Keras version
        cutoff = inception_layers - layers  # renamed from slice to avoid shadowing the built-in

        for layer in self.model.layers[:cutoff]:
            layer.trainable = False
        for layer in self.model.layers[cutoff:]:
            layer.trainable = True

        self.model.compile(optimizer=SGD(lr=self.lr, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])

    # train a model from scratch given a set of training parameters
    # choose whether to save the model 
Example #3
Source File: inceptionV3.py    From Deep-Learning-Quick-Reference with MIT License
def build_model_feature_extraction():
    # create the base pre-trained model
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer
    predictions = Dense(1, activation='sigmoid')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return model 
Example #4
Source File: spatial_validate_model.py    From two-stream-action-recognition-keras with MIT License
def cnn_spatial(self, weights='imagenet'):
        # create the base pre-trained model
        base_model = InceptionV3(weights=weights, include_top=False)
    
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        # let's add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        # and a softmax classification layer
        predictions = Dense(self.nb_classes, activation='softmax')(x)
    
        # this is the model we will train
        model = Model(inputs=base_model.input, outputs=predictions)

        return model 
Example #5
Source File: neuralnets.py    From EmoPy with GNU Affero General Public License v3.0
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
Example #6
Source File: feature_detection.py    From deeposlandia with MIT License
def inception(self):
        """Build the structure of a convolutional neural network from input
        image data to the last hidden layer on the model of a similar manner
        than Inception-V4

        See: Szegedy, Vanhoucke, Ioffe, Shlens. Rethinking the Inception
        Architecture for Computer Vision. ArXiv technical report, 2015.

        Returns
        -------
        tensor
            (batch_size, nb_labels)-shaped output predictions, to be compared
            with ground-truth values

        """
        inception_model = inception_v3.InceptionV3(
            input_tensor=self.X, include_top=False
        )
        y = K.layers.GlobalAveragePooling2D()(inception_model.output)
        return self.output_layer(y, depth=self.nb_labels) 
Example #7
Source File: models.py    From keras-image-captioning with MIT License
def _build_image_embedding(self):
        image_model = InceptionV3(include_top=False, weights='imagenet',
                                  pooling='avg')
        for layer in image_model.layers:
            layer.trainable = False

        dense_input = BatchNormalization(axis=-1)(image_model.output)
        image_dense = Dense(units=self._embedding_size,
                            kernel_regularizer=self._regularizer,
                            kernel_initializer=self._initializer
                            )(dense_input)
        # Add timestep dimension
        image_embedding = RepeatVector(1)(image_dense)

        image_input = image_model.input
        return image_input, image_embedding 
Example #8
Source File: TransferLearning_ffd.py    From Intelligent-Projects-Using-Python with MIT License
def inception_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = InceptionV3(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# ResNet50 Model for transfer Learning 
Example #9
Source File: TransferLearning_reg.py    From Intelligent-Projects-Using-Python with MIT License
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Example #10
Source File: TransferLearning.py    From Intelligent-Projects-Using-Python with MIT License
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Example #11
Source File: TransferLearning.py    From Intelligent-Projects-Using-Python with MIT License
def __init__(self):
		parser = argparse.ArgumentParser(description='Process the inputs')
		parser.add_argument('--path',help='image directory')
		parser.add_argument('--class_folders',help='class images folder names')
		parser.add_argument('--dim',type=int,help='Image dimensions to process')
		parser.add_argument('--lr',type=float,help='learning rate',default=1e-4)
		parser.add_argument('--batch_size',type=int,help='batch size')
		parser.add_argument('--epochs',type=int,help='no of epochs to train')
		parser.add_argument('--initial_layers_to_freeze',type=int,help='the initial layers to freeze')
		parser.add_argument('--model',help='Standard Model to load',default='InceptionV3')
		parser.add_argument('--folds',type=int,help='num of cross validation folds',default=5)
		parser.add_argument('--outdir',help='output directory')
		
		
		args = parser.parse_args()
		self.path = args.path
		self.class_folders = json.loads(args.class_folders)
		self.dim  = int(args.dim)
		self.lr   = float(args.lr)
		self.batch_size = int(args.batch_size)
		self.epochs =  int(args.epochs)
		self.initial_layers_to_freeze = int(args.initial_layers_to_freeze)
		self.model = args.model
		self.folds = int(args.folds)
		self.outdir = args.outdir 
Example #12
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def model(self, preprocessed, featurize):
        # Model provided by Keras. All contributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
        # and subject to the below additional copyrights and licenses.
        #
        # Copyright 2016 The TensorFlow Authors.  All rights reserved.
        #
        # Licensed under the Apache License, Version 2.0 (the "License");
        # you may not use this file except in compliance with the License.
        # You may obtain a copy of the License at
        #
        # http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing, software
        # distributed under the License is distributed on an "AS IS" BASIS,
        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        # See the License for the specific language governing permissions and
        # limitations under the License.
        """
        From Keras: These weights are released under the Apache License 2.0.
        """
        return inception_v3.InceptionV3(input_tensor=preprocessed, weights="imagenet",
                                        include_top=(not featurize)) 
Example #13
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50, VGG models. For InceptionV3 and Xception it's okay to use the
    keras version (e.g. InceptionV3.preprocess_input) as the code path they hit
    works okay with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    The keras implementation could be changed to look like the following, modified
    to work with BGR images (standard in Spark), but we are not doing that for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean 
Example #14
Source File: train_cnn.py    From five-video-classification-methods with MIT License
def freeze_all_but_top(model):
    """Used to train just the top layers of the model."""
    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in model.layers[:-2]:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    return model 
Example #15
Source File: model.py    From Image-Caption-Generator with MIT License
def CNNModel(model_type):
	if model_type == 'inceptionv3':
		model = InceptionV3()
	elif model_type == 'vgg16':
		model = VGG16()
	# Drop the final classification layer so the network outputs the penultimate
	# feature vector (2048-d for InceptionV3, 4096-d for VGG16)
	model.layers.pop()
	model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
	return model 
Example #16
Source File: model.py    From Image-Caption-Generator with MIT License
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
	# InceptionV3 outputs a 2048-dimensional feature vector for each image, which we feed to the RNN model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
	# VGG16 outputs a 4096-dimensional feature vector for each image, which we feed to the RNN model
		image_input = Input(shape=(4096,))
	image_model_1 = Dense(embedding_size, activation='relu')(image_input)
	image_model = RepeatVector(max_len)(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero=True: inputs are zero-padded to a common length, and the mask tells downstream layers to ignore the padded timesteps (an efficiency measure)
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	# Since we predict the next word from the previous words (whose number grows
	# with each step through the caption), we set return_sequences=True.
	caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
	# caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
	caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	# final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
	final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
	# final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
	# final_model = Dense(vocab_size, activation='softmax')(final_model_3)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
	return model 
Example #17
Source File: 05_print_inceptionv3.py    From Practical-Computer-Vision with MIT License
def print_model():
    """
    Loads Inceptionv3 and prints model structure
    """
    
    # create model 
    model = InceptionV3(weights='imagenet')

    # print a summary of the constructed model
    model.summary() 
Example #18
Source File: featureExtractor.py    From glyphreader with MIT License
def __init__(self):
        print("loading DeepNet (Inception-V3) ...")
        self.model = InceptionV3(weights='imagenet')
        
        # Initialise the model to output the second-to-last layer, which contains the deep-learning features
        self.model.layers.pop() # Get rid of the classification layer
        self.model.outputs = [self.model.layers[-1].output]
        self.model.layers[-1].outbound_nodes = [] 
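
A hedged sketch of how such an extractor might be used afterwards, assuming extractor is an instance of the class this __init__ belongs to; the image path is an illustrative placeholder:

import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input

img = image.load_img('glyph.png', target_size=(299, 299))  # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = extractor.model.predict(x)  # (1, 2048) pooled feature vector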
Example #19
Source File: spatial_train_model.py    From two-stream-action-recognition-keras with MIT License
def freeze_all_but_top(model):
    """Used to train just the top layers of the model."""
    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in model.layers[:-2]:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    return model 
Example #20
Source File: spatial_train_model.py    From two-stream-action-recognition-keras with MIT License
def get_model(data, weights='imagenet'):
    # create the base pre-trained model
    base_model = InceptionV3(weights=weights, include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a softmax classification layer
    predictions = Dense(len(data.classes), activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    return model 
Example #21
Source File: fuse_validate_model.py    From two-stream-action-recognition-keras with MIT License
def cnn_spatial(self):
        base_model = InceptionV3(weights='imagenet', include_top=False)
    
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        # let's add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        # and a softmax classification layer
        predictions = Dense(self.nb_classes, activation='softmax')(x)
    
        model = Model(inputs=base_model.input, outputs=predictions)
        return model

    # CNN model for the temporal stream 
Example #22
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _testKerasModel(self, include_top):
        return inception_v3.InceptionV3(weights="imagenet", include_top=include_top) 
Example #23
Source File: models.py    From pretrained.ml with MIT License
def __init__(self):
        logger.info('Loading Inception V3')
        self.model = InceptionV3(weights='imagenet') 
Example #24
Source File: models.py    From ICIAR2018 with MIT License
def __init__(self, batch_size=32):
        self.model = InceptionV3(include_top=False, weights="imagenet", pooling="avg")
        self.batch_size = batch_size
        self.data_format = K.image_data_format() 
Example #25
Source File: TransferLearning_reg.py    From Intelligent-Projects-Using-Python with MIT License
def __init__(self):
		parser = argparse.ArgumentParser(description='Process the inputs')
		parser.add_argument('--path',help='image directory')
		parser.add_argument('--class_folders',help='class images folder names')
		parser.add_argument('--dim',type=int,help='Image dimensions to process')
		parser.add_argument('--lr',type=float,help='learning rate',default=1e-4)
		parser.add_argument('--batch_size',type=int,help='batch size')
		parser.add_argument('--epochs',type=int,help='no of epochs to train')
		parser.add_argument('--initial_layers_to_freeze',type=int,help='the initial layers to freeze')
		parser.add_argument('--model',help='Standard Model to load',default='InceptionV3')
		parser.add_argument('--folds',type=int,help='num of cross validation folds',default=5)
		parser.add_argument('--mode',help='train or validation',default='train')
		parser.add_argument('--model_save_dest',help='dict with model paths')
		parser.add_argument('--outdir',help='output directory')
		
		args = parser.parse_args()
		self.path = args.path
		self.class_folders = json.loads(args.class_folders)
		self.dim  = int(args.dim)
		self.lr   = float(args.lr)
		self.batch_size = int(args.batch_size)
		self.epochs =  int(args.epochs)
		self.initial_layers_to_freeze = int(args.initial_layers_to_freeze)
		self.model = args.model
		self.folds = int(args.folds)
		self.mode = args.mode
		self.model_save_dest = args.model_save_dest
		self.outdir = args.outdir 
Example #26
Source File: train_cnn.py    From five-video-classification-methods with MIT License
def get_model(weights='imagenet'):
    # create the base pre-trained model
    base_model = InceptionV3(weights=weights, include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a softmax classification layer; note that data is expected to be
    # available in the enclosing (module-level) scope
    predictions = Dense(len(data.classes), activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    return model 
Example #27
Source File: extractor.py    From five-video-classification-methods with MIT License
def __init__(self, weights=None):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""

        self.weights = weights  # so we can check elsewhere which model

        if weights is None:
            # Get model with pretrained weights.
            base_model = InceptionV3(
                weights='imagenet',
                include_top=True
            )

            # We'll extract features at the final pool layer.
            self.model = Model(
                inputs=base_model.input,
                outputs=base_model.get_layer('avg_pool').output
            )

        else:
            # Load the model first.
            self.model = load_model(weights)

            # Then remove the top so we get features not predictions.
            # From: https://github.com/fchollet/keras/issues/2371
            self.model.layers.pop()
            self.model.layers.pop()  # two pops to get to pool layer
            self.model.outputs = [self.model.layers[-1].output]
            self.model.output_layers = [self.model.layers[-1]]
            self.model.layers[-1].outbound_nodes = [] 
Example #28
Source File: inceptionv3_crnn.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):


    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    inception_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
    # inception_model.load_weights("logs/2016-12-18-13-56-44/weights.21.model", by_name=True)

    for layer in inception_model.layers:
        layer.trainable = False

    x = inception_model.output
    #x = GlobalAveragePooling2D()(x)

    # (bs, y, x, c) --> (bs, x, y, c)
    x = Permute((2, 1, 3))(x)

    # (bs, x, y, c) --> (bs, x, y * c)
    _x, _y, _c = [int(s) for s in x.shape[1:]]  # use the public shape attribute, not the private _shape
    x = Reshape((_x, _y*_c))(x)
    x = Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat")(x)

    predictions = Dense(config["num_classes"], activation='softmax')(x)

    model = Model(inputs=inception_model.input, outputs=predictions)
    model.load_weights("logs/2017-01-02-13-39-41/weights.06.model")

    return model 
Example #29
Source File: inceptionv3.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):

    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    inception_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
    inception_model.summary()  # summary() prints directly and returns None, so no print() wrapper

    x = inception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(inputs=inception_model.input, outputs=predictions)
Example #30
Source File: test_keras_applications.py    From keras-onnx with MIT License
def test_InceptionV3(self):
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, target_size=299)
        self.assertTrue(*res)