Python keras.applications.vgg16.preprocess_input() Examples

The following are 30 code examples of keras.applications.vgg16.preprocess_input(), collected from open-source projects. The project and source file for each example are noted in its header. You may also want to check out all available functions and classes of the keras.applications.vgg16 module.
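
As a quick orientation before the examples, here is a minimal, self-contained usage sketch (the image path 'elephant.jpg' is a placeholder). In its default 'caffe' mode, vgg16.preprocess_input expects RGB images with pixel values in [0, 255]; it converts them to BGR and subtracts the per-channel ImageNet means rather than rescaling to [0, 1].

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions

# Load and resize an image to VGG16's expected 224x224 input
img = load_img('elephant.jpg', target_size=(224, 224))  # placeholder path
x = img_to_array(img)          # (224, 224, 3), float32, RGB in [0, 255]
x = np.expand_dims(x, axis=0)  # add a batch dimension -> (1, 224, 224, 3)
x = preprocess_input(x)        # RGB -> BGR, subtract ImageNet channel means

model = VGG16(weights='imagenet')
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])
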
Example #1
Source File: utils.py    From neural-style-keras with MIT License
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image, scaling it so that its larger dimension equals
    img_size. This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image 
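
The comment above marks where the original utils.py goes on to define that inverse utility. A minimal sketch of such a deprocessing step (an assumption modeled on common neural-style code, not necessarily the project's exact source; the constants are the ImageNet channel means that vgg16.preprocess_input subtracts in 'caffe' mode):

def deprocess_image(x):
    # Drop the batch dimension: (1, H, W, 3) -> (H, W, 3)
    img = x[0].copy()
    # Add back the ImageNet channel means subtracted by preprocess_input (BGR order)
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # preprocess_input converted RGB -> BGR, so flip the channels back
    img = img[:, :, ::-1]
    return np.clip(img, 0, 255).astype('uint8')
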
Example #2
Source File: test.py    From Image-Caption-Generator with MIT License
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	else:
		raise ValueError('Unsupported model_type: {}'.format(model_type))
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
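
The trailing comment marks where the original script loads its tokenizer. A minimal sketch, assuming the tokenizer was pickled during preprocessing (the path 'tokenizer.pkl' is a placeholder):

import pickle

# Load the tokenizer saved during preprocessing (placeholder path)
with open('tokenizer.pkl', 'rb') as f:
    tokenizer = pickle.load(f)
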
Example #3
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_Xception(tensor):
	from keras.applications.xception import Xception, preprocess_input
	return Xception(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
Example #4
Source File: vgg16_feature_extractor.py    From keras-video-classifier with MIT License
def extract_vgg16_features_live(model, video_input_file_path):
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    count = 0
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # sample one frame per second
        success, image = vidcap.read()
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            frame = img_to_array(img)  # renamed from 'input' to avoid shadowing the builtin
            frame = np.expand_dims(frame, axis=0)
            frame = preprocess_input(frame)
            feature = model.predict(frame).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    return unscaled_features 
Example #5
Source File: vgg16_feature_extractor.py    From keras-video-classifier with MIT License
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    success = True
    count = 0
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, count * 1000)  # sample one frame per second
        success, image = vidcap.read()
        if success:
            img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            frame = img_to_array(img)  # renamed from 'input' to avoid shadowing the builtin
            frame = np.expand_dims(frame, axis=0)
            frame = preprocess_input(frame)
            feature = model.predict(frame).ravel()
            features.append(feature)
            count = count + 1
    unscaled_features = np.array(features)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features 
Example #6
Source File: utils.py    From neural-style-keras with MIT License
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image, scaling it so that its smaller dimension equals
    img_size. The larger dimension is then cropped to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    # Crop away the excess along each axis; note the crop is anchored at the
    # bottom-right corner rather than centered
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size + crop_h, crop_v:img_size + crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to open, resize and format pictures into appropriate tensors 
Example #7
Source File: datasets.py    From DEC-keras with MIT License
def extract_vgg16_features(x):
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model

    # im_h = x.shape[1]
    im_h = 224
    model = VGG16(include_top=True, weights='imagenet', input_shape=(im_h, im_h, 3))
    # if flatten:
    #     add_layer = Flatten()
    # else:
    #     add_layer = GlobalMaxPool2D()
    # feature_model = Model(model.input, add_layer(model.output))
    feature_model = Model(model.input, model.get_layer('fc1').output)
    print('extracting features...')
    x = np.asarray([img_to_array(array_to_img(im, scale=False).resize((im_h,im_h))) for im in x])
    x = preprocess_input(x)  # VGG16 'caffe' preprocessing: RGB -> BGR, subtract ImageNet channel means
    features = feature_model.predict(x)
    print('Features shape = ', features.shape)

    return features 
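
A usage sketch for the function above (shapes are illustrative; any batch of uint8 RGB images works, since the function resizes each image to 224x224 internally):

import numpy as np

x = np.random.randint(0, 256, size=(8, 64, 64, 3), dtype=np.uint8)
features = extract_vgg16_features(x)  # fc1 activations, shape (8, 4096)
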
Example #8
Source File: preprocessing.py    From Image-Caption-Generator with MIT License
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	else:
		raise ValueError('Unsupported model_type: {}'.format(model_type))
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = os.path.join(path, name)
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Example #9
Source File: test_shap.py    From AIX360 with Apache License 2.0
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Example #10
Source File: vgg.py    From inpainting-gmcnn-keras with MIT License
def _norm_inputs(input_img):
  ones = tf.constant(1, dtype=tf.float32)
  c = tf.constant(127.5, dtype=tf.float32)
  
  # Map inputs from [-1, 1] back to [0, 255], then apply VGG preprocessing
  img_norm = Lambda(lambda x: x + ones)(input_img)
  img_norm = Lambda(lambda x: x * c)(img_norm)
  img_norm = Lambda(preprocess_input)(img_norm)
  return img_norm 
Example #11
Source File: vggnet.py    From bootcamp with Apache License 2.0
def vgg_extract_feat(img_path, model, graph, sess):
    with sess.as_default():
        with graph.as_default():
            img = image.load_img(img_path, target_size=(input_shape[0], input_shape[1]))
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input_vgg(img)
            feat = model.predict(img)
            norm_feat = feat[0] / LA.norm(feat[0])
            norm_feat = [i.item() for i in norm_feat]
            return norm_feat 
Example #12
Source File: deep_dream.py    From costar_plan with Apache License 2.0
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_width, img_height))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to convert a tensor into a valid image 
Example #13
Source File: neural_style_transfer.py    From pCVR with Apache License 2.0
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to convert a tensor into a valid image 
Example #14
Source File: utils.py    From fast-neural-style-keras with MIT License
def process_image(image_path, width, height, resize=True):
    if resize:
        img = load_img(image_path, target_size=(height, width))
    else:
        img = load_img(image_path)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img 
Example #15
Source File: models.py    From ICIAR2018 with MIT License
def predict(self, x):
        if self.data_format == "channels_first":
            x = x.transpose(0, 3, 1, 2)
        x = preprocess_vgg(x.astype(K.floatx()))
        return self.model.predict(x, batch_size=self.batch_size) 
Example #16
Source File: feature.py    From Mosaicer with MIT License
def preprocess_image(img):
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)
    img_data = preprocess_input(img_data)
    return img_data 
Example #17
Source File: data_generator.py    From AdvancedEAST with MIT License
def gen(batch_size=cfg.batch_size, is_val=False):
    img_h, img_w = cfg.max_train_img_size, cfg.max_train_img_size
    x = np.zeros((batch_size, img_h, img_w, cfg.num_channels), dtype=np.float32)
    pixel_num_h = img_h // cfg.pixel_size
    pixel_num_w = img_w // cfg.pixel_size
    y = np.zeros((batch_size, pixel_num_h, pixel_num_w, 7), dtype=np.float32)
    if is_val:
        with open(os.path.join(cfg.data_dir, cfg.val_fname), 'r') as f_val:
            f_list = f_val.readlines()
    else:
        with open(os.path.join(cfg.data_dir, cfg.train_fname), 'r') as f_train:
            f_list = f_train.readlines()
    while True:
        for i in range(batch_size):
            # randomly pick an image filename from the list
            random_img = np.random.choice(f_list)
            img_filename = str(random_img).strip().split(',')[0]
            # load img and img anno
            img_path = os.path.join(cfg.data_dir,
                                    cfg.train_image_dir_name,
                                    img_filename)
            img = image.load_img(img_path)
            img = image.img_to_array(img)
            x[i] = preprocess_input(img, mode='tf')
            gt_file = os.path.join(cfg.data_dir,
                                   cfg.train_label_dir_name,
                                   img_filename[:-4] + '_gt.npy')
            y[i] = np.load(gt_file)
        yield x, y 
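
Note that this AdvancedEAST snippet (and Example #18 below) calls preprocess_input with mode='tf', which rescales pixels to [-1, 1] instead of applying the default 'caffe' mean subtraction. A quick sketch of the difference:

import numpy as np
from keras.applications.imagenet_utils import preprocess_input

x = np.full((1, 2, 2, 3), 255.0)
print(preprocess_input(x.copy(), mode='tf'))     # every value becomes 1.0
print(preprocess_input(x.copy(), mode='caffe'))  # RGB -> BGR, minus per-channel ImageNet means
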
Example #18
Source File: predict.py    From AdvancedEAST with MIT License
def predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):
    img = image.load_img(img_path)
    d_width, d_height = resize_image(img, cfg.max_predict_img_size)
    scale_ratio_w = d_width / img.width
    scale_ratio_h = d_height / img.height
    img = img.resize((d_width, d_height), Image.NEAREST).convert('RGB')
    img = image.img_to_array(img)
    img = preprocess_input(img, mode='tf')
    x = np.expand_dims(img, axis=0)
    y = east_detect.predict(x)

    y = np.squeeze(y, axis=0)
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)

    txt_items = []
    for score, geo in zip(quad_scores, quad_after_nms):
        if np.amin(score) > 0:
            rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
            rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
            txt_item = ','.join(map(str, rescaled_geo_list))
            txt_items.append(txt_item + '\n')
        elif not quiet:
            print('quad invalid with vertex num less than 4.')
    if cfg.predict_write2txt and len(txt_items) > 0:
        with open(txt_path, 'w') as f_txt:
            f_txt.writelines(txt_items) 
Example #19
Source File: get_img_features_VGG16.py    From machine-learning-note with MIT License
def process_pic(img_path, model='', predict=True):
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    # Add a batch dimension and apply VGG16 preprocessing
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    if predict:  # predict the picture's class
        last_layer_features = model.predict(x)  # 1000-way class probabilities
        # print('Predicted:', decode_predictions(last_layer_features, top=3)[0])
        return decode_predictions(last_layer_features, top=3)[0]
    else:  # return the 4096-dimensional feature vector
        last_layer_features = model.predict(x)
        return last_layer_features 
Example #20
Source File: vgg16_content_based_filtering.py    From keras-recommender with MIT License
def main():
    data_dir_path = './data/ml-latest-small'
    poster_dir_path = './data/posters'
    output_dir_path = './data/models'

    np.set_printoptions(threshold=sys.maxsize)  # np.nan is no longer a valid threshold
    # pandas' 'display.height' option has been removed; the options below suffice
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)

    df = pd.read_csv(data_dir_path + '/ratings.csv', sep=',')
    df_id = pd.read_csv(data_dir_path + '/links.csv', sep=',')
    df_movie_names = pd.read_csv(data_dir_path + '/movies.csv', sep=',')
    df = pd.merge(pd.merge(df, df_id, on='movieId'), df_movie_names, on='movieId')

    print(df.head())

    data_file = data_dir_path + '/imdb_id_to_image_dict.data'
    if not os.path.exists(data_file):
        imdb_id_to_image_dict = dict()
        for poster_file in glob(poster_dir_path + '/*.jpg'):
            print('Loading img at {}'.format(poster_file))
            img = kimage.load_img(poster_file, target_size=(224, 224))
            img = preprocess_input(np.expand_dims(kimage.img_to_array(img), axis=0))
            imdb_id = poster_file.split('/')[-1].split('.')[0]
            imdb_id_to_image_dict[imdb_id] = img
        pickle.dump(file=open(data_file, 'wb'), obj=imdb_id_to_image_dict)
    else:
        imdb_id_to_image_dict = pickle.load(file=open(data_file, 'rb'))

    recommender = Vgg16ContentBaseFiltering()
    recommender.fit(imdb_id_to_image_dict, model_dir_path=output_dir_path) 
Example #21
Source File: tsne_grid.py    From tsne-grid with MIT License
def get_activations(model, img_collection):
    activations = []
    for idx, img in enumerate(img_collection):
        if idx == to_plot:
            break
        print("Processing image {}".format(idx + 1))
        img = img.resize((224, 224), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        activations.append(np.squeeze(model.predict(x)))
    return activations 
Example #22
Source File: siamese.py    From keras-face with MIT License
def img_to_encoding(self, image_path):
        print('encoding: ', image_path)
        if self.vgg16_model is None:
            self.vgg16_model = self.create_vgg16_model()

        image = cv2.imread(image_path, 1)
        img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
        img_input = img_to_array(img)  # renamed from 'input' to avoid shadowing the builtin
        img_input = np.expand_dims(img_input, axis=0)
        img_input = preprocess_input(img_input)
        return self.vgg16_model.predict(img_input) 
Example #23
Source File: perceptual_model.py    From style-image-prior with GNU General Public License v3.0
def call(self, inputs, mask=None):
		return self.__model(vgg16.preprocess_input(inputs)) 
Example #24
Source File: utils.py    From deepxplore with MIT License
def preprocess_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    input_img_data = image.img_to_array(img)
    input_img_data = np.expand_dims(input_img_data, axis=0)
    input_img_data = preprocess_input(input_img_data)  # final input shape = (1,224,224,3)
    return input_img_data 
Example #25
Source File: utils.py    From style-transfer with MIT License
def preprocess_image(image_path, desired_dims):
	img = load_img(image_path, target_size=desired_dims)
	img = img_to_array(img)
	img = np.expand_dims(img, axis=0)
	img = vgg16.preprocess_input(img)
	return img

# util function to convert a tensor into a valid image 
Example #26
Source File: architectures.py    From deepJDOT with MIT License
def vgg16_fe(img_input):
    # net = preprocess_input(img_input)
    vgg_model = VGG16(weights='imagenet', include_top=True, input_tensor=img_input)
    vgg_model.layers.pop()  # drop the final softmax layer from the layer list
    return vgg_model.layers[-1].output  # output tensor of the last fully-connected layer 
Example #27
Source File: architectures.py    From deepJDOT with MIT License
def vgg16F_fe(img_input):
    # net = preprocess_input(img_input)
    from keras_vggface.vggface import VGGFace
    vgg_model = VGGFace(include_top=False, input_tensor=img_input, pooling='avg')
    #vgg_model.layers.pop()
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(1024, activation='relu', trainable=True)(x)
    x = Dense(512, activation='relu', trainable=True)(x)
    model = dnn.Model(inputs=vgg_model.input, outputs=x)
    return model.layers[-1].output 
Example #28
Source File: CNNFeatures.py    From videofeatures with MIT License
def computeFeatures(self, video):
    x = vgg16.preprocess_input(video)
    features = self.model.predict(x)
    return features 
Example #29
Source File: CNNFeatures.py    From videofeatures with MIT License
def computeFeatures(self, video):
    x = resnet50.preprocess_input(video)
    features = self.model.predict(x)
    return features.reshape((-1, 2048)) 
Example #30
Source File: data.py    From udacity-SDC-baseline with MIT License
def vgg_preprocess_input(x):
    return vgg16.preprocess_input(x)