Python keras.applications.inception_v3.preprocess_input() Examples

The following are 24 code examples of keras.applications.inception_v3.preprocess_input(), each taken from an open-source project; the source file and project are noted above each example. You may also want to check out the other available functions and classes of the module keras.applications.inception_v3.
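For reference, keras.applications.inception_v3.preprocess_input() only rescales pixel values from the [0, 255] range to the [-1, 1] range that InceptionV3 expects; it does not resize images or reorder channels. A minimal sketch, assuming a standalone Keras installation with NumPy:

import numpy as np
from keras.applications.inception_v3 import preprocess_input

# A single 1x1 "pixel" with three channels, given as floats in [0, 255].
x = np.array([[[[0.0, 127.5, 255.0]]]])
print(preprocess_input(x))  # approximately [[[[-1.  0.  1.]]]]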
Example #1
Source File: predict.py    From Transfer-Learning with MIT License
def predict(model, img, target_size):
    """Run model prediction on image
    Args:
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple
    Returns:
        list of predicted labels and their probabilities 
    """
    if img.size != target_size:
        img = img.resize(target_size)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return preds[0] 
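A hedged usage sketch for the predict() helper above; the image path and these imports are illustrative assumptions, while the helper itself relies on the imports already present in the original script (numpy, keras.preprocessing.image, preprocess_input):

import numpy as np
from PIL import Image
from keras.applications.inception_v3 import InceptionV3, decode_predictions

model = InceptionV3(weights='imagenet')
img = Image.open('cat.jpg')  # illustrative path
preds = predict(model, img, target_size=(299, 299))  # 1D vector of 1000 class probabilities
print(decode_predictions(np.expand_dims(preds, axis=0), top=3)[0])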
Example #2
Source File: test.py    From Image-Caption-Generator with MIT License
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
Example #3
Source File: predict.py    From Learning-Generative-Adversarial-Networks with MIT License
def predict(model, img, target_size):
  """Run model prediction on image
  Args:
    model: keras model
    img: PIL format image
    target_size: (w,h) tuple
  Returns:
    list of predicted labels and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)
  return preds[0] 
Example #4
Source File: kerasModel.py    From Learning-Generative-Adversarial-Networks with MIT License
def predict(image_file):
    """
    Predict the top 3 categories for the given image file.

    """
    img = image.load_img(image_file, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    results = model.predict(x)

    top3 = decode_predictions(results, top=3)[0]
    return [
        {'label': label, 'description': description,
         'probability': probability * 100.0}
        for label, description, probability in top3
    ] 
Example #5
Source File: extractor.py    From five-video-classification-methods with MIT License
def extract(self, image_path):
        img = image.load_img(image_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        # Get the prediction.
        features = self.model.predict(x)

        if self.weights is None:
            # For imagenet/default network:
            features = features[0]
        else:
            # For loaded network:
            features = features[0]

        return features 
Example #6
Source File: run_bottleneck.py    From CarND-Transfer-Learning-Lab with MIT License
def gen(session, data, labels, batch_size):
    def _f():
        start = 0
        end = start + batch_size
        n = data.shape[0]

        while True:
            X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
            X_batch = preprocess_input(X_batch)
            y_batch = labels[start:end]
            start += batch_size
            end += batch_size
            if start >= n:
                start = 0
                end = batch_size

            print(start, end)
            yield (X_batch, y_batch)

    return _f 
Example #7
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_Xception(tensor):
	from keras.applications.xception import Xception, preprocess_input
	return Xception(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
Example #8
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50, VGG models. For InceptionV3 and Xception it's okay to use the
    keras version (e.g. InceptionV3.preprocess_input) as the code path they hit
    works okay with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    One possibility is to change the keras implementation to look like the
    following, modified to work with BGR images (the standard in Spark), but we are not doing that for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean 
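For contrast, the InceptionV3/Xception preprocessing mentioned in the docstring is a plain elementwise rescaling, which is why the stock keras code path works with tf.Tensor inputs. A hedged sketch of that rescaling expressed as ops (the function name is illustrative, not part of spark-deep-learning):

def _inception_preprocess_input(x):
    # Scale pixel values from [0, 255] to [-1, 1], matching
    # keras.applications.inception_v3.preprocess_input.
    return (x / 127.5) - 1.0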
Example #9
Source File: preprocessing.py    From Image-Caption-Generator with MIT License
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Example #10
Source File: tf_image_test.py    From spark-deep-learning with Apache License 2.0
def test_load_image_vs_keras_RGB(self):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            # keras expects array in RGB order, we get it from image schema in BGR => need to flip
            preprocessed = preprocess_input(image_arr)

        output_col = "transformed_image"
        transformer = TFImageTransformer(channelOrder='RGB', inputCol="image", outputCol=output_col, graph=g,
                                         inputTensor=image_arr, outputTensor=preprocessed.name,
                                         outputMode="vector")

        image_df = image_utils.getSampleImageDF()
        df = transformer.transform(image_df.limit(5))

        for row in df.collect():
            processed = np.array(row[output_col], dtype = np.float32)
            # compare to keras loading
            images = self._loadImageViaKeras(row["image"]['origin'])
            image = images[0]
            image.shape = (1, image.shape[0] * image.shape[1] * image.shape[2])
            keras_processed = image[0]
            np.testing.assert_array_almost_equal(keras_processed, processed, decimal = 6)

    # Test full pre-processing for InceptionV3 as an example of a simple computation graph 
Example #11
Source File: tf_image_test.py    From spark-deep-learning with Apache License 2.0
def test_load_image_vs_keras(self):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            # keras expects array in RGB order, we get it from image schema in BGR => need to flip
            preprocessed = preprocess_input(imageIO._reverseChannels(image_arr))

        output_col = "transformed_image"
        transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=output_col, graph=g,
                                         inputTensor=image_arr, outputTensor=preprocessed.name,
                                         outputMode="vector")

        image_df = image_utils.getSampleImageDF()
        df = transformer.transform(image_df.limit(5))

        for row in df.collect():
            processed = np.array(row[output_col]).astype(np.float32)
            # compare to keras loading
            images = self._loadImageViaKeras(row["image"]['origin'])
            image = images[0]
            image.shape = (1, image.shape[0] * image.shape[1] * image.shape[2])
            keras_processed = image[0]
            np.testing.assert_array_almost_equal(keras_processed, processed, decimal=6) 
Example #12
Source File: deep_dream.py    From DeepLearning_Wavelet-LSTM with MIT License
def preprocess_image(image_path):
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = load_img(image_path)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
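The preprocess_image() helper above maps pixel values into [-1, 1]; to write a dreamed tensor back out as an image you need the inverse mapping. A hedged sketch of such a deprocessing step (assuming channels-last data; not necessarily the project's exact deprocess_image):

def deprocess_image(x):
    # Drop the batch dimension and undo inception_v3.preprocess_input:
    # map values from [-1, 1] back to displayable [0, 255] integers.
    x = x.reshape((x.shape[1], x.shape[2], 3))
    x /= 2.
    x += 0.5
    x *= 255.
    return np.clip(x, 0, 255).astype('uint8')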
Example #13
Source File: model.py    From transfer with MIT License
def gen_minibatches(array_dir, array_names, batch_size, architecture, final = False):

    array_names = list(array_names)

    while True:
        # in place shuffle
        np.random.shuffle(array_names)
        array_names_mb = array_names[:batch_size]

        arrays = []
        labels = []
        for array_name in array_names_mb:
            img_path = os.path.join(array_dir, array_name)
            array = np.load(img_path)
            if final:
                if architecture == 'resnet50':
                    array = np.squeeze(resnet_preprocess_input(array[np.newaxis].astype(np.float32)))
                elif architecture == 'xception':
                    array = np.squeeze(xception_preprocess_input(array[np.newaxis].astype(np.float32)))
                else:
                    array = np.squeeze(inception_v3_preprocess_input(array[np.newaxis].astype(np.float32)))

            arrays.append(array)
            labels.append(np.load(img_path.replace('-img-','-label-')))

        yield np.array(arrays), np.array(labels) 
Example #14
Source File: predict_model.py    From transfer with MIT License
def multi_predict(aug_gen, models, architecture):
    predicted = []
    for img, _ in aug_gen:
        if architecture == 'resnet50':
            img = resnet_preprocess_input(img[np.newaxis].astype(np.float32))
        elif architecture == 'xception':
            img = xception_preprocess_input(img[np.newaxis].astype(np.float32))
        else:
            img = inception_v3_preprocess_input(img[np.newaxis].astype(np.float32))
        for model in models:
            predicted.append(model.predict(img))
    predicted = np.array(predicted).sum(axis=0)
    pred_list = list(predicted[0])
    return predicted, pred_list 
Example #15
Source File: deep_dream.py    From pCVR with Apache License 2.0
def preprocess_image(image_path):
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = load_img(image_path)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
Example #16
Source File: image_utils.py    From spark-deep-learning with Apache License 2.0
def loadAndPreprocessKerasInceptionV3(raw_uri):
    # this is the canonical way to load and prep images in keras
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri, target_size=InceptionV3Constants.INPUT_SHAPE))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image) 
Example #17
Source File: models.py    From ICIAR2018 with MIT License
def predict(self, x):
        if self.data_format == "channels_first":
            x = x.transpose(0, 3, 1, 2)
        x = preprocess_inception(x.astype(K.floatx()))
        return self.model.predict(x, batch_size=self.batch_size) 
Example #18
Source File: utils.py    From deep-learning-note with MIT License
def preprocess_image(image_path):
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
Example #19
Source File: imageLoader.py    From glyphreader with MIT License
def loadImage(path):
    img = image.load_img(path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x 
Example #20
Source File: test_builder.py    From spark-deep-learning with Apache License 2.0
def test_keras_consistency(self):
        """ Exported model in Keras should get same result as original """

        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def keras_load_and_preproc(fpath):
            img = load_img(fpath, target_size=(299, 299))
            img_arr = img_to_array(img)
            img_iv3_input = iv3.preprocess_input(img_arr)
            return np.expand_dims(img_iv3_input, axis=0)

        imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

        model_ref = InceptionV3(weights="imagenet")
        preds_ref = model_ref.predict(imgs_iv3_input)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            model = InceptionV3(weights="imagenet")
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

            np.testing.assert_array_almost_equal(preds_tgt, preds_ref, decimal=5) 
Example #21
Source File: pre_model.py    From transfer with MIT License
def val_pre_model(source_path, folder, img_dim, architechture):

    array_path = os.path.join(source_path, folder)
    pre_model_path = os.path.join(source_path, 'pre_model')
    shutil.rmtree(pre_model_path,ignore_errors=True)
    os.makedirs(pre_model_path)

    if architechture == 'resnet50':
        popped, pre_model = get_resnet_pre_model(img_dim)
    elif architechture == 'xception':
        popped, pre_model = get_xception_pre_model(img_dim)
    else:
        popped, pre_model = get_inception_v3_pre_model(img_dim)

    for (array, label, array_name, label_name) in tqdm(gen_array_from_dir(array_path)):
        if architechture == 'resnet50':
            array = resnet_preprocess_input(array[np.newaxis].astype(np.float32))
        elif architechture == 'xception':
            array = xception_preprocess_input(array[np.newaxis].astype(np.float32))
        else:
            array = inception_v3_preprocess_input(array[np.newaxis].astype(np.float32))
        array_pre_model = np.squeeze(pre_model.predict(array, batch_size=1))

        array_name = array_name.split('.')[0]
        label_name = label_name.split('.')[0]

        img_pre_model_path = os.path.join(pre_model_path, array_name)
        label_pre_model_path = os.path.join(pre_model_path, label_name)

        np.save(img_pre_model_path, array_pre_model)
        np.save(label_pre_model_path, label) 
Example #22
Source File: tf_image_test.py    From spark-deep-learning with Apache License 2.0
def _loadImageViaKeras(self, raw_uri):
        uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
        image = img_to_array(load_img(uri))
        image = np.expand_dims(image, axis=0)
        return preprocess_input(image) 
Example #23
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_InceptionV3(tensor):
	from keras.applications.inception_v3 import InceptionV3, preprocess_input
	return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
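A hedged usage sketch for the InceptionV3 extractor above, assuming an image file on disk (the file name is illustrative):

import numpy as np
from keras.preprocessing.image import load_img, img_to_array

img = img_to_array(load_img('dog.jpg', target_size=(299, 299)))
tensor = np.expand_dims(img, axis=0)      # shape (1, 299, 299, 3)
bottleneck = extract_InceptionV3(tensor)  # shape (1, 8, 8, 2048) for a 299x299 input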
Example #24
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_Resnet50(tensor):
	from keras.applications.resnet50 import ResNet50, preprocess_input
	return ResNet50(weights='imagenet', include_top=False).predict(preprocess_input(tensor))