Python tensorflow.python.keras.preprocessing.image.load_img() Examples
The following are 6 code examples of tensorflow.python.keras.preprocessing.image.load_img(). You can go to the original project or source file noted above each example, or check out the other functions and classes of the tensorflow.python.keras.preprocessing.image module.
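Before the project examples below, here is a minimal, self-contained sketch of the basic load_img() / img_to_array() round trip. The file name example.jpg and the 224x224 target size are placeholders chosen for illustration, not taken from any of the projects:

from tensorflow.python.keras.preprocessing.image import load_img, img_to_array

# load_img() returns a PIL image, resized to the requested (height, width).
pil_img = load_img("example.jpg", target_size=(224, 224))

# img_to_array() converts it to a float32 NumPy array of shape (224, 224, 3).
arr = img_to_array(pil_img)
print(arr.shape, arr.dtype)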
Example #1
Source File: idenprof.py From IdenProf with MIT License
def run_inference():
    # Build the IdenProf ResNet50 classifier and load its trained weights.
    model = ResNet50(input_shape=(224, 224, 3), num_classes=10)
    model.load_weights(MODEL_PATH)

    # Load the test picture, resize it to the 224x224 input size, and add a batch dimension.
    picture = os.path.join(execution_path, "Haitian-fireman.jpg")
    image_to_predict = image.load_img(picture, target_size=(224, 224))
    image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
    image_to_predict = np.expand_dims(image_to_predict, axis=0)
    image_to_predict = preprocess_input(image_to_predict)

    # Run the forward pass and print the top-5 labels with their confidence (in percent).
    prediction = model.predict(x=image_to_predict, steps=1)
    predictiondata = decode_predictions(prediction, top=int(5), model_json=JSON_PATH)

    for result in predictiondata:
        print(str(result[0]), " : ", str(result[1] * 100))

# run_inference()
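For comparison, the same load → array → expand_dims → preprocess_input pipeline against the stock ImageNet-trained ResNet50 bundled with Keras (using the public tf.keras paths rather than IdenProf's custom weights and its decode_predictions) might look roughly like the sketch below; photo.jpg is a placeholder file name:

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import (
    ResNet50, preprocess_input, decode_predictions)

model = ResNet50(weights="imagenet")

img = image.load_img("photo.jpg", target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)   # add the batch dimension
x = preprocess_input(x)         # ImageNet channel-mean normalization

preds = model.predict(x)
for class_id, label, score in decode_predictions(preds, top=5)[0]:
    print(label, ":", score * 100)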
Example #2
Source File: ae.py From BVAE-tf with The Unlicense
def test():
    import os
    import numpy as np
    from PIL import Image
    from tensorflow.python.keras.preprocessing.image import load_img
    from models import Darknet19Encoder, Darknet19Decoder

    inputShape = (256, 256, 3)
    batchSize = 8
    latentSize = 100

    img = load_img(os.path.join(os.path.dirname(__file__),
                                '..', 'images', 'img.jpg'),
                   target_size=inputShape[:-1])
    img.show()
    img = np.array(img, dtype=np.float32) * (2/255) - 1  # scale pixel values to [-1, 1]
    # print(np.min(img))
    # print(np.max(img))
    # print(np.mean(img))
    img = np.array([img]*batchSize)  # make fake batches to improve GPU utilization

    # This is how you build the autoencoder
    encoder = Darknet19Encoder(inputShape, latentSize=latentSize,
                               latentConstraints='bvae', beta=69)
    decoder = Darknet19Decoder(inputShape, latentSize=latentSize)
    bvae = AutoEncoder(encoder, decoder)
    bvae.ae.compile(optimizer='adam', loss='mean_absolute_error')

    while True:
        bvae.ae.fit(img, img, epochs=100, batch_size=batchSize)

        # example retrieving the latent vector
        latentVec = bvae.encoder.predict(img)[0]
        print(latentVec)

        pred = bvae.ae.predict(img)  # get the reconstructed image
        pred = np.uint8((pred + 1) * 255/2)  # convert to regular image values
        pred = Image.fromarray(pred[0])
        pred.show()  # display popup
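The script above tiles one picture into a fake batch purely to keep the GPU busy. To train on real data instead, a batch can be built from a folder of images with the same [-1, 1] scaling; the helper below is a hypothetical sketch (the image_dir argument is a placeholder, not part of BVAE-tf):

import os
import numpy as np
from tensorflow.python.keras.preprocessing.image import load_img

def load_batch(image_dir, input_shape=(256, 256, 3)):
    # Load every file in the folder, resize, and scale pixels to [-1, 1].
    files = sorted(os.listdir(image_dir))
    imgs = [load_img(os.path.join(image_dir, f), target_size=input_shape[:-1])
            for f in files]
    return np.stack([np.array(im, dtype=np.float32) * (2 / 255) - 1 for im in imgs])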
Example #3
Source File: model_util.py From image-similarity with MIT License
def preprocess_image(path):
    '''Process an image to numpy array.

    Args:
        path: the path of the image.

    Returns:
        Numpy array of the image.
    '''
    img = process_image.load_img(path, target_size=(224, 224))
    x = process_image.img_to_array(img)
    # x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
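Because the expand_dims call is commented out, preprocess_image() returns a single unbatched (224, 224, 3) array. To score several images at once, the arrays can be stacked into one batch first; in the sketch below, paths and feature_model are hypothetical placeholders:

import numpy as np

# 'paths' is a hypothetical list of image file paths and 'feature_model'
# a hypothetical Keras model used for feature extraction.
batch = np.stack([preprocess_image(p) for p in paths])   # shape (N, 224, 224, 3)
features = feature_model.predict(batch)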
Example #4
Source File: tsne_grid.py From tsne-grid with MIT License
def load_img(in_dir):
    pred_img = [f for f in os.listdir(in_dir)
                if os.path.isfile(os.path.join(in_dir, f))]
    img_collection = []
    for idx, img in enumerate(pred_img):
        img = os.path.join(in_dir, img)
        img_collection.append(image.load_img(img, target_size=(out_res, out_res)))
    if (np.square(out_dim) > len(img_collection)):
        raise ValueError("Cannot fit {} images in {}x{} grid".format(
            len(img_collection), out_dim, out_dim))
    return img_collection
Example #5
Source File: tsne_grid.py From tsne-grid with MIT License
def main():
    model = build_model()
    img_collection = load_img(in_dir)
    activations = get_activations(model, img_collection)
    print("Generating 2D representation.")
    X_2d = generate_tsne(activations)
    print("Generating image grid.")
    save_tsne_grid(img_collection, X_2d, out_res, out_dim)
Example #6
Source File: data_preprocesser.py From BCNN-keras-clean with MIT License
def _get_batches_of_transformed_samples(self, index_array):
    batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=floatx())
    grayscale = self.color_mode == 'grayscale'
    # Build batch of image data
    for i, j in enumerate(index_array):
        fname = self.filenames[j]
        img = load_img(os.path.join(self.directory, fname),
                       grayscale=grayscale,
                       target_size=None,
                       interpolation=self.interpolation)
        x = img_to_array(img, data_format=self.data_format)
        # Pillow images should be closed after `load_img`, but not PIL images.
        if hasattr(img, 'close'):
            img.close()
        x = self.image_data_generator.standardize(x)
        batch_x[i] = x
    # Optionally save augmented images to disk for debugging purposes
    if self.save_to_dir:
        for i, j in enumerate(index_array):
            img = array_to_img(batch_x[i], self.data_format, scale=True)
            fname = '{prefix}_{index}_{hash}.{format}'.format(
                prefix=self.save_prefix,
                index=j,
                hash=np.random.randint(1e7),
                format=self.save_format)
            img.save(os.path.join(self.save_to_dir, fname))
    # Build batch of labels
    if self.class_mode == 'input':
        batch_y = batch_x.copy()
    elif self.class_mode == 'sparse':
        batch_y = self.classes[index_array]
    elif self.class_mode == 'binary':
        batch_y = self.classes[index_array].astype(floatx())
    elif self.class_mode == 'categorical':
        batch_y = np.zeros((len(batch_x), self.num_classes), dtype=floatx())
        for i, label in enumerate(self.classes[index_array]):
            batch_y[i, label] = 1.
    else:
        return batch_x
    return batch_x, batch_y
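_get_batches_of_transformed_samples() is the hook Keras' directory iterators use to materialize each (batch_x, batch_y) pair. In everyday use such an iterator is obtained from ImageDataGenerator.flow_from_directory() and consumed with next() or passed to model training; a minimal sketch, with data_dir as a placeholder directory containing one sub-folder per class:

from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
iterator = datagen.flow_from_directory(
    "data_dir",                  # placeholder path
    target_size=(224, 224),
    batch_size=32,
    class_mode="categorical")

# Each call yields a batch built by a method like the one shown above.
batch_x, batch_y = next(iterator)
print(batch_x.shape, batch_y.shape)   # e.g. (32, 224, 224, 3) and (32, num_classes)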