Python keras.preprocessing.image.load_img() Examples

The following are 30 code examples of keras.preprocessing.image.load_img(), extracted from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module keras.preprocessing.image, or try the search function.
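Before the project excerpts, here is a minimal, hedged sketch of the typical load_img() workflow that most of the examples below follow; the file name sample.jpg and the 224x224 target size are placeholders rather than values taken from any particular example:

import numpy as np
from keras.preprocessing import image

# Load the image from disk as a PIL image, resized to the model's expected input size.
img = image.load_img('sample.jpg', target_size=(224, 224))
# Convert the PIL image to a float32 NumPy array of shape (224, 224, 3).
x = image.img_to_array(img)
# Add a leading batch dimension so the array can be passed to model.predict().
x = np.expand_dims(x, axis=0)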
Example #1
Source File: cluster_resnet.py    From lost with MIT License
def main(self):
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        feature_list = []
        img_path_list = []
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # predict ImageNet class scores
                scores = model.predict(x)
                sim_class = np.argmax(scores)
                print('Scores {}\nSimClass: {}'.format(scores, sim_class))
                self.outp.request_annos(img_path, img_sim_class=sim_class)
                self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
                self.update_progress(index*100/total) 
Example #2
Source File: utils.py    From neural-style-keras with MIT License
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image, scaling it so that its larger side is img_size.
    This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image 
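The excerpt ends at the comment above; the conversion helper itself is not shown. A hedged sketch of what such a helper might look like, assuming the tensor was produced by vgg16.preprocess_input (Caffe-style BGR ordering with the ImageNet channel means subtracted):

import numpy as np

def deprocess_image(x):
    # Drop the batch dimension if present: (1, H, W, 3) -> (H, W, 3).
    if x.ndim == 4:
        x = x[0]
    x = x.copy()
    # Add back the ImageNet channel means removed by vgg16.preprocess_input.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # Convert BGR back to RGB.
    x = x[:, :, ::-1]
    # Clip to the valid pixel range and cast to 8-bit integers.
    return np.clip(x, 0, 255).astype('uint8')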
Example #3
Source File: utils.py    From Neural-Network-Projects-with-Python with MIT License
def get_data(dir):
    X_train, Y_train = [], []
    X_test, Y_test = [], []
    subfolders = sorted([file.path for file in os.scandir(dir) if file.is_dir()])
    for idx, folder in enumerate(subfolders):
        for file in sorted(os.listdir(folder)):
            img = load_img(folder+"/"+file, color_mode='grayscale')
            img = img_to_array(img).astype('float32')/255
            img = img.reshape(img.shape[0], img.shape[1],1)
            if idx < 35:
                X_train.append(img)
                Y_train.append(idx)
            else:
                X_test.append(img)
                Y_test.append(idx-35)

    X_train = np.array(X_train)
    X_test = np.array(X_test)
    Y_train = np.array(Y_train)
    Y_test = np.array(Y_test)
    return (X_train, Y_train), (X_test, Y_test) 
Example #4
Source File: imagenet.py    From vergeml with MIT License
def predict(self, f, k=5, resize_mode='fill'):
        from keras.preprocessing import image
        from vergeml.img import resize_image

        filename = os.path.basename(f)

        if not os.path.exists(f):
            return dict(filename=filename, prediction=[])

        img = image.load_img(f)
        img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        preds = self.model.predict(x)
        pred = self._decode(preds, top=k)[0]
        prediction=[dict(probability=np.asscalar(perc), label=klass) for _, klass, perc in pred]

        return dict(filename=filename, prediction=prediction) 
Example #5
Source File: objcls.py    From sia-cog with MIT License
def predict(imagepath, target_x, target_y, name, model):
    if imagepath.startswith('http://') or imagepath.startswith('https://') or imagepath.startswith('ftp://'):
        response = requests.get(imagepath)
        img = Image.open(BytesIO(response.content))
        img = img.resize((target_x, target_y))
    else:
        if not os.path.exists(imagepath):
            raise Exception('Input image file does not exist')
        img = image.load_img(imagepath, target_size=(target_x, target_y))

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = processInputImage(name, x)
    preds = decodePrediction(name, model.predict(x))
    result = []
    for p in preds[0]:
        result.append({"synset": p[0], "text": p[1], "prediction": float("{0:.2f}".format((p[2] * 100)))})

    return json.loads(jsonpickle.encode(result, unpicklable=False)) 
Example #6
Source File: data_Keras.py    From U-net with MIT License
def create_test_data(self):
		# generate the .npy file for the test set
		i = 0
		print('-' * 30)
		print('Creating test images...')
		print('-' * 30)
		imgs = glob.glob(self.test_path + "/*." + self.img_type)           # ../data_set/train
		print(len(imgs))
		imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for imgname in imgs:
			midname = imgname[imgname.rindex("/") + 1:]   # the image's file name
			img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as a grayscale image
			img = img_to_array(img)
			imgdatas[i] = img
			if i % 100 == 0:
				print('Done: {0}/{1} images'.format(i, len(imgs)))
			i += 1
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # save the 30 training images and their 30 labels as .npy data
		# np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.') 
Example #7
Source File: ensemble_gpu.py    From kaggle-carvana-2017 with MIT License
def data_loader(q, ):
    for bi in batch_indices:
        start, end = bi
        x_batch = []
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            imgs = []
            for d in dirs:
                img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))
                imgs.append(np.squeeze(img))
            x_batch.append(np.array(imgs).transpose((1, 2, 0)))
        q.put((filenames_batch, np.array(x_batch)))

    for gpu in gpus:
        q.put((None, None)) 
Example #8
Source File: test.py    From Image-Caption-Generator with MIT License
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
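The excerpt stops at the comment above; the tokenizer-loading code is not included. A hedged sketch of the usual pattern, assuming the Keras Tokenizer was pickled to disk during preprocessing (the file name tokenizer.pkl is a placeholder):

from pickle import load

# Load the tokenizer that was fitted on the training captions (hypothetical path).
with open('tokenizer.pkl', 'rb') as f:
    tokenizer = load(f)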
Example #9
Source File: yolo_image.py    From ai-platform with MIT License
def load_image_pixels(filename, shape):
	# load the image to get its shape
	image = load_img(filename)
	width, height = image.size
	# load the image with the required size
	image = load_img(filename, target_size=shape)
	# convert to numpy array
	image = img_to_array(image)
	# scale pixel values to [0, 1]
	image = image.astype('float32')
	image /= 255.0
	# add a dimension so that we have one sample
	image = expand_dims(image, 0)
	return image, width, height

# get all of the results above a threshold 
Example #10
Source File: analyze.py    From Car-Recognition with MIT License
def predict(img_dir, model):
    img_files = []
    for root, dirs, files in os.walk(img_dir, topdown=False):
        for name in files:
            img_files.append(os.path.join(root, name))
    img_files = sorted(img_files)

    y_pred = []
    y_test = []

    for img_path in tqdm(img_files):
        # print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        preds = model.predict(x[None, :, :, :])
        decoded = decode_predictions(preds, top=1)
        pred_label = decoded[0][0][0]
        # print(pred_label)
        y_pred.append(pred_label)
        tokens = img_path.split(os.path.sep)
        class_id = int(tokens[-2])
        # print(str(class_id))
        y_test.append(class_id)

    return y_pred, y_test 
Example #11
Source File: preprocessing.py    From Image-Caption-Generator with MIT License
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Example #12
Source File: utils.py    From neural-style-keras with MIT License
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image scaling it so that its smaller size is img_size.
    The larger size is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s,new size: %s' %(str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size+crop_h, crop_v:img_size+crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to open, resize and format pictures into appropriate tensors 
Example #13
Source File: helper.py    From heatmaps with MIT License
def helper_test(model):
    img_path = "../examples/dog.jpg"
    new_model = to_heatmap(model)

    # Loading the image
    img = image.load_img(img_path, target_size=(800, 800))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    out = new_model.predict(x)

    s = "n02084071"  # Imagenet code for "dog"
    ids = synset_to_dfs_ids(s)
    heatmap = out[0]
    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        heatmap = np.sum(heatmap, axis=2)
    print(heatmap.shape)
    assert heatmap.shape[0] == heatmap.shape[1]
    K.clear_session() 
Example #14
Source File: data.py    From detect-cell-edge-use-unet with GNU General Public License v2.0
def create_test_data(self):

        # generate the .npy file for the test set
        i = 0
        print('-' * 30)
        print('Creating test images...')
        print('-' * 30)
        imgs = glob.glob(self.test_path + "/*." + self.img_type)           # deform/train
        print(len(imgs))
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for imgname in imgs:
            midname = imgname[imgname.rindex("/") + 1:]   # the image's file name
            img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as a grayscale image
            img = img_to_array(img)
            imgdatas[i] = img
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done', imgdatas.shape)
            np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # save the 30 training images and their 30 labels as .npy data
        # np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.') 
Example #15
Source File: dataset.py    From DeepTL-Lane-Change-Classification with MIT License
def load_images_for_keras(self, img_path, target_size=(224, 224)):

        features = []
        filenames = sorted(os.listdir(img_path))

        for filename in filenames:

            img = image.load_img(os.path.join(img_path, filename), target_size=target_size)
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)

            feature = self.model.predict(img)

            if img is not None:
                features.append(feature)

        return features 
Example #16
Source File: data.py    From vess2ret with MIT License
def _load_img_pair(self, idx, load_from_memory):
        """Get a pair of images with index idx."""
        if load_from_memory:
            a = self.a[idx]
            b = self.b[idx]
            return a, b

        fname = self.filenames[idx]

        a = load_img(os.path.join(self.a_dir, fname),
                     grayscale=self.is_a_grayscale,
                     target_size=self.target_size)
        b = load_img(os.path.join(self.b_dir, fname),
                     grayscale=self.is_b_grayscale,
                     target_size=self.target_size)

        a = img_to_array(a, self.dim_ordering)
        b = img_to_array(b, self.dim_ordering)

        return a, b 
Example #17
Source File: common.py    From semantic-embeddings with MIT License
def _compute_stats(self, mean = None, std = None):
        """ Computes channel-wise mean and standard deviation of all images in the dataset.
        
        If `mean` and `std` arguments are given, they will just be stored instead of being re-computed.

        The channel order of both is always "RGB", independent of `color_mode`.
        """
        
        if mean is None:
            mean = 0
            for fn in tqdm(self.train_img_files, desc = 'Computing channel mean'):
                mean += np.mean(np.asarray(load_img(fn), dtype=np.float64), axis = (0,1))
            mean /= len(self.train_img_files)
            print('Channel-wise mean:               {}'.format(mean))
        self.mean = np.asarray(mean, dtype=np.float32)
        if (mean is None) or (std is None):
            std = 0
            for fn in tqdm(self.train_img_files, desc = 'Computing channel variance'):
                std += np.mean((np.asarray(load_img(fn), dtype=np.float64) - self.mean) ** 2, axis = (0,1))
            std = np.sqrt(std / (len(self.train_img_files) - 1))
            print('Channel-wise standard deviation: {}'.format(std))
        self.std = np.asarray(std, dtype=np.float32) 
Example #18
Source File: testScoreWithAdapaKeras.py    From nyoka with Apache License 2.0
def test_01_image_classifier_with_image_as_input(self):
        
        cnn_pmml = KerasToPmml(self.model_final,model_name="MobileNetImage",description="Demo",\
            copyright="Internal User",dataSet='image',predictedClasses=['dogs','cats'])
        cnn_pmml.export(open('2classMBNet.pmml', "w"), 0)

        img = image.load_img('nyoka/tests/resizedCat.png')
        img = img_to_array(img)
        img = preprocess_input(img)
        imgtf = np.expand_dims(img, axis=0)
        model_pred=self.model_final.predict(imgtf)
        model_preds = {'dogs':model_pred[0][0],'cats':model_pred[0][1]}

        model_name  = self.adapa_utility.upload_to_zserver('2classMBNet.pmml')

        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, 'nyoka/tests/resizedCat.png','DN')
  
        self.assertEqual(abs(probabilities['cats'] - model_preds['cats']) < 0.00001, True)
        self.assertEqual(abs(probabilities['dogs'] - model_preds['dogs']) < 0.00001, True) 
Example #19
Source File: predict.py    From fine-tuning with GNU General Public License v3.0
def predict(imagePath):
    img = load_img(imagePath)
    img = img_to_array(img)
    output = img.copy()
    # make prediction
    results = rcnn.detect([img], verbose=0)
    r = results[0]
    for (box, score) in zip(r['rois'], r['scores']):
        # filter out weak detections
        if score < 0.5:
            continue
        label = "{}: {:.2f}".format('table', score)
        cv2.rectangle(output, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)
        cv2.putText(output, label, (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imwrite("prediction.jpg", output)
    return r['rois'] 
Example #20
Source File: demo.py    From heatmaps with MIT License
def display_heatmap(new_model, img_path, ids, preprocessing=None):
    # The quality is reduced.
    # If you have more than 8GB of RAM, you can try to increase it.
    img = image.load_img(img_path, target_size=(800, 1280))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    if preprocessing is not None:
        x = preprocessing(x)

    out = new_model.predict(x)

    heatmap = out[0]  # Removing batch axis.

    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=2)

    plt.imshow(heatmap, interpolation="none")
    plt.show() 
Example #21
Source File: common.py    From semantic-embeddings with MIT License
def _load_image(self, filename, target_size = None, randzoom = False):
        """ Loads an image file.

        # Arguments:

        - filename: The path of the image file.

        - target_size: Int or tuple of ints. Specifies the target size which the image will be resized to.
                       If a single int is given, it specifies the size of the smaller side of the image and the aspect ratio will be retained.
                       If set to -1, the image won't be resized.
                       If set to None, the default_target_size passed to the constructor will be used.
                       The actual size may be modified further if `randzoom` is True.
        
        - randzoom: If True and `self.randzoom_range` is not None, random zooming will be applied.
                    If `self.randzoom_range` is given as floats defining a range relative to the image size,
                    `target_size` will be used as reference if it is not None, otherwise the original image size.
        
        # Returns:
            the image as PIL image.
        """

        img = load_img(filename)
        if target_size is None:
            target_size = self.default_target_size
        
        if (target_size > 0) or (randzoom and (self.randzoom_range is not None)):
            if target_size <= 0:
                target_size = img.size
            if randzoom and (self.randzoom_range is not None):
                if isinstance(self.randzoom_range[0], float):
                    target_size = np.round(np.array(target_size) * np.random.uniform(self.randzoom_range[0], self.randzoom_range[1])).astype(int).tolist()
                else:
                    target_size = np.random.randint(self.randzoom_range[0], self.randzoom_range[1])
            if isinstance(target_size, int):
                target_size = (target_size, round(img.size[1] * (target_size / img.size[0]))) if img.size[0] < img.size[1] else (round(img.size[0] * (target_size / img.size[1])), target_size)
            img = img.resize(target_size, PIL.Image.BILINEAR)
        
        return img 
Example #22
Source File: image_helper.py    From detection-2016-nipsws with MIT License
def get_all_images(image_names, path_voc):
    images = []
    for j in range(np.size(image_names)):
        image_name = image_names[0][j]
        string = path_voc + '/JPEGImages/' + image_name + '.jpg'
        images.append(image.load_img(string, False))
    return images 
Example #23
Source File: datasets.py    From MLBlocks with MIT License
def _load_images(image_dir, filenames):
    LOGGER.debug('Loading %s images from %s', len(filenames), image_dir)
    images = []
    for filename in filenames:
        filename = os.path.join(image_dir, filename)

        image = load_img(filename)
        image = image.resize(tuple(INPUT_SHAPE[0:2]))
        image = img_to_array(image)
        image = image / 255.0  # Scale pixel values to [0, 1].
        images.append(image)

    return np.array(images) 
Example #24
Source File: predict_multithreaded.py    From kaggle-carvana-2017 with MIT License
def data_loader(q, ):
    for start in tqdm(range(0, len(filenames), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(filenames))
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            img = load_img(filename)

            stacked_channels = []
            for i in range(args.stacked_channels):
                channel_path = os.path.join(args.stacked_channels_dir,
                                            str(i),
                                            filename.split('/')[-1].replace('.jpg', '.png'))
                stacked_channel = load_img(channel_path, grayscale=True)
                stacked_channels.append(stacked_channel)
            stacked_img = np.dstack((img, *stacked_channels))

            x_batch.append(img_to_array(stacked_img))


        x_batch = preprocess_input(np.array(x_batch, np.float32), mode=args.preprocessing_function)
        if args.pred_tta:
            x_batch = do_tta(x_batch, args.pred_tta)
        padded_x = np.zeros((batch_size, 1280, 1920, args.stacked_channels + 3))
        padded_x[:, :, 1:-1, :] = x_batch
        q.put((filenames_batch, padded_x))

    for gpu in gpus:
        q.put((None, None)) 
Example #25
Source File: visualization.py    From Aesthetic_attributes_maps with MIT License
def deprocess(path):
    img_path = path
    img = load_img(img_path, target_size=(299, 299))
    x = img_to_array(img)
    return x 
Example #26
Source File: visualization.py    From Aesthetic_attributes_maps with MIT License
def load_image(path):
    img_path = path
    img = load_img(img_path, target_size=(299, 299))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x 
Example #27
Source File: Siamese.py    From Siamese-neural-network-for-change-detection with GNU General Public License v3.0
def siamese_network(input_image):
#    print('Inside Siamese')
    base_model = VGG16(weights='imagenet', include_top=False)
    model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)
    #print model.summary()
    img_path = input_image
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    feature = model.predict(x)
    return feature 
Example #28
Source File: Siamese_predict.py    From Siamese-neural-network-for-change-detection with GNU General Public License v3.0
def layman(input_image):
    img_path = input_image
    img = image.load_img(img_path, target_size=(50, 50))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    
    return x 
Example #29
Source File: images_to_array.py    From transfer with MIT License
def val_images_to_array(img_path, source_path, img_dim, categories):

    array_path = os.path.join(source_path, 'array')
    shutil.rmtree(array_path,ignore_errors=True)
    os.makedirs(array_path)

    print('Iterating over all categories: ', categories)
    category_lengths = []
    for category_idx, category in enumerate(categories):
        print('categories:', category)
        category_path = os.path.join(img_path, category)
        img_files = sorted(os.listdir(category_path))
        category_lengths.append(len(img_files))
        for img_idx, img_file in tqdm(enumerate(img_files)):
            this_img_path = os.path.join(category_path, img_file)
            img = load_img(this_img_path, target_size=(img_dim, img_dim))

            img_name = '{}-img-{}-{}'.format(img_idx, category, category_idx)
            label_name = '{}-label-{}-{}'.format(img_idx, category, category_idx)

            label = np.eye(len(categories), dtype = np.float32)[category_idx]

            img_array_path = os.path.join(array_path, img_name)
            img_label_path = os.path.join(array_path, label_name)

            np.save(img_array_path, img)
            np.save(img_label_path, label)
    category_lengths = np.array(category_lengths) / sum(category_lengths)
    category_lengths = list(category_lengths / max(category_lengths))
    category_rounds = {cat: min(int(np.round(1 / l)), 10) for cat, l in zip(categories, category_lengths)}
    return category_rounds 
Example #30
Source File: ResNet.py    From Model-Playgrounds with MIT License
def predict(path, model_path, index_file_path, MainUI):
    try:
        result_string = " Detected Object : Probability \n \n"

        # Making check to load Model
        if (MainUI.resnet_model_loaded == False):
            wx.CallAfter(pub.sendMessage, "report101", message="Loading ResNet model for the first time. This may take a few minutes or less than a minute. Please wait. \nLoading.....")
            model = ResNet50(include_top=True, weights="imagenet", model_path=model_path)
            wx.CallAfter(pub.sendMessage, "report101", message="ResNet model loaded.. Picture about to be processed.. \nLoading......")
            MainUI.model_collection_resnet.append(model)  # Loading model if not loaded yet
            MainUI.resnet_model_loaded = True
        else:
            wx.CallAfter(pub.sendMessage, "report101", message="Retrieving loaded model. \nLoading........")
            model = MainUI.model_collection_resnet[0]  # Getting Model from model array if loaded before
            wx.CallAfter(pub.sendMessage, "report101", message="ResNet model loaded.. Picture about to be processed.. \nLoading......")

        # Image prediction processing
        target_image = image.load_img(path, grayscale=False, target_size=(224, 224))
        target_image = image.img_to_array(target_image, data_format="channels_last")
        target_image = np.expand_dims(target_image, axis=0)

        target_image = preprocess_input(target_image, data_format="channels_last")
        wx.CallAfter(pub.sendMessage, "report101", message="Picture is transformed for prediction. \nLoading........")
        prediction = model.predict(x=target_image, steps=1)
        wx.CallAfter(pub.sendMessage, "report101", message="Picture prediction is done. Sending in results. \nLoading......")

        # Retrieving prediction result and sending it back to the thread
        prediction_result = decode_predictions(prediction, top=10, index_file_path=index_file_path)

        for results in prediction_result:
            countdown = 0
            for result in results:
                countdown += 1
                result_string += "(" + str(countdown) + ") " + str(result[1]) + " : " + str(100 * result[2])[
                                                                                        0:4] + "% \n"

        return result_string
    except Exception as e:
        return getattr(e, "message", repr(e))