Python keras.preprocessing.image.array_to_img() Examples

The following are 19 code examples of keras.preprocessing.image.array_to_img(). Each example notes the original project and source file it was taken from. You may also want to check out all available functions/classes of the module keras.preprocessing.image, or try the search function.
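Before the per-project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual round trip: array_to_img() turns a 3D NumPy array into a PIL Image, rescaling float values to the 0-255 range unless scale=False is passed, and img_to_array() converts the image back into an array.

import numpy as np
from keras.preprocessing.image import array_to_img, img_to_array

x = np.random.random((64, 64, 3))   # float array in [0, 1], channels_last
img = array_to_img(x)               # PIL Image; scale=True (default) maps values to 0-255
img.save('random.png')              # the result is a PIL Image, so the full PIL API applies

x_back = img_to_array(img)          # float32 array of shape (64, 64, 3)
print(img.size, x_back.shape)       # (64, 64) (64, 64, 3)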
Example #1
Source File: test_preprocessing.py    From Keras-FCN with MIT License
def test_pair_crop(crop_function):
    arr1 = np.random.random((500, 800))
    arr2 = np.random.random((500, 800))

    img1 = PILImage.fromarray(arr1)
    img2 = PILImage.fromarray(arr2)

    crop_width = img1.width // 5
    crop_height = img1.height // 5

    result1, result2 = crop_function(img_to_array(img1),
        img_to_array(img2),
        (crop_height, crop_width),
        'channels_last')
    result1 = array_to_img(result1)
    result2 = array_to_img(result2)

    assert result1.width == crop_width == result2.width
    assert result1.height == crop_height == result2.height
Example #2
Source File: my_image.py    From MachineLearning with Apache License 2.0
def next(self):
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array, current_index, current_batch_size = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        batch_x = np.zeros(tuple([current_batch_size] + list(self.image_size)), dtype=K.floatx())
        for i, j in enumerate(index_array):
            x = scipy.misc.imread(self.x[j])
            x = scipy.misc.imresize(x, self.image_size)
            x = self.image_data_generator.random_transform(x.astype(K.floatx()))
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = image.array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                                  index=current_index + i,
                                                                  hash=np.random.randint(1e4),
                                                                  format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        batch_y = self.y[index_array]
        return batch_x, batch_y 
Example #3
Source File: utils.py    From enet-keras with MIT License
def resize(item, target_h, target_w, keep_aspect_ratio=False):
    """
    Resizes an image to match target dimensions
    :type item: np.ndarray
    :type target_h: int
    :type target_w: int
    :param item: 3d numpy array or PIL.Image
    :param target_h: height in pixels
    :param target_w: width in pixels
    :param keep_aspect_ratio: If False the image is stretched to the target dimensions; if True it is thumbnailed so that neither side exceeds the target size
    :return: 3d numpy array
    """
    img = array_to_img(item, scale=False)
    if keep_aspect_ratio:
        img.thumbnail((target_w, target_h), PILImage.ANTIALIAS)
        img_resized = img
    else:
        img_resized = img.resize((target_w, target_h), resample=PILImage.NEAREST)

    # convert output
    img_resized = img_to_array(img_resized)
    img_resized = img_resized.astype(dtype=np.uint8)

    return img_resized 
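A hypothetical call to the resize() helper above (the input array and sizes are invented for illustration): with keep_aspect_ratio=False the image is stretched to exactly the target size, while keep_aspect_ratio=True thumbnails it so that neither side exceeds the target.

import numpy as np

item = (np.random.random((480, 640, 3)) * 255).astype(np.uint8)
stretched = resize(item, target_h=256, target_w=256)
thumb = resize(item, target_h=256, target_w=256, keep_aspect_ratio=True)
print(stretched.shape)   # (256, 256, 3)
print(thumb.shape)       # (192, 256, 3) for a 480x640 input, aspect ratio preserved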
Example #4
Source File: datasets.py    From DEC-keras with MIT License
def extract_vgg16_features(x):
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model

    # im_h = x.shape[1]
    im_h = 224
    model = VGG16(include_top=True, weights='imagenet', input_shape=(im_h, im_h, 3))
    # if flatten:
    #     add_layer = Flatten()
    # else:
    #     add_layer = GlobalMaxPool2D()
    # feature_model = Model(model.input, add_layer(model.output))
    feature_model = Model(model.input, model.get_layer('fc1').output)
    print('extracting features...')
    x = np.asarray([img_to_array(array_to_img(im, scale=False).resize((im_h,im_h))) for im in x])
    x = preprocess_input(x)  # VGG16-style ImageNet preprocessing (channel mean subtraction)
    features = feature_model.predict(x)
    print('Features shape = ', features.shape)

    return features 
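A hypothetical call to extract_vgg16_features() above, assuming x is an array of images of shape (n_samples, height, width, 3); each image is resized to 224x224 via array_to_img() before being fed to VGG16, the ImageNet weights are downloaded on first use, and the 4096-dimensional 'fc1' activations are returned.

import numpy as np

x = (np.random.random((8, 32, 32, 3)) * 255).astype('float32')  # e.g. 8 CIFAR-sized images
features = extract_vgg16_features(x)
print(features.shape)  # expected: (8, 4096)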
Example #5
Source File: KerasCallback.py    From aetros-cli with MIT License
def make_image_from_dense_softmax(self, neurons):
        from aetros.utils import array_to_img

        img = array_to_img(neurons.reshape((1, len(neurons), 1)))
        img = img.resize((9, len(neurons) * 8))

        return img 
Example #6
Source File: test_preprocessing.py    From Keras-FCN with MIT License
def test_crop(crop_function):
    arr = np.random.random((500, 800))

    img = PILImage.fromarray(arr)

    crop_width = img.width // 5
    crop_height = img.height // 5

    result = crop_function(img_to_array(img), (crop_height, crop_width), 'channels_last')
    result = array_to_img(result)

    assert result.width == crop_width
    assert result.height == crop_height 
Example #7
Source File: predict.py    From enet-keras with MIT License
def run(segmenter, data):
    data_gen = data['data_gen']
    num_instances = data['num_instances']
    out_directory = os.path.realpath(data['dir_target'])
    keep_context = data['keep_context']
    # dataset = getattr(datasets, data['dataset_name'])(**data)
    dataset = getattr(datasets, data['dataset_name'])

    for idx, image in enumerate(data_gen):
        if idx > 20:
            break
        print('Processing {} out of {}'.format(idx+1, num_instances), end='\r')

        pred_final, scores = predict(segmenter, image, h=dh, w=dw)

        # draw prediction as rgb
        pred_final = color_output_image(dataset.palette, pred_final[:, :, 0])
        pred_final = array_to_img(pred_final)

        out_file = os.path.join(
            out_directory,
            '{}_{}_{}_out.png'.format(
                idx,
                keep_context,
                utils.basename_without_ext(pw)))

        sys.stdout.flush()
        if os.path.isfile(out_file):
            continue

        utils.ensure_dir(out_directory)
        print('Saving output to {}'.format(out_file))
        pilimg = PILImage.fromarray(image.astype(np.uint8), mode='RGB')
        pilimg.save(out_file.replace('_out.png', '.png'))
        pred_final.save(out_file) 
Example #8
Source File: predict.py    From AdvancedEAST with MIT License
def cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):
    geo /= [scale_ratio_w, scale_ratio_h]
    p_min = np.amin(geo, axis=0)
    p_max = np.amax(geo, axis=0)
    min_xy = p_min.astype(int)
    max_xy = p_max.astype(int) + 2
    sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()
    for m in range(min_xy[1], max_xy[1]):
        for n in range(min_xy[0], max_xy[0]):
            if not point_inside_of_quad(n, m, geo, p_min, p_max):
                sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255
    sub_im = image.array_to_img(sub_im_arr, scale=False)
    sub_im.save(img_path + '_subim%d.jpg' % s) 
Example #9
Source File: data.py    From U-net-segmentation with GNU General Public License v2.0
def Augmentation(self):
        # Read the 3-channel train and label images, convert them to arrays,
        # copy the label's first channel into the train image's third channel,
        # then run data augmentation on the merged result.
        print("Running Augmentation")
        """
        Start augmentation.....
        """
        trains = self.train_imgs
        labels = self.label_imgs
        path_train = self.train_path
        path_label = self.label_path
        path_merge = self.merge_path
        imgtype = self.img_type
        path_aug_merge = self.aug_merge_path
        print(len(trains), len(labels))
        if len(trains) != len(labels) or len(trains) == 0 or len(labels) == 0:
            print("trains can't match labels")
            return 0
        for i in range(len(trains)):
            img_t = load_img(path_train + "/" + str(i) + "." + imgtype)  # read the train image
            img_l = load_img(path_label + "/" + str(i) + "." + imgtype)  # read the label image
            x_t = img_to_array(img_t)                                    # convert to arrays
            x_l = img_to_array(img_l)
            x_t[:, :, 2] = x_l[:, :, 0]                                  # store the label as the train image's third channel
            img_tmp = array_to_img(x_t)
            img_tmp.save(path_merge + "/" + str(i) + "." + imgtype)      # save the merged image
            img = x_t
            img = img.reshape((1,) + img.shape)                          # reshape to (1, 512, 512, 3)
            savedir = path_aug_merge + "/" + str(i)                      # directory for the augmented merged images
            if not os.path.lexists(savedir):
                os.mkdir(savedir)
            self.doAugmentate(img, savedir, str(i))                      # data augmentation
Example #10
Source File: KerasCallback.py    From aetros-cli with MIT License
def make_image(self, data):
        from keras.preprocessing.image import array_to_img
        try:
            if len(data.shape) == 2:
                # grayscale image, just add one channel
                data = data.reshape((data.shape[0], data.shape[1], 1))

            image = array_to_img(data)
        except Exception:
            return None

        # image = image.resize((128, 128))

        return image 
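A hypothetical usage of make_image() above, where callback stands for the surrounding KerasCallback instance: a 2D activation map gets a channel axis added before array_to_img() is applied, and None is returned if the conversion fails.

import numpy as np

activations = np.random.random((28, 28))   # e.g. a feature map without a channel axis
img = callback.make_image(activations)     # 'callback' is assumed to be the enclosing callback object
if img is not None:
    img.save('activation.png')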
Example #11
Source File: KerasCallback.py    From aetros-cli with MIT License
def make_image_from_dense(self, neurons):
        from aetros.utils import array_to_img
        cols = int(math.ceil(math.sqrt(len(neurons))))

        even_length = cols * cols
        diff = even_length - len(neurons)
        if diff > 0:
            neurons = np.append(neurons, np.zeros(diff, dtype=neurons.dtype))

        img = array_to_img(neurons.reshape((1, cols, cols)))
        img = img.resize((cols * 8, cols * 8))

        return img 
Example #12
Source File: utils_backdoor.py    From backdoor with MIT License
def dump_image(x, filename, format):
    img = image.array_to_img(x, scale=False)
    img.save(filename, format)
    return 
Example #13
Source File: data_Keras.py    From U-net with MIT License
def augmentation(self):
		# Read the 3-channel train and label images, convert them to arrays,
		# copy the label's first channel into the train image's third channel,
		# then run data augmentation on the merged result.
		print("Running Augmentation")

		# Start augmentation.....
		trains = self.train_imgs
		labels = self.label_imgs
		path_train = self.train_path
		path_label = self.label_path
		path_merge = self.merge_path
		imgtype = self.img_type
		path_aug_merge = self.aug_merge_path
		print('%d images \n%d labels' % (len(trains), len(labels)))
		if len(trains) != len(labels) or len(trains) == 0 or len(labels) == 0:
			print("trains can't match labels")
			return 0
		if not os.path.lexists(path_merge):
			os.mkdir(path_merge)
		if not os.path.lexists(path_aug_merge):
			os.mkdir(path_aug_merge)
		for i in range(len(trains)):
			img_t = load_img(path_train + "/" + str(i) + "." + imgtype)  # read the train image
			img_l = load_img(path_label + "/" + str(i) + "." + imgtype)  # read the label image
			x_t = img_to_array(img_t)                                    # convert to arrays
			x_l = img_to_array(img_l)
			x_t[:, :, 2] = x_l[:, :, 0]                                  # store the label as the train image's third channel
			img_tmp = array_to_img(x_t)
			img_tmp.save(path_merge + "/" + str(i) + "." + imgtype)      # save the merged image
			img = x_t
			img = img.reshape((1,) + img.shape)                          # reshape to (1, 512, 512, 3)
			savedir = path_aug_merge + "/" + str(i)                      # directory for the augmented merged images
			if not os.path.lexists(savedir):
				os.mkdir(savedir)
			print("running do_augmentate on image %d" % i)
			self.do_augmentate(img, savedir, str(i))                      # data augmentation
Example #14
Source File: data.py    From detect-cell-edge-use-unet with GNU General Public License v2.0
def Augmentation(self):
        # Read the 3-channel train and label images, convert them to arrays,
        # copy the label's first channel into the train image's third channel,
        # then run data augmentation on the merged result.
        print("Running Augmentation")
        """
        Start augmentation.....
        """
        trains = self.train_imgs
        labels = self.label_imgs
        path_train = self.train_path
        path_label = self.label_path
        path_merge = self.merge_path
        imgtype = self.img_type
        path_aug_merge = self.aug_merge_path
        print(len(trains), len(labels))
        if len(trains) != len(labels) or len(trains) == 0 or len(labels) == 0:
            print("trains can't match labels")
            return 0
        for i in range(len(trains)):
            img_t = load_img(path_train + "/" + str(i) + "." + imgtype)  # read the train image
            img_l = load_img(path_label + "/" + str(i) + "." + imgtype)  # read the label image
            x_t = img_to_array(img_t)                                    # convert to arrays
            x_l = img_to_array(img_l)
            x_t[:, :, 2] = x_l[:, :, 0]                                  # store the label as the train image's third channel
            img_tmp = array_to_img(x_t)
            img_tmp.save(path_merge + "/" + str(i) + "." + imgtype)      # save the merged image
            img = x_t
            img = img.reshape((1,) + img.shape)                          # reshape to (1, 512, 512, 3)
            savedir = path_aug_merge + "/" + str(i)                      # directory for the augmented merged images
            if not os.path.lexists(savedir):
                os.mkdir(savedir)
            self.doAugmentate(img, savedir, str(i))                      # data augmentation
Example #15
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_img_utils(self):
        height, width = 10, 8

        # Test th data format
        x = np.random.random((3, height, width))
        img = image.array_to_img(x, data_format='channels_first')
        assert img.size == (width, height)
        x = image.img_to_array(img, data_format='channels_first')
        assert x.shape == (3, height, width)
        # Test 2D
        x = np.random.random((1, height, width))
        img = image.array_to_img(x, data_format='channels_first')
        assert img.size == (width, height)
        x = image.img_to_array(img, data_format='channels_first')
        assert x.shape == (1, height, width)

        # Test tf data format
        x = np.random.random((height, width, 3))
        img = image.array_to_img(x, data_format='channels_last')
        assert img.size == (width, height)
        x = image.img_to_array(img, data_format='channels_last')
        assert x.shape == (height, width, 3)
        # Test 2D
        x = np.random.random((height, width, 1))
        img = image.array_to_img(x, data_format='channels_last')
        assert img.size == (width, height)
        x = image.img_to_array(img, data_format='channels_last')
        assert x.shape == (height, width, 1)

        # Test invalid use case
        with pytest.raises(ValueError):
            x = np.random.random((height, width))  # not 3D
            img = image.array_to_img(x, data_format='channels_first')
        with pytest.raises(ValueError):
            x = np.random.random((height, width, 3))
            img = image.array_to_img(x, data_format='channels')  # unknown data_format
        with pytest.raises(ValueError):
            x = np.random.random((height, width, 5))  # neither RGB nor gray-scale
            img = image.array_to_img(x, data_format='channels_last')
        with pytest.raises(ValueError):
            x = np.random.random((height, width, 3))
            img = image.img_to_array(x, data_format='channels')  # unknown data_format
        with pytest.raises(ValueError):
            x = np.random.random((height, width, 5, 3))  # neither RGB nor gray-scale
            img = image.img_to_array(x, data_format='channels_last') 
Example #16
Source File: coco_extract_labels.py    From enet-keras with MIT License
def extract_coco_labels(target_dir):
    kwargs = {
        'h': 512,
        'w': 512,
        'batch_size': 2,
        'root_dir': 'data',
        'dataset_name': 'mscoco',
        'data_type': 'train2017',
        'sample_size': 0.01,
        'instance_mode': False,
        'keep_context': 0.25,
        'merge_annotations': True,
        'cover_gaps': True,
        'resize_mode': 'stretch',
    }

    dataset = datasets.MSCOCO(**kwargs)

    for idx, res in enumerate(dataset.flow()):
        if not res:
            status = 'Skip'
        else:
            # convert label to rgb
            img, mask = res[0], res[1]
            rgb_label = one_hot_to_rgb(mask, dataset.PALETTE)

            # extract target filename
            filename_no_ext = os.path.splitext(img['file_name'])[0]
            lbl_path = os.path.join(
                target_dir,
                kwargs['data_type'],
                'labels',
                '{}.png'.format(filename_no_ext)
            )

            # convert array to PIL Image and save image to disk in png format (lossless)
            label = array_to_img(rgb_label)
            label.save(lbl_path)
            status = 'OK'
        msg = 'Processed {}/{} items. Status: {}'.format(idx + 1, dataset.num_items, status)
        print(msg, end='\r')
        sys.stdout.flush() 
Example #17
Source File: cifar10_eval.py    From DenseNet-Cifar10 with MIT License
def eval_model():
    model = createDenseNet(nb_classes=nb_classes,img_dim=img_dim,depth=densenet_depth,
                  growth_rate = densenet_growth_rate)
    model.load_weights(check_point_file)
    optimizer = Adam()
    model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])
    
    label_list_path = 'datasets/cifar-10-batches-py/batches.meta'   
    keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
    datadir_base = os.path.expanduser(keras_dir)
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join('/tmp', '.keras')
    label_list_path = os.path.join(datadir_base, label_list_path)
    with open(label_list_path, mode='rb') as f:
        labels = pickle.load(f)
    
    (x_train,y_train),(x_test,y_test) = cifar10.load_data()
    x_test = x_test.astype('float32')
    x_test /= 255
    y_test= keras.utils.to_categorical(y_test, nb_classes)
    test_datagen = getDataGenerator(train_phase=False)
    test_datagen = test_datagen.flow(x_test,y_test,batch_size = batch_size,shuffle=False)
    
    # Evaluate model with test data set and share sample prediction results
    evaluation = model.evaluate_generator(test_datagen,
                                        steps=x_test.shape[0] // batch_size,
                                        workers=4)
    print('Model Accuracy = %.2f' % (evaluation[1]))
    
    counter = 0
    figure = plt.figure()
    plt.subplots_adjust(left=0.1,bottom=0.1, right=0.9, top=0.9,hspace=0.5, wspace=0.3)
    for x_batch,y_batch in test_datagen:
        predict_res = model.predict_on_batch(x_batch)
        for i in range(batch_size):
            actual_label = labels['label_names'][np.argmax(y_batch[i])]
            predicted_label = labels['label_names'][np.argmax(predict_res[i])]
            if actual_label != predicted_label:
                counter += 1
                pics_raw = x_batch[i]
                pics_raw *= 255
                pics = array_to_img(pics_raw)
                ax = plt.subplot(25//5, 5, counter)
                ax.axis('off')
                ax.set_title(predicted_label)
                plt.imshow(pics)
            if counter >= 25:
                plt.savefig("./wrong_predicted.jpg")
                break
        if counter >= 25:
            break
    print("Everything seems OK...") 
Example #18
Source File: data_input.py    From DenseNet-Cifar10 with MIT License
def testDataGenerator(pics_num):
    """visualize the pics after data augmentation
    Args:
        pics_num:
            the number of pics you want to observe
    return:
        None
    """
    
    print("Now, we are testing data generator......")
    
    (x_train,y_train),(x_test,y_test) = cifar10.load_data()
    x_train = x_train.astype('float32')
    y_train = keras.utils.to_categorical(y_train, 10)
    
    # Load label names to use in prediction results
    label_list_path = 'datasets/cifar-10-batches-py/batches.meta'
    keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
    datadir_base = os.path.expanduser(keras_dir)
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join('/tmp', '.keras')
    label_list_path = os.path.join(datadir_base, label_list_path)
    with open(label_list_path, mode='rb') as f:
        labels = pickle.load(f)
    
    datagen = getDataGenerator(train_phase=True)
    """
    x_batch is a [-1,row,col,channel] np array
    y_batch is a [-1,labels] np array
    """
    figure = plt.figure()
    plt.subplots_adjust(left=0.1,bottom=0.1, right=0.9, top=0.9,hspace=0.5, wspace=0.3)
    for x_batch,y_batch in datagen.flow(x_train,y_train,batch_size = pics_num):
        for i in range(pics_num):
            pics_raw = x_batch[i]
            pics = array_to_img(pics_raw)
            ax = plt.subplot(pics_num//5, 5, i+1)
            ax.axis('off')
            ax.set_title(labels['label_names'][np.argmax(y_batch[i])])
            plt.imshow(pics)
        plt.savefig("./processed_data.jpg")
        break   
    print("Everything seems OK...") 
Example #19
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_load_img(self, tmpdir):
        filename = str(tmpdir / 'image.png')

        original_im_array = np.array(255 * np.random.rand(100, 100, 3),
                                     dtype=np.uint8)
        original_im = image.array_to_img(original_im_array, scale=False)
        original_im.save(filename)

        # Test that loaded image is exactly equal to original.

        loaded_im = image.load_img(filename)
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == original_im_array.shape
        assert np.all(loaded_im_array == original_im_array)

        loaded_im = image.load_img(filename, grayscale=True)
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (original_im_array.shape[0],
                                         original_im_array.shape[1], 1)

        # Test that nothing is changed when target size is equal to original.

        loaded_im = image.load_img(filename, target_size=(100, 100))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == original_im_array.shape
        assert np.all(loaded_im_array == original_im_array)

        loaded_im = image.load_img(filename, grayscale=True,
                                   target_size=(100, 100))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (original_im_array.shape[0],
                                         original_im_array.shape[1], 1)

        # Test down-sampling with bilinear interpolation.

        loaded_im = image.load_img(filename, target_size=(25, 25))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (25, 25, 3)

        loaded_im = image.load_img(filename, grayscale=True,
                                   target_size=(25, 25))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (25, 25, 1)

        # Test down-sampling with nearest neighbor interpolation.

        loaded_im_nearest = image.load_img(filename, target_size=(25, 25),
                                           interpolation="nearest")
        loaded_im_array_nearest = image.img_to_array(loaded_im_nearest)
        assert loaded_im_array_nearest.shape == (25, 25, 3)
        assert np.any(loaded_im_array_nearest != loaded_im_array)

        # Check that exception is raised if interpolation not supported.

        loaded_im = image.load_img(filename, interpolation="unsupported")
        with pytest.raises(ValueError):
            loaded_im = image.load_img(filename, target_size=(25, 25),
                                       interpolation="unsupported") 