Python tensorflow.keras.preprocessing.image.ImageDataGenerator() Examples

The following are 23 code examples of tensorflow.keras.preprocessing.image.ImageDataGenerator(), drawn from open-source projects. Each example notes the original project and source file it comes from. You may also want to look at the other functions and classes available in the tensorflow.keras.preprocessing.image module.
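
Before the project-specific examples, here is a minimal generic sketch of typical ImageDataGenerator usage. The directory path, image size, and the compiled model referenced in the final comment are assumptions, not taken from any example below.

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Minimal sketch (assumed paths and sizes): build an augmenting generator
# and stream batches from a directory with one subfolder per class.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,          # scale pixel values to [0, 1]
    width_shift_range=0.1,     # random horizontal shifts
    height_shift_range=0.1,    # random vertical shifts
    horizontal_flip=True)      # random horizontal flips

train_flow = train_datagen.flow_from_directory(
    "data/train",              # hypothetical directory
    target_size=(224, 224),
    batch_size=32,
    class_mode="categorical")

# A compiled Keras model can consume the iterator directly, e.g.:
# model.fit(train_flow, epochs=10)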
Example #1
Source File: coco_hpe2_dataset.py    From imgclsmob with MIT License
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = CocoHpeImageDataGenerator(
        preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)),
        data_format=data_format)
    return data_generator 
Example #2
Source File: coco_hpe3_dataset.py    From imgclsmob with MIT License
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = CocoHpeImageDataGenerator(
        preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)),
        data_format=data_format)
    return data_generator 
Example #3
Source File: coco_hpe1_dataset.py    From imgclsmob with MIT License
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = CocoHpeImageDataGenerator(
        preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)),
        data_format=data_format)
    return data_generator 
Example #4
Source File: FcDEC.py    From DEC-DA with MIT License
def __init__(self,
                 dims,
                 n_clusters=10,
                 alpha=1.0):

        super(FcDEC, self).__init__()

        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1

        self.n_clusters = n_clusters
        self.alpha = alpha
        self.pretrained = False
        self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, rotation_range=10)
        self.autoencoder, self.encoder = autoencoder(self.dims)

        # prepare FcDEC model
        clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output)
        self.model = Model(inputs=self.encoder.input, outputs=clustering_layer) 
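
The `datagen` configured above is used to randomly perturb input batches during training. Below is a minimal sketch of that idea on a dummy image batch, assuming 28x28 single-channel inputs; it is not the project's actual training loop.

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Sketch only: apply the same augmentation settings to one dummy batch.
datagen = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             rotation_range=10)
x_batch = np.random.rand(256, 28, 28, 1).astype("float32")  # stand-in images
augmented = next(datagen.flow(x_batch, batch_size=256, shuffle=False))
print(augmented.shape)  # (256, 28, 28, 1)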
Example #5
Source File: cifar10_cls_dataset.py    From imgclsmob with MIT License
def cifar10_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = ImageDataGenerator(
        preprocessing_function=(lambda img: img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)),
        data_format=data_format)
    return data_generator 
Example #6
Source File: cub200_2011_cls_dataset.py    From imgclsmob with MIT License
def cub200_val_transform(ds_metainfo,
                         data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = CubImageDataGenerator(
        preprocessing_function=(lambda img: img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)),
        data_format=data_format)
    return data_generator 
Example #7
Source File: imagenet1k_cls_dataset.py    From imgclsmob with MIT License
def imagenet_val_transform(ds_metainfo,
                           data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = ImageDataGenerator(
        preprocessing_function=(lambda img: img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)),
        data_format=data_format)
    return data_generator 
Example #8
Source File: coco_hpe2_dataset.py    From imgclsmob with MIT License
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=ds_metainfo.val_transform2(
                ds_metainfo=ds_metainfo)))
    return generator 
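
Note that `dataset=` is not a parameter of the stock Keras flow_from_directory; it is presumably handled by the project's CocoHpeImageDataGenerator subclass used in Example #1. A rough sketch of how the two helpers pair up, assuming a populated DatasetMetaInfo instance:

# Sketch only: combine the COCO HPE transform (Example #1) with the generator above.
# `ds_metainfo` is assumed to be a populated DatasetMetaInfo for COCO keypoints.
data_generator = cocohpe_val_transform(ds_metainfo)
val_gen = cocohpe_val_generator(data_generator, ds_metainfo, batch_size=1)
# for batch in val_gen:
#     ...  # run pose-estimation inference on each batch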
Example #9
Source File: train_imagenet.py    From keras-YOLOv3-model-set with MIT License
def evaluate_model(args, model, input_shape):
    # eval data generator
    eval_datagen = ImageDataGenerator(preprocessing_function=preprocess)
    eval_generator = eval_datagen.flow_from_directory(
            args.val_data_path,
            target_size=input_shape,
            batch_size=args.batch_size)

    # get optimizer
    optimizer = get_optimizer(args.optim_type, args.learning_rate)

    # compile model for evaluation
    model.compile(
              optimizer=optimizer,
              metrics=['accuracy', 'top_k_categorical_accuracy'],
              loss='categorical_crossentropy')

    print('Evaluate on {} samples, with batch size {}.'.format(eval_generator.samples, args.batch_size))
    scores = model.evaluate_generator(
            eval_generator,
            steps=eval_generator.samples // args.batch_size,
            max_queue_size=10,
            workers=1,
            use_multiprocessing=False,
            verbose=1)

    print('Evaluate loss:', scores[0])
    print('Top-1 accuracy:', scores[1])
    print('Top-k accuracy:', scores[2]) 
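
The `preprocess` function passed to ImageDataGenerator above is defined elsewhere in train_imagenet.py. A hypothetical stand-in performing a common ImageNet-style normalization might look as follows; the project's real function may differ.

import numpy as np

def preprocess(x):
    """Hypothetical stand-in: scale to [0, 1], then normalize with ImageNet
    channel statistics. The project's actual preprocess() may differ."""
    x = x.astype("float32") / 255.0
    mean = np.array([0.485, 0.456, 0.406], dtype="float32")
    std = np.array([0.229, 0.224, 0.225], dtype="float32")
    return (x - mean) / std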
Example #10
Source File: cifar_tf_example.py    From ray with Apache License 2.0
def _make_generator(x_train, y_train, batch_size):
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        # divide inputs by std of the dataset
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        # randomly rotate images in the range (degrees, 0 to 180)
        rotation_range=0,
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode="nearest",
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    return datagen.flow(x_train, y_train, batch_size=batch_size) 
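
The iterator returned by _make_generator plugs straight into model.fit. A minimal usage sketch on CIFAR-10, assuming a compiled Keras `model` (the surrounding Ray training code is omitted):

from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Sketch only: load CIFAR-10, build the flow, and (commented out) fit a model.
(x_train, y_train), _ = cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
y_train = to_categorical(y_train, 10)

train_flow = _make_generator(x_train, y_train, batch_size=64)
# model.fit(train_flow,
#           steps_per_epoch=len(x_train) // 64,
#           epochs=10)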
Example #11
Source File: ConvDEC.py    From DEC-DA with MIT License
def __init__(self,
                 input_shape,
                 filters=[32, 64, 128, 10],
                 n_clusters=10):

        self.n_clusters = n_clusters
        self.input_shape = input_shape
        self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, rotation_range=10)
        self.datagenx = ImageDataGenerator()
        self.autoencoder, self.encoder = CAE(input_shape, filters)

        # Define ConvDEC model
        clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output)
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=clustering_layer) 
Example #12
Source File: cub200_2011_cls_dataset.py    From imgclsmob with MIT License
def cub200_val_generator(data_generator,
                         ds_metainfo,
                         batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    root = ds_metainfo.root_dir_path
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg,
        mode="val")
    return generator 
Example #13
Source File: cub200_2011_cls_dataset.py    From imgclsmob with MIT License
def cub200_train_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for training subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the training subset.
    """
    root = ds_metainfo.root_dir_path
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg,
        mode="val")
    return generator 
Example #14
Source File: imagenet1k_cls_dataset.py    From imgclsmob with MIT License
def imagenet_val_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg)
    return generator 
Example #15
Source File: imagenet1k_cls_dataset.py    From imgclsmob with MIT License
def imagenet_train_generator(data_generator,
                             ds_metainfo,
                             batch_size):
    """
    Create image generator for training subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the training subset.
    """
    split = "train"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg)
    return generator 
Example #16
Source File: imagenet1k_cls_dataset.py    From imgclsmob with MIT License
def imagenet_train_transform(ds_metainfo,
                             data_format="channels_last"):
    """
    Create image transform sequence for training subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = ImageDataGenerator(
        preprocessing_function=(lambda img: img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)),
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        data_format=data_format)
    return data_generator 
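
These transform and generator helpers are meant to be used together: the ImageDataGenerator returned here feeds the flow_from_directory call in Example #15. A rough pairing sketch, assuming a populated ImageNet-1K DatasetMetaInfo instance:

# Sketch only: combine the transform above with the generator from Example #15.
data_generator = imagenet_train_transform(ds_metainfo)
train_gen = imagenet_train_generator(data_generator,
                                     ds_metainfo,
                                     batch_size=128)
# model.fit(train_gen, steps_per_epoch=train_gen.samples // 128, epochs=...)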
Example #17
Source File: cifar10_cls_dataset.py    From imgclsmob with MIT License
def cifar10_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    assert(ds_metainfo is not None)
    _, (x_test, y_test) = cifar10.load_data()
    generator = data_generator.flow(
        x=x_test,
        y=y_test,
        batch_size=batch_size,
        shuffle=False)
    return generator 
Example #18
Source File: cifar10_cls_dataset.py    From imgclsmob with MIT License
def cifar10_train_transform(ds_metainfo,
                            data_format="channels_last"):
    """
    Create image transform sequence for training subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    data_generator = ImageDataGenerator(
        preprocessing_function=(lambda img: img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)),
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        data_format=data_format)
    return data_generator 
Example #19
Source File: coco_hpe1_dataset.py    From imgclsmob with MIT License
def cocohpe_test_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for testing subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the testing subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="test",
            transform=ds_metainfo.test_transform2(
                ds_metainfo=ds_metainfo)))
    return generator 
Example #20
Source File: coco_hpe1_dataset.py    From imgclsmob with MIT License
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=ds_metainfo.val_transform2(
                ds_metainfo=ds_metainfo)))
    return generator 
Example #21
Source File: coco_hpe3_dataset.py    From imgclsmob with MIT License
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the validation subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=ds_metainfo.val_transform2(
                ds_metainfo=ds_metainfo)))
    return generator 
Example #22
Source File: coco_hpe2_dataset.py    From imgclsmob with MIT License
def cocohpe_test_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for testing subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO HPE dataset metainfo.
    batch_size : int
        Batch size.

    Returns
    -------
    Iterator
        Image batch iterator over the testing subset.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="test",
            transform=ds_metainfo.test_transform2(
                ds_metainfo=ds_metainfo)))
    return generator 
Example #23
Source File: train.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def generate(batch, shape, ptrain, pval):
    """Data generation and augmentation
    # Arguments
        batch: Integer, batch size.
        shape: Tuple, target image size.
        ptrain: training data directory.
        pval: validation data directory.
    # Returns
        train_generator: training set generator.
        validation_generator: validation set generator.
        count1: Integer, number of training samples.
        count2: Integer, number of validation samples.
    """

    # Use data augmentation on the training data
    datagen1 = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)

    datagen2 = ImageDataGenerator(rescale=1. / 255)

    train_generator = datagen1.flow_from_directory(
        ptrain,
        target_size=shape,
        batch_size=batch,
        class_mode='categorical')

    validation_generator = datagen2.flow_from_directory(
        pval,
        target_size=shape,
        batch_size=batch,
        class_mode='categorical')

    count1 = 0
    for root, dirs, files in os.walk(ptrain):
        for each in files:
            count1 += 1

    count2 = 0
    for root, dirs, files in os.walk(pval):
        for each in files:
            count2 += 1

    return train_generator, validation_generator, count1, count2
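
A minimal sketch of how the values returned by generate() are typically consumed, assuming a compiled Keras `model` and hypothetical directory paths:

# Sketch only: wire generate() into training; paths and `model` are assumptions.
train_gen, val_gen, n_train, n_val = generate(
    batch=32,
    shape=(224, 224),
    ptrain="data/train",   # hypothetical training directory
    pval="data/val")       # hypothetical validation directory

# model.fit(train_gen,
#           steps_per_epoch=n_train // 32,
#           validation_data=val_gen,
#           validation_steps=n_val // 32,
#           epochs=10)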