Python skimage.data() Examples

The following are 30 code examples of skimage.data(), extracted from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module skimage, or try the search function.
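Before the project examples, here is a minimal usage sketch (not taken from any of the projects below): skimage.data bundles small sample images such as camera, astronaut, horse and coffee, each returned as a NumPy array.

import skimage.data

camera = skimage.data.camera()        # 2-D grayscale uint8 array, shape (512, 512)
astronaut = skimage.data.astronaut()  # RGB uint8 array, shape (512, 512, 3)
print(camera.dtype, camera.shape)
print(astronaut.shape)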
Example #1
Source File: data.py    From learn_prox_ops with GNU General Public License v3.0
def load_image(path):
    """Open, load and normalize an image.

    :param path: Image path.
    :type path: String

    :returns: Normalized image.
    :rtype: np.ndarray
    """
    img = skimage.data.imread(path)

    if img.dtype == np.uint8:
        normalizer = 255.
    else:
        normalizer = 65535.

    img = img / normalizer
    return img.astype(np.float32) 
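Note that skimage.data.imread is no longer available in newer scikit-image releases (it was essentially an alias for skimage.io.imread). A minimal sketch of the same load-and-normalize step with the current API; this helper is an illustration, not part of the original project:

import numpy as np
import skimage.io

def load_image_io(path):
    """Open, load and normalize an image via skimage.io.imread."""
    img = skimage.io.imread(path)
    normalizer = 255. if img.dtype == np.uint8 else 65535.
    return (img / normalizer).astype(np.float32)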
Example #2
Source File: train.py    From Hopfield-Network with MIT License
def plot(data, test, predicted, figsize=(5, 6)):
    data = [reshape(d) for d in data]
    test = [reshape(d) for d in test]
    predicted = [reshape(d) for d in predicted]

    fig, axarr = plt.subplots(len(data), 3, figsize=figsize)
    for i in range(len(data)):
        if i==0:
            axarr[i, 0].set_title('Train data')
            axarr[i, 1].set_title("Input data")
            axarr[i, 2].set_title('Output data')

        axarr[i, 0].imshow(data[i])
        axarr[i, 0].axis('off')
        axarr[i, 1].imshow(test[i])
        axarr[i, 1].axis('off')
        axarr[i, 2].imshow(predicted[i])
        axarr[i, 2].axis('off')

    plt.tight_layout()
    plt.savefig("result.png")
    plt.show() 
Example #3
Source File: words.py    From 12306-captcha with Apache License 2.0
def classify_image(path_dir=cfg.ROOT + '/data/download/words',
                   dest_path=cfg.ROOT + '/data/download/words-classify'):
    """
        :param path_dir:
        :param dest_path:
        :return:
    """
    temp = filter(lambda s: not s.startswith("."), os.listdir(path_dir))
    for words_name in temp:
        print(words_name)
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        sub_root_path = os.path.join(path_dir, words_name)
        if os.path.isdir(sub_root_path):
            classify_image(sub_root_path, os.path.join(dest_path, words_name))
            continue
        img = cv2.imread(sub_root_path, 0)
        new_img = judge_the_image_size(img)
        cv2.imwrite(os.path.join(dest_path, words_name), new_img)
    return 0 
Example #4
Source File: data.py    From learn_prox_ops with GNU General Public License v3.0
def init_file_lists(self):
        """
        Load and split the BSDS500 dataset.

        :returns: Two lists of train and test file paths.
        :rtype: Tuple
        """
        if self.noise_type == 'gaussian_random_sigma':
            self.patch_size = 50
            self.train_set_multiplier = 960  # to have 128 * 3000 patches in one epoch

        data_path = os.path.join(ROOT_DIR, 'data/bsds_500')
        if self.grayscale:
            train_files = glob(os.path.join(data_path, "greyscale_images/train/*.png"))
            test_files = glob(os.path.join(data_path, "greyscale_images/test/*.png"))
            self.img_decoder = tf.image.decode_png
        else:
            train_files = glob(os.path.join(data_path, 'color_images/train/*.jpg')) + \
                          glob(os.path.join(data_path, 'color_images/test/*.jpg'))
            train_files = train_files[:400]
            test_files = glob(os.path.join(data_path, 'data/color_images/val/*.jpg'))[:68]

        return train_files, test_files 
Example #5
Source File: custom.py    From Practical-Convolutional-Neural-Networks with MIT License
def load_data(data_dir):
    """Loads a data set and returns two lists:
    
    images: a list of Numpy arrays, each representing an image.
    labels: a list of numbers that represent the images' labels.
    """
    # Get all the subdirectories of the data folder (i.e. training or test). Each folder represents a unique label.
    directories = [d for d in os.listdir(data_dir) 
                   if os.path.isdir(os.path.join(data_dir, d))]
    
    # Iterate through the label directories and collect the data in two lists: labels and images.
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]

        # For each label, load its images and add them to the images list.
        # And add the label number (i.e. directory name) to the labels list.
        for f in file_names:
            images.append(skimage.data.imread(f))
            labels.append(int(d))
    return images, labels

# Load training and testing datasets. 
Example #6
Source File: TrainEVSegNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, LabelNamesPath, TrainPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames and LabelNames files
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()

    LabelNames = open(LabelNamesPath, 'r')
    LabelNames = LabelNames.read()
    LabelNames = LabelNames.split()
    
    # Read Train, Val and Test Idxs
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]
    TrainLabels = [LabelNames[i] for i in TrainIdxs]

    return DirNames, TrainNames, TrainLabels 
Example #7
Source File: RunEVSegNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, TrainPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames file
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()

    # Read TrainIdxs file
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]

    return DirNames, TrainNames 
Example #8
Source File: RunEVDeblurNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, TrainPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames file
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()

    # Read TrainIdxs file
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]

    return DirNames, TrainNames 
Example #9
Source File: RunEVHomographyNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, TrainPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames file
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()

    # Read TrainIdxs file
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]

    return DirNames, TrainNames 
Example #10
Source File: TrainEVDeblurNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, NumTestSamples, LatestFile):
    """
    Prints all stats with all arguments
    """
    print('Number of Epochs Training will run for ' + str(NumEpochs))
    print('Factor of reduction in training data is ' + str(DivTrain))
    print('Mini Batch Size ' + str(MiniBatchSize))
    print('Number of Training Images ' + str(NumTrainSamples))
    print('Number of Testing Images ' + str(NumTestSamples))
    if LatestFile is not None:
        print('Loading latest checkpoint with the name ' + LatestFile) 
Example #11
Source File: test_bm3d.py    From pybm3d with GNU General Public License v3.0
def noise_data():
    """Provide grayscale data for denoising."""
    noise_std_dev = 40.0
    img = skimage.data.camera()

    noise = np.random.normal(0, noise_std_dev, img.shape).astype(img.dtype)
    noisy_img = np.clip(img + noise, 0, 255)
    return img, noisy_img, noise_std_dev 
Example #12
Source File: test_bm3d.py    From pybm3d with GNU General Public License v3.0
def color_noise_data():
    """Provide color data for denoising."""
    noise_std_dev = 40.0
    img = skimage.data.astronaut()

    noise = np.random.normal(0, noise_std_dev, img.shape).astype(img.dtype)
    noisy_img = np.clip(img + noise, 0, 255)
    return img, noisy_img, noise_std_dev 
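Both noise_data and color_noise_data pair a clean skimage.data image with a noisy copy and the noise level. As a hedged usage sketch (not part of the pybm3d test suite, and assuming the fixture functions above are importable as plain functions), the pairing can be sanity-checked with skimage.metrics.peak_signal_noise_ratio:

import skimage.metrics

img, noisy_img, noise_std_dev = color_noise_data()
psnr = skimage.metrics.peak_signal_noise_ratio(img, noisy_img, data_range=255)
print("PSNR of the noisy input: %.2f dB" % psnr)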
Example #13
Source File: TrainEVHomographyNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, TrainPath, ValPath, TestPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames and LabelNames files
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()

    # LabelNames = open(LabelNamesPath, 'r')
    # LabelNames = LabelNames.read()
    # LabelNames = LabelNames.split()
    
    # Read Train, Val and Test Idxs
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]
    # TrainLabels = [LabelNames[i] for i in TrainIdxs]

    ValIdxs = open(ValPath, 'r')
    ValIdxs = ValIdxs.read()
    ValIdxs = ValIdxs.split()
    ValIdxs = [int(val) for val in ValIdxs]
    ValNames = [DirNames[i] for i in ValIdxs]
    # ValLabels = [LabelNames[i] for i in ValIdxs]

    TestIdxs = open(TestPath, 'r')
    TestIdxs = TestIdxs.read()
    TestIdxs = TestIdxs.split()
    TestIdxs = [int(val) for val in TestIdxs]
    TestNames = [DirNames[i] for i in TestIdxs]
    # TestLabels = [LabelNames[i] for i in TestIdxs]

    return DirNames, TrainNames, ValNames, TestNames 
Example #14
Source File: TrainEVHomographyNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, NumTestSamples, LatestFile):
    """
    Prints all stats with all arguments
    """
    print('Number of Epochs Training will run for ' + str(NumEpochs))
    print('Factor of reduction in training data is ' + str(DivTrain))
    print('Mini Batch Size ' + str(MiniBatchSize))
    print('Number of Training Images ' + str(NumTrainSamples))
    print('Number of Testing Images ' + str(NumTestSamples))
    if LatestFile is not None:
        print('Loading latest checkpoint with the name ' + LatestFile) 
Example #15
Source File: RunEVHomographyNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def SetupAll(ReadPath):
    """
    Inputs: 
    BasePath is the base path where Images are saved without "/" at the end
    Outputs:
    DirNames - Full path to all image files without extension
    Train/Val/Test - Idxs of all the images to be used for training/validation (held-out testing in this case)/testing
    Ratios - Ratios is a list of fraction of data used for [Train, Val, Test]
    CheckPointPath - Path to save checkpoints/model
    OptimizerParams - List of all OptimizerParams: depends on Optimizer
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    ImageSize - Size of the image
    NumTrain/Val/TestSamples - length(Train/Val/Test)
    NumTestRunsPerEpoch - Number of passes of Val data with MiniBatchSize 
    Train/Val/TestLabels - Labels corresponding to Train/Val/Test
    """
    # Setup DirNames
    DirNamesPath = ReadPath + os.sep + 'DirNames.txt'
    TrainPath = ReadPath + os.sep + 'Train.txt'
    DirNames, TrainNames = ReadDirNames(DirNamesPath, TrainPath)
    
    # Image Input Shape
    PatchSize = np.array([128, 128, 3])
    ImageSize = np.array([260, 346, 3])
    NumTrainSamples = len(TrainNames)
    
    return TrainNames, ImageSize, PatchSize, NumTrainSamples 
Example #16
Source File: TrainEVDeblurNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def ReadDirNames(DirNamesPath, TrainPath, ValPath, TestPath):
    """
    Inputs: 
    Path is the path of the file you want to read
    Outputs:
    DirNames is the data loaded from ./TxtFiles/DirNames.txt which has full path to all image files without extension
    """
    # Read DirNames file
    DirNames = open(DirNamesPath, 'r')
    DirNames = DirNames.read()
    DirNames = DirNames.split()
    
    # Read Train, Val and Test Idxs
    TrainIdxs = open(TrainPath, 'r')
    TrainIdxs = TrainIdxs.read()
    TrainIdxs = TrainIdxs.split()
    TrainIdxs = [int(val) for val in TrainIdxs]
    TrainNames = [DirNames[i] for i in TrainIdxs]

    ValIdxs = open(ValPath, 'r')
    ValIdxs = ValIdxs.read()
    ValIdxs = ValIdxs.split()
    ValIdxs = [int(val) for val in ValIdxs]
    ValNames = [DirNames[i] for i in ValIdxs]

    TestIdxs = open(TestPath, 'r')
    TestIdxs = TestIdxs.read()
    TestIdxs = TestIdxs.split()
    TestIdxs = [int(val) for val in TestIdxs]
    TestNames = [DirNames[i] for i in TestIdxs]

    return DirNames, TrainNames, ValNames, TestNames 
Example #17
Source File: train.py    From Hopfield-Network with MIT License
def main():
    # Load data
    camera = skimage.data.camera()
    astronaut = rgb2gray(skimage.data.astronaut())
    horse = skimage.data.horse()
    coffee = rgb2gray(skimage.data.coffee())

    # Merge data
    data = [camera, astronaut, horse, coffee]

    # Preprocessing
    print("Start to data preprocessing...")
    data = [preprocessing(d) for d in data]

    # Create Hopfield Network Model
    model = network.HopfieldNetwork()
    model.train_weights(data)

    # Generate testset
    test = [get_corrupted_input(d, 0.3) for d in data]

    predicted = model.predict(test, threshold=0, asyn=False)
    print("Show prediction results...")
    plot(data, test, predicted)
    print("Show network weights matrix...")
    #model.plot_weights() 
Example #18
Source File: TrainEVSegNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
    """
    Prints all stats with all arguments
    """
    print('Number of Epochs Training will run for ' + str(NumEpochs))
    print('Factor of reduction in training data is ' + str(DivTrain))
    print('Mini Batch Size ' + str(MiniBatchSize))
    print('Number of Training Images ' + str(NumTrainSamples))
    if LatestFile is not None:
        print('Loading latest checkpoint with the name ' + LatestFile) 
Example #19
Source File: words.py    From 12306-captcha with Apache License 2.0
def cut(image_cnt, path_dir= cfg.ROOT + '/data/download'):
    """
    :param image_cnt: number corresponding to the words image
    :param path_dir:
    :return:
    """
    del_text_path = os.path.join(path_dir, 'words_cut_result')
    make_dir(del_text_path)

    words_path = os.path.join(path_dir, 'words/words_' + str(image_cnt))
    if not os.path.exists(words_path):
        return
    for words_name in os.listdir(words_path):
        try:
            print(words_name)
            img = cv2.imread(os.path.join(words_path, words_name), 0)
            img = get_real_image(img, 160)
            if 2 == analyse_version(img):
                # version 2: the image has a background
                img_rect_st, img_rect_en = cut_version2(img, 182)
                write_image(img, del_text_path, image_cnt, words_name, img_rect_st, img_rect_en, False)
            else:
                # img = rm_noise(img)
                n_img = pretreatment_image(img, 3)
                n_img = binary_text(n_img)
                n_img = get_binary_real_image(n_img)
                save_img(img, n_img, del_text_path, image_cnt, words_name, y_value=10, x_value=28)
                # mesr_text(img, del_text_path, image_cnt, words_name, y_value=10, x_value=28)
        except Exception as e:
            pass 
Example #20
Source File: segmentation_labelling.py    From kaggle-heart with MIT License
def align_images(data):

    numslices=len(data)
    imageshifts = np.zeros((numslices,2))

    # calculate image shifts
    for idx in range(numslices):
        if idx == 0:
            pass
        else:
            image = np.mean(data[idx-1]['data'],0)
            offset_image = np.mean(data[idx]['data'],0)

            ## shifts in pixel precision for speed
            shift, error, diffphase = register_translation(image, offset_image)
            imageshifts[idx,:] = imageshifts[idx-1,:] + shift

    # apply image shifts
    for idx in range(numslices):
        non = lambda s: s if s<0 else None
        mom = lambda s: max(0,s)
        padded = np.zeros_like(data[idx]['data'])
        oy, ox = imageshifts[idx,:].astype(int)  # integer pixel shifts so they can be used as slice indices
        padded[:,mom(oy):non(oy), mom(ox):non(ox)] = data[idx]['data'][:,mom(-oy):non(-oy), mom(-ox):non(-ox)]
        data[idx]['data']=padded.copy()
        #tform=SimilarityTransform(translation = imageshifts[idx,:])
        #for idx2 in range(data[idx]['data'].shape[0]):
        #    tformed = warp(data[idx]['data'][idx2,:,:], inverse_map = tform)
        #    data[idx]['data'][idx2,:,:]= tformed

    return data 
Example #21
Source File: segmentation_labelling.py    From kaggle-heart with MIT License
def sort_images(data):
    numslices=len(data)
    positions=np.zeros((numslices,))
    for idx in range(numslices):
        positions[idx] = data[idx]['metadata']['SliceLocation']
    newdata=[x for y, x in sorted(zip(positions.tolist(), data), key=lambda dd: dd[0], reverse=True)]
    return newdata 
Example #22
Source File: segmentation_labelling.py    From kaggle-heart with MIT License
def filter_sequence(seq_block, order = 5, relcutoff = 0.1):

    def butter_lowpass(cutoff, fs, order=5):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    def butter_lowpass_filter(data, cutoff, fs, order=5):
        b, a = butter_lowpass(cutoff, fs, order=order)
        y = lfilter(b, a, data, axis = 0)
        return y

    zdim = seq_block.shape[0]
    xdim = seq_block.shape[1]
    ydim = seq_block.shape[2]

    ffdata = np.zeros((3*zdim, xdim, ydim))
    ffdata[:zdim] = seq_block
    ffdata[zdim:2*zdim] = seq_block
    ffdata[2*zdim:3*zdim] = seq_block

    ffdata = butter_lowpass_filter(ffdata, relcutoff, 1.0, order)

    ffdata = ffdata[zdim:2*zdim,:,:]

    return ffdata 
Example #23
Source File: train.py    From Hopfield-Network with MIT License
def reshape(data):
    dim = int(np.sqrt(len(data)))
    data = np.reshape(data, (dim, dim))
    return data 
Example #24
Source File: data.py    From learn_prox_ops with GNU General Public License v3.0
def load_deblurring_grey_data(experiment_name=None, image_name=None):
    """
    Load the data for the grayscale deblurring experiments on 11 standard test
    images first conducted in [A machine learning approach for non-blind image deconvolution](http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Schuler_A_Machine_Learning_2013_CVPR_paper.pdf).

    :param experiment_name: Name of the experiment a-e: experiment_*
    :type experiment_name: String
    :param image_name: Name of the image
    :type image_name: String

    :returns: Experiment data as Dict or single Tuple
    :rtype: Tuple
    """
    crop = 12
    data_dir = os.path.join(ROOT_DIR, 'data/deblurring_grey')
    experiments_data = {os.path.basename(experiment_dir):
                        {os.path.basename(image_dir):
                         {'f': load_image(os.path.join(image_dir, 'blurred_observation.png')),
                          'img': load_image(os.path.join(image_dir, 'original.png'))}
                         for image_dir in glob(experiment_dir + '/*')
                         if os.path.isdir(image_dir)}
                        for experiment_dir in glob(data_dir + '/*')}

    for experiment, experiment_images in experiments_data.items():
        kernel_img = load_image(os.path.join(data_dir, experiment + '/kernel.png'))
        kernel_img /= kernel_img.sum()
        experiment_images['kernel_img'] = kernel_img

    if experiment_name is not None:
        experiment_data = experiments_data[experiment_name]
        if image_name is not None:
            image_data = experiment_data[image_name]
            return (image_data['f'], image_data['img'], experiment_data['kernel_img'], crop)
        return experiment_data, crop
    else:
        if image_name is not None:
            print("Specifying only an image is not possible.")
        return experiments_data, crop 
Example #25
Source File: data.py    From learn_prox_ops with GNU General Public License v3.0
def load_demosaicking_data(image_name=None, dataset="mc_master"):
    """
    Load McMaster or Kodak demosaicking data.

    :param image_name: Name of a particular image
    :type image_name: String

    :return test_images: Experiment data as Dict or single Tuple
    :rtype test_images: Tuple
    """
    crop = 5
    image_paths = glob(os.path.join(ROOT_DIR,
                                    "data/demosaicking",
                                    dataset.lower(),
                                    "*"))

    def sort_key(d): return os.path.splitext(os.path.basename(d))[0]
    data = {os.path.splitext(os.path.basename(d))[0]:
            {'img': load_image(d)}
             for d in sorted(image_paths, key=sort_key)}

    if image_name is not None:
        return (data[str(image_name).lower()]['img'],
                crop)
    else:
        return data, crop 
Example #26
Source File: data.py    From learn_prox_ops with GNU General Public License v3.0
def __init__(self, opt, test_epochs):
        """
        Class constructor.

        :param opt: Option flags.
        :type opt: tf.app.flags.FLAGS
        :param test_epochs: Number of test_epochs. Usually None for 1 entire epoch.
        :type test_epochs: Int
        """
        self.input_shape = (self.patch_size, self.patch_size, opt.channels)
        self.train_shape = (self.patch_size, self.patch_size, opt.channels)
        self.test_shape = (self.patch_size, self.patch_size, opt.channels)
        self.sigma_noise = opt.sigma_noise
        self.noise_type = opt.noise_type
        self.batch_size = opt.batch_size
        self.img_decoder = tf.image.decode_jpeg
        self.is_train = tf.placeholder(tf.bool, name='is_train')

        train_files, test_files = self.init_file_lists()
        train_pipe = self.tf_data_pipeline(train_files * self.train_set_multiplier,
                                           self.train_shape,
                                           'train_pipeline',
                                           opt.train_epochs)
        test_pipe = self.tf_data_pipeline(test_files * self.test_set_multiplier,
                                          self.test_shape,
                                          'test_pipeline',
                                          test_epochs,
                                          train=False)

        Pipeline = namedtuple('Pipeline',
                              ['data', 'labels', 'num', 'epochs', 'batch_size'])
        self.train = Pipeline(*train_pipe,
                              epochs=opt.train_epochs,
                              batch_size=opt.batch_size)
        self.test = Pipeline(*test_pipe,
                             epochs=test_epochs,
                             batch_size=opt.batch_size) 
Example #27
Source File: train_validation_split.py    From piecewisecrf with MIT License
def _label_statistics(image_paths):
    '''

    Calculates label statistics (number of picked pixels for each class)

    Parameters
    ----------
    image_paths : list
        List of absolute paths for picked images

    Returns
    -------
    array: numpy array
        Number of selected pixels per class


    '''
    ds = KittiDataset()

    def _rgb_2_label(rgb):
        return ds.color2label[tuple(rgb)].trainId

    total_counts = np.zeros(ds.num_classes())
    for img in image_paths:
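        # NOTE: skimage.data.load() has since been deprecated in scikit-image;
        # skimage.io.imread(img) is the usual replacement for reading from a path.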
        rgb = skimage.data.load(img)
        labels = np.apply_along_axis(_rgb_2_label, 2, rgb)
        indices, counts = np.unique(labels, return_counts=True)
        if indices[-1] >= ds.num_classes():
            indices = indices[0:-1]
            counts = counts[0:-1]
        total_counts[indices] += counts
    return total_counts 
Example #28
Source File: image_preprocessing.py    From CNNArt with Apache License 2.0
def img_to_array(img, data_format=None):
    """Converts a PIL Image instance to a Numpy array.

    # Arguments
        img: PIL Image instance.
        data_format: Image data format.

    # Returns
        A 3D Numpy array.

    # Raises
        ValueError: if invalid `img` or `data_format` is passed.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ', data_format)
    # Numpy array x has format (height, width, channel)
    # or (channel, height, width)
    # but original PIL image has format (width, height, channel)
    x = np.asarray(img, dtype=K.floatx())
    if len(x.shape) == 3:
        if data_format == 'channels_first':
            x = x.transpose(2, 0, 1)
    elif len(x.shape) == 2:
        if data_format == 'channels_first':
            x = x.reshape((1, x.shape[0], x.shape[1]))
        else:
            x = x.reshape((x.shape[0], x.shape[1], 1))
    else:
        raise ValueError('Unsupported image shape: ', x.shape)
    return x 
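A short usage sketch (the file name is hypothetical, and a configured Keras backend is assumed to be available as K in this module):

from PIL import Image

pil_img = Image.open('example.jpg')   # hypothetical input file
arr = img_to_array(pil_img)           # float array laid out according to data_format
print(arr.shape)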
Example #29
Source File: image_preprocessing.py    From CNNArt with Apache License 2.0
def __init__(self, x, y, image_data_generator,
                 batch_size=32, shuffle=False, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png'):
        if y is not None and len(x) != len(y):
            raise ValueError('X (images tensor) and y (labels) '
                             'should have the same length. '
                             'Found: X.shape = %s, y.shape = %s' %
                             (np.asarray(x).shape, np.asarray(y).shape))

        if data_format is None:
            data_format = K.image_data_format()
        self.x = np.asarray(x, dtype=K.floatx())

        if self.x.ndim != 4:
            raise ValueError('Input data in `NumpyArrayIterator` '
                             'should have rank 4. You passed an array '
                             'with shape', self.x.shape)
        channels_axis = 3 if data_format == 'channels_last' else 1
        if self.x.shape[channels_axis] not in {1, 3, 4}:
            warnings.warn('NumpyArrayIterator is set to use the '
                          'data format convention "' + data_format + '" '
                          '(channels on axis ' + str(channels_axis) + '), i.e. expected '
                          'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
                          'However, it was passed an array with shape ' + str(self.x.shape) +
                          ' (' + str(self.x.shape[channels_axis]) + ' channels).')
        if y is not None:
            self.y = np.asarray(y)
        else:
            self.y = None
        self.image_data_generator = image_data_generator
        self.data_format = data_format
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed) 
Example #30
Source File: image_preprocessing.py    From CNNArt with Apache License 2.0
def _get_batches_of_transformed_samples(self, index_array):
        batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            fname = self.filenames[j]
            img = load_img(os.path.join(self.directory, fname),
                           grayscale=grayscale,
                           target_size=self.target_size)
            x = img_to_array(img, data_format=self.data_format)
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                                  index=j,
                                                                  hash=np.random.randint(1e7),
                                                                  format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'input':
            batch_y = batch_x.copy()
        elif self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(K.floatx())
        elif self.class_mode == 'categorical':
            batch_y = np.zeros((len(batch_x), self.num_classes), dtype=K.floatx())
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            return batch_x
        return batch_x, batch_y