Python keras.datasets.mnist.load_data() Examples

The following are 30 code examples of keras.datasets.mnist.load_data(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other functions and classes available in the keras.datasets.mnist module.
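For reference, mnist.load_data() itself returns two (images, labels) tuples of NumPy arrays: 60,000 training images and 10,000 test images, each 28x28 with uint8 pixel values in the range 0-255. A minimal sketch of the raw call:

from keras.datasets import mnist

# Downloads mnist.npz to ~/.keras/datasets/ on first use, then loads it from the local cache.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)
print(x_train.dtype)                 # uint8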
Example #1
Source File: datasets.py    From super-simple-distributed-keras with MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
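The excerpt above assumes module-level imports that the listing does not show. A minimal set that makes it runnable, together with a usage example (the original datasets.py may organize its imports differently):

# Assumed imports for Example #1; the original file may differ.
from keras.datasets import mnist
from keras.utils import to_categorical

nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test = get_mnist()
print(x_train.shape, y_train.shape)  # (60000, 784) (60000, 10)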
Example #2
Source File: DataSampler.py    From MassImageRetrieval with Apache License 2.0
def mnist_dataset_reader():
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255  # normalize to [0, 1]
    X_test /= 255

    digit_indices = [np.where(y_train == i)[0] for i in range(10)]
    tr_pairs, tr_y = create_pairs(X_train, digit_indices)

    digit_indices = [np.where(y_test == i)[0] for i in range(10)]
    te_pairs, te_y = create_pairs(X_test, digit_indices)

    input_dim = 784

    return input_dim, tr_pairs, tr_y, te_pairs, te_y 
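create_pairs is called in Example #2 but defined elsewhere in DataSampler.py. A sketch based on the canonical Keras siamese MNIST example, which this code appears to follow; the project's own helper may differ in detail:

import random
import numpy as np

def create_pairs(x, digit_indices):
    # Build alternating positive/negative sample pairs for contrastive training.
    pairs, labels = [], []
    n = min(len(digit_indices[d]) for d in range(10)) - 1
    for d in range(10):
        for i in range(n):
            # Positive pair: two samples of the same digit.
            z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
            pairs.append([x[z1], x[z2]])
            # Negative pair: a sample of digit d paired with a different digit.
            dn = (d + random.randrange(1, 10)) % 10
            z1, z2 = digit_indices[d][i], digit_indices[dn][i]
            pairs.append([x[z1], x[z2]])
            labels += [1, 0]
    return np.array(pairs), np.array(labels)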
Example #3
Source File: data_loader.py    From Keras-GAN with MIT License
def setup_mnist(self, img_res):

        print ("Setting up MNIST...")

        if not os.path.exists('datasets/mnist_x.npy'):
            # Load the dataset
            (mnist_X, mnist_y), (_, _) = mnist.load_data()

            # Normalize and rescale images
            mnist_X = self.normalize(mnist_X)
            mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
            mnist_X = np.expand_dims(mnist_X, axis=-1)
            mnist_X = np.repeat(mnist_X, 3, axis=-1)

            self.mnist_X, self.mnist_y = mnist_X, mnist_y

            # Save formatted images
            np.save('datasets/mnist_x.npy', self.mnist_X)
            np.save('datasets/mnist_y.npy', self.mnist_y)
        else:
            self.mnist_X = np.load('datasets/mnist_x.npy')
            self.mnist_y = np.load('datasets/mnist_y.npy')

        print ("+ Done.") 
Example #4
Source File: reversing_gan.py    From gandlf with MIT License
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.eye(10)[y_train]
    y_test = np.eye(10)[y_test]

    return (X_train, y_train), (X_test, y_test) 
Example #5
Source File: mnist_gan.py    From gandlf with MIT License
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.expand_dims(y_train, axis=-1)
    y_test = np.expand_dims(y_test, axis=-1)

    return (X_train, y_train), (X_test, y_test) 
Example #6
Source File: utils.py    From RelativisticGAN-Tensorflow with MIT License
def load_cifar10(size=64) :
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x 
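The normalize helper used here (and in Examples #8, #10 and #19 below) is defined elsewhere in each project's utils.py. A hedged sketch of the usual GAN-style scaling to [-1, 1]; the repositories' actual implementations may differ:

import numpy as np

def normalize(x):
    # Assumed implementation: map uint8 pixels from [0, 255] to [-1, 1],
    # the range commonly paired with a tanh generator output.
    return x.astype(np.float32) / 127.5 - 1.0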
Example #7
Source File: main.py    From DiscriminativeActiveLearning with MIT License
def load_mnist():
    """
    load and pre-process the MNIST data
    """

    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_last':
        x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
        x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
    else:
        x_train = x_train.reshape((x_train.shape[0], 1, 28, 28))
        x_test = x_test.reshape((x_test.shape[0], 1, 28, 28))

    # scale the data to [0, 1]:
    x_train = np.array(x_train).astype('float32') / 255
    x_test = np.array(x_test).astype('float32') / 255

    # shuffle the data:
    perm = np.random.permutation(x_train.shape[0])
    x_train = x_train[perm]
    y_train = y_train[perm]

    return (x_train, y_train), (x_test, y_test) 
Example #8
Source File: utils.py    From RelativisticGAN-Tensorflow with MIT License
def load_mnist(size=64):
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)
    # x = np.expand_dims(x, axis=-1)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)
    return x 
Example #9
Source File: HandWritingRecognition.py    From Jtyoui with MIT License
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize: flatten each image to 784 features and scale pixels to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a custom constant initializer; constant(value=1.) is equivalent to one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
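model_save at the end of the excerpt is a project helper that is not shown here. In standard Keras, persisting a trained model to an HDF5 file is a single call; a sketch under the assumption that the helper simply wraps it:

def model_save(model, path):
    # Assumed behavior of the Jtyoui helper: save architecture, weights,
    # and optimizer state to an HDF5 file via the standard Keras API.
    model.save(path)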
Example #10
Source File: utils.py    From Self-Attention-GAN-Tensorflow with MIT License
def load_cifar10(size=64) :
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x 
Example #11
Source File: xp_elm.py    From brainforge with GNU General Public License v3.0
def pull_mnist(split=0.1, flatten=True):
    learning, testing = mnist.load_data()
    X = np.concatenate([learning[0], testing[0]]).astype(typing.floatX)
    Y = np.concatenate([learning[1], testing[1]]).astype("uint8")
    X -= X.mean()
    X /= X.std()
    if flatten:
        X = X.reshape(-1, 784)
    else:
        X = X[:, None, ...]
    Y = np.eye(10)[Y]

    if split:
        arg = np.arange(len(X))
        np.random.shuffle(arg)
        div = int(len(X) * split)
        targ, larg = arg[:div], arg[div:]
        return X[larg], Y[larg], X[targ], Y[targ]

    return X, Y 
Example #12
Source File: test_hyperband.py    From deep_architect with MIT License
def main():

    num_classes = 10
    num_samples = 3  # number of architectures to sample
    metric = 'val_accuracy' # evaluation metric
    resource_type = 'epoch'
    max_resource = 81 # max resource that a configuration can have

    # load and normalize data
    (x_train, y_train),(x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # defining searcher and evaluator
    evaluator = SimpleClassifierEvaluator((x_train, y_train), num_classes,
                                        max_num_training_epochs=5)
    searcher = se.RandomSearcher(get_search_space(num_classes))
    hyperband = SimpleArchitectureSearchHyperBand(searcher, evaluator, metric, resource_type)
    (best_config, best_perf) = hyperband.evaluate(max_resource)
    print("Best %s is %f with architecture %d" % (metric, best_perf[0], best_config[0])) 
Example #13
Source File: train.py    From neural-network-genetic-algorithm with MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #14
Source File: train.py    From neural-network-genetic-algorithm with MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #15
Source File: datasets.py    From super-simple-distributed-keras with MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #16
Source File: test_datasets.py    From CAPTCHA-breaking with MIT License
def test_imdb(self):
        print('imdb')
        (X_train, y_train), (X_test, y_test) = imdb.load_data() 
Example #17
Source File: test_datasets.py    From CAPTCHA-breaking with MIT License
def test_cifar(self):
        print('cifar10')
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 fine')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('fine')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 coarse')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('coarse')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape) 
Example #18
Source File: hyperparam_optimization.py    From elephas with MIT License
def data():
    """Data providing function:

    Make sure to have every relevant import statement included here and return data as
    used in the model function below. This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    from keras.datasets import mnist
    from keras.utils import np_utils
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test 
Example #19
Source File: utils.py    From Self-Attention-GAN-Tensorflow with MIT License
def load_mnist(size=64):
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)
    # x = np.expand_dims(x, axis=-1)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)
    return x 
Example #20
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def load_MNIST(self, model_type=3):
        allowed_types = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        if self.model_type not in allowed_types:
            print('ERROR: Only Integer Values from -1 to 9 are allowed')

        (self.X_train, self.Y_train), (_, _) = mnist.load_data()
        if self.model_type != -1:
            self.X_train = self.X_train[np.where(self.Y_train == int(self.model_type))[0]]

        # Rescale -1 to 1
        # Find Normalize Function from CV Class
        self.X_train = (np.float32(self.X_train) - 127.5) / 127.5
        self.X_train = np.expand_dims(self.X_train, axis=3)
        return 
Example #21
Source File: datasets.py    From n2d with GNU General Public License v3.0
def load_mnist():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x = np.concatenate((x_train, x_test))
    y = np.concatenate((y_train, y_test))
    x = x.reshape((x.shape[0], -1))
    x = np.divide(x, 255.)
    return x, y 
Example #22
Source File: datasets.py    From n2d with GNU General Public License v3.0
def load_mnist_test():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x = x_test
    y = y_test
    x = np.divide(x, 255.)
    x = x.reshape((x.shape[0], -1))
    return x, y 
Example #23
Source File: utils.py    From SphereGAN-Tensorflow with MIT License
def load_mnist():
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    x = np.concatenate((train_data, test_data), axis=0)
    x = np.expand_dims(x, axis=-1)

    return x 
Example #24
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def load_2D_encoded_MNIST(self):
        (_, self.Y_train_2D), (_, self.Y_test_2D) = mnist.load_data()
        self.X_train_2D_encoded = np.load('x_train_encoded.npy')
        self.X_test_2D_encoded = np.load('x_test_encoded.npy')
        return 
Example #25
Source File: conv_mnist_data_loader.py    From Keras-Project-Template with Apache License 2.0
def __init__(self, config):
        super(ConvMnistDataLoader, self).__init__(config)
        (self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
        self.X_train = self.X_train.reshape((-1, 28, 28, 1))
        self.X_test = self.X_test.reshape((-1, 28, 28, 1)) 
Example #26
Source File: datasets.py    From n2d with GNU General Public License v3.0
def load_fashion():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x = np.concatenate((x_train, x_test))
    y = np.concatenate((y_train, y_test))
    x = x.reshape((x.shape[0], -1))
    x = np.divide(x, 255.)
    y_names = {0: "T-shirt", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat",
               5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle Boot"}
    return x, y, y_names 
Example #27
Source File: DataSampler.py    From MassImageRetrieval with Apache License 2.0
def __init__(self, dataset_name="mnist"):
        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None
        self.grouped = None
        self.num_classes = None
        self.train_colors = None
        self.train_colored_x = None
        self.test_colors = None
        self.test_colored_x = None
        self.epoch_id = 0

        self.m_AvgSampler = None
        self.m_InverseSampler = None

        if dataset_name == "mnist":
            self.num_classes = 10
            (X_train, y_train), (X_test, y_test) = mnist.load_data()
            X_train = X_train.reshape(60000, 28, 28, 1)
            X_test = X_test.reshape(10000, 28, 28, 1)
            X_train = X_train.astype('float32')
            X_test = X_test.astype('float32')
            y_train = y_train.astype("int32")
            y_test = y_test.astype("int32")
            X_train /= 255  # normalize to [0, 1]
            X_test /= 255
            self.X_train, self.y_train = X_train, keras.utils.to_categorical(y_train, self.num_classes)
            self.X_test, self.y_test = X_test, keras.utils.to_categorical(y_test, self.num_classes)
            self.y_train = self.y_train.astype("int32")
            self.y_test = self.y_test.astype("int32")
            print(self.X_train.shape, self.X_train.dtype)
            print(self.y_train.shape, self.y_train.dtype)

            self.shuffle_train_samples() 
Example #28
Source File: simple_mnist_data_loader.py    From Keras-Project-Template with Apache License 2.0
def __init__(self, config):
        super(SimpleMnistDataLoader, self).__init__(config)
        (self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
        self.X_train = self.X_train.reshape((-1, 28 * 28))
        self.X_test = self.X_test.reshape((-1, 28 * 28)) 
Example #29
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def load_MNIST(self, model_type=3):
        allowed_types = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        if self.model_type not in allowed_types:
            print('ERROR: Only Integer Values from -1 to 9 are allowed')

        (self.X_train, self.Y_train), (_, _) = mnist.load_data()
        if self.model_type != -1:
            self.X_train = self.X_train[np.where(self.Y_train == int(self.model_type))[0]]

        # Rescale -1 to 1
        # Find Normalize Function from CV Class
        self.X_train = (np.float32(self.X_train) - 127.5) / 127.5
        self.X_train = np.expand_dims(self.X_train, axis=3)
        return 
Example #30
Source File: mnist.py    From keras-deepcv with MIT License
def get_data(num_classes=10):
	"""
	Get the MNIST dataset.

	Downloads the dataset to ~/.keras/datasets/mnist.npz the first time
	it is called; later calls load it from the local cache.
	Parameters:
		num_classes - number of label classes (default 10)
	Returns:
		train_data - training data split
		train_labels - training labels
		test_data - test data split
		test_labels - test labels
	"""
	print('[INFO] Loading the MNIST dataset...')
	(train_data, train_labels), (test_data, test_labels) = mnist.load_data()

	# Reshape the data from (samples, height, width) to
	# (samples, height, width, depth) where depth is 1 channel (grayscale)
	train_data = train_data[:, :, :, np.newaxis]
	test_data = test_data[:, :, :, np.newaxis]

	# Normalize the data
	train_data = train_data / 255.0
	test_data = test_data / 255.0

	# Transform labels to one hot labels
	# Example: '0' will become [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
	#          '1' will become [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
	#          and so on...
	train_labels = np_utils.to_categorical(train_labels, num_classes)
	test_labels = np_utils.to_categorical(test_labels, num_classes)

	return train_data, train_labels, test_data, test_labels