Python utils.load_mnist() Examples

The following are 10 code examples of utils.load_mnist(), drawn from open source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the utils module, or try the search function.
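
The exact signature of load_mnist varies from project to project: Examples #1 and #2 unpack four arrays, Examples #3, #4 and #6 pass a dataset name and an is_training flag and get back a single split, Examples #7 and #8 pass only a dataset name, Examples #9 and #10 unpack nested (train, test) tuples, and Example #5 receives a single dataset object. For reference, here is a minimal sketch of a loader matching the four-array form; it is a hypothetical stand-in built on tf.keras.datasets, not the utils module of any project below.

import numpy as np
from tensorflow.keras.datasets import mnist

def load_mnist():
    # Hypothetical helper, for illustration only: flatten the 28x28
    # images into 784-dim float vectors scaled to [0, 1].
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
    x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0
    return x_train, y_train, x_test, y_test
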
Example #1
Source File: plot.py    From adversarial-autoencoder with MIT License
def plot_pca():
    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()
    pca = PCA(n_components=2)

    print('transforming training data')
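    # Fit the two-component projection on the training split only;
    # the test split below is transformed with the same fitted components.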
    Z_train = pca.fit_transform(X_train)

    print('transforming test data')
    Z_test = pca.transform(X_test)

    plot(Z_train, y_train, Z_test, y_test,
         filename='pca.png', title='projected onto principal components') 
Example #2
Source File: plot.py    From adversarial-autoencoder with MIT License
def plot_autoencoder(weightsfile):
    print('building model')
    layers = model.build_model()

    batch_size = 128

    print('compiling theano function')
    encoder_func = theano_funcs.create_encoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()

    train_datapoints = []
    print('transforming training data')
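    # Encode the images in fixed-size minibatches and stack the per-batch codes below.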
    for train_idx in get_batch_idx(X_train.shape[0], batch_size):
        X_train_batch = X_train[train_idx]
        train_batch_codes = encoder_func(X_train_batch)
        train_datapoints.append(train_batch_codes)

    test_datapoints = []
    print('transforming test data')
    for test_idx in get_batch_idx(X_test.shape[0], batch_size):
        X_test_batch = X_test[test_idx]
        test_batch_codes = encoder_func(X_test_batch)
        test_datapoints.append(test_batch_codes)

    Z_train = np.vstack(train_datapoints)
    Z_test = np.vstack(test_datapoints)

    plot(Z_train, y_train, Z_test, y_test,
         filename='adversarial_train_val.png',
         title='projected onto latent space of autoencoder') 
Example #3
Source File: distributed_train.py    From capsule-networks with MIT License
def create_inputs():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    num_pre_threads = cfg.thread_per_gpu*cfg.num_gpu
    data_queue = tf.train.slice_input_producer([trX, trY], capacity=64*num_pre_threads)
    X, Y = tf.train.shuffle_batch(data_queue, num_threads=num_pre_threads,
                                  batch_size=cfg.batch_size_per_gpu*cfg.num_gpu,
                                  capacity=cfg.batch_size_per_gpu*cfg.num_gpu * 64,
                                  min_after_dequeue=cfg.batch_size_per_gpu*cfg.num_gpu * 32,
                                  allow_smaller_final_batch=False)

    return (X, Y) 
Example #4
Source File: distributed_train.py    From CapsNet-Tensorflow with Apache License 2.0
def create_inputs():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    num_pre_threads = cfg.thread_per_gpu*cfg.num_gpu
    data_queue = tf.train.slice_input_producer([trX, trY], capacity=64*num_pre_threads)
    X, Y = tf.train.shuffle_batch(data_queue, num_threads=num_pre_threads,
                                  batch_size=cfg.batch_size_per_gpu*cfg.num_gpu,
                                  capacity=cfg.batch_size_per_gpu*cfg.num_gpu * 64,
                                  min_after_dequeue=cfg.batch_size_per_gpu*cfg.num_gpu * 32,
                                  allow_smaller_final_batch=False)

    return (X, Y) 
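
The two preceding examples build a TF1 queue-runner input pipeline; tf.train.slice_input_producer and tf.train.shuffle_batch were later deprecated in favor of tf.data. A rough tf.data sketch of the same pipeline (an illustration under that assumption, not code from either project):

import tensorflow as tf

def create_inputs_tfdata(trX, trY, batch_size, shuffle_buffer=4096):
    # Slice the in-memory arrays into examples, as slice_input_producer did.
    ds = tf.data.Dataset.from_tensor_slices((trX, trY))
    # shuffle() plays the role of shuffle_batch's min_after_dequeue buffer;
    # drop_remainder=True matches allow_smaller_final_batch=False.
    return ds.shuffle(shuffle_buffer).repeat().batch(batch_size, drop_remainder=True)
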
Example #5
Source File: evaluation.py    From PCANet with MIT License
def run_mnist(n_train=None, n_test=None, model_type="normal"):
    datasize = {"n_train": n_train, "n_test": n_test}
    transformer_params = {
        "image_shape": 28,
        "filter_shape_l1": 5, "step_shape_l1": 1, "n_l1_output": 8,
        "filter_shape_l2": 5, "step_shape_l2": 1, "n_l2_output": 4,
        "filter_shape_pooling": 5, "step_shape_pooling": 5
    }
    ensemble_params = {
        "n_estimators" : 40,
        "sampling_ratio" : 0.03,
        "n_jobs" : -1
    }
    dataset = utils.load_mnist()
    run(dataset, datasize, transformer_params, ensemble_params, model_type) 
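
Judging by the parameter names, the transformer runs two PCA filter stages over the 28x28 images (5x5 patches at stride 1, with 8 first-stage and 4 second-stage filters) followed by non-overlapping 5x5 pooling, and the ensemble settings configure a bagged classifier with 40 estimators; consult the PCANet project for the authoritative meaning of each key.
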
Example #6
Source File: eval.py    From capsule-networks with Apache License 2.0
def main(_):
    # Load Graph
    capsNet = CapsNet(is_training=False)
    print('[+] Graph is constructed')

    # Load test data
    teX, teY = load_mnist(conf.dataset, is_training=False)

    # Start session
    with capsNet.graph.as_default():
        sv = tf.train.Supervisor(logdir=conf.logdir)
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            checkpoint_path = tf.train.latest_checkpoint(conf.logdir)
            sv.saver.restore(sess, checkpoint_path)
            print('[+] Graph is restored from ' + checkpoint_path)

            # Make results directory
            if not os.path.exists('results'):
                os.mkdir('results')

            reconstruction_err = []
            classification_acc = []
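            # Sweep the 10,000-image MNIST test set in batches of conf.batch_size.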
            for i in range(10000 // conf.batch_size):
                start = i * conf.batch_size
                end = start + conf.batch_size

                # Reconstruction
                recon_imgs = sess.run(capsNet.decoded, {capsNet.x: teX[start:end]})
                recon_imgs = np.reshape(recon_imgs, (conf.batch_size, -1))
                origin_imgs = np.reshape(teX[start:end], (conf.batch_size, -1))
                squared = np.square(recon_imgs - origin_imgs)
                reconstruction_err.append(np.mean(squared))
                if i % 5 == 0:
                    imgs = np.reshape(recon_imgs, (conf.batch_size, 28, 28, 1))
                    size = 6
                    save_images(imgs[0:size * size, :], [size, size], 'results/test_%03d.png' % i)

                # Classification
                cls_result = sess.run(capsNet.preds, {capsNet.x: teX[start:end]})
                cls_answer = teY[start:end]
                cls_acc = np.mean(np.equal(cls_result, cls_answer).astype(np.float32))
                classification_acc.append(cls_acc)

            # Print classification accuracy & reconstruction error
            print('reconstruction_err : ' + str(np.mean(reconstruction_err)))
            print('classification_acc : ' + str(np.mean(classification_acc) * 100)) 
Example #7
Source File: CGAN.py    From Generative_Model_Zoo with MIT License
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
Example #8
Source File: ACGAN.py    From Generative_Model_Zoo with MIT License
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()
            self.CE_loss = nn.CrossEntropyLoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
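
In this example and the previous one, Variable(..., volatile=True) is pre-0.4 PyTorch; the flag was removed when Variable merged into Tensor. A sketch of the same fixed noise-and-condition setup in modern PyTorch (an illustration, not code from the project):

import torch

sample_num, z_dim, y_dim = 100, 62, 10
with torch.no_grad():
    # Each block of 10 consecutive rows shares one random code.
    sample_z = torch.rand(10, 1, z_dim).expand(10, y_dim, z_dim).reshape(sample_num, z_dim)
    # Row j within each block is digit j, one-hot encoded.
    labels = torch.arange(y_dim).repeat(10)
    sample_y = torch.zeros(sample_num, y_dim).scatter_(1, labels.unsqueeze(1), 1)
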
Example #9
Source File: data_iter.py    From bruno with MIT License
def __init__(self, seq_len, batch_size, dataset='mnist', set='train',
                 rng=None, infinite=True, digits=None):

        if dataset == 'fashion_mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_fashion_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            else:
                self.x = x_test
                self.y = y_test
        elif dataset == 'mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            elif set == 'test':
                self.x = x_test
                self.y = y_test
        elif dataset == 'cifar10':
            self.x, self.y = utils.load_cifar('data/cifar', subset=set)
            self.x = np.transpose(self.x, (0, 2, 3, 1))  # (N,3,32,32) -> (N,32,32,3)
            self.x = np.float32(self.x)
            self.img_shape = self.x.shape[1:]
            self.input_dim = np.prod(self.img_shape)
        else:
            raise ValueError('wrong dataset name')

        if dataset == 'mnist' or dataset == 'fashion_mnist':
            self.input_dim = self.x.shape[-1]
            self.img_shape = (int(np.sqrt(self.input_dim)), int(np.sqrt(self.input_dim)), 1)
            self.x = np.reshape(self.x, (self.x.shape[0],) + self.img_shape)
            self.x = np.float32(self.x)

        self.classes = np.unique(self.y)
        self.n_classes = len(self.classes)
        self.y2idxs = {}
        self.nsamples = 0
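        # Map each class label to the indices of its samples and count the total.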
        for i in list(self.classes):
            self.y2idxs[i] = np.where(self.y == i)[0]
            self.nsamples += len(self.y2idxs[i])

        self.batch_size = batch_size
        self.seq_len = seq_len
        self.rng = np.random.RandomState(42) if not rng else rng
        self.infinite = infinite
        self.digits = digits if digits is not None else np.arange(self.n_classes)

        print(set, 'dataset size:', self.x.shape)
        print(set, 'N classes', self.n_classes)
        print(set, 'min, max', np.min(self.x), np.max(self.x))
        print(set, 'nsamples', self.nsamples)
        print(set, 'digits', self.digits)
        print('--------------') 
Example #10
Source File: data_iter.py    From bruno with MIT License
def __init__(self, seq_len, set='train', dataset='mnist', rng=None, infinite=True, digits=None):

        if dataset == 'fashion_mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_fashion_mnist()
        elif dataset == 'mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_mnist()
        else:
            raise ValueError('wrong dataset name')

        self.x_train = x_train
        self.y_train = y_train

        self.y_train2idxs = {}
        for i in range(10):
            self.y_train2idxs[i] = np.where(self.y_train == i)[0]

        if set == 'train':
            self.x = x_train
            self.y = y_train
        else:
            self.x = x_test
            self.y = y_test

        self.input_dim = self.x.shape[-1]
        self.img_shape = (int(np.sqrt(self.input_dim)), int(np.sqrt(self.input_dim)), 1)
        self.x = np.reshape(self.x, (self.x.shape[0],) + self.img_shape)
        self.x = np.float32(self.x)

        self.x_train = np.reshape(self.x_train, (self.x_train.shape[0],) + self.img_shape)
        self.x_train = np.float32(self.x_train)

        self.y2idxs = {}
        for i in range(10):
            self.y2idxs[i] = np.where(self.y == i)[0]

        self.seq_len = seq_len
        self.rng = np.random.RandomState(42) if not rng else rng
        self.nsamples = self.x.shape[0]
        self.infinite = infinite
        self.digits = digits if digits is not None else range(10)
        self.n_classes = len(self.digits)
        self.batch_size = self.n_classes
        self.set = set

        print(set, 'dataset size:', self.x.shape)
        print(set, 'N classes', self.n_classes)
        print(set, 'min, max', np.min(self.x), np.max(self.x))
        print(set, 'nsamples', self.nsamples)
        print('--------------')