Python solver.Solver() Examples

The following are 30 code examples of solver.Solver(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module solver, or try the search function.
Example #1
Source File: autoencoder.py    From SNIPER-mxnet with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #2
Source File: main.py    From dl-uncertainty with MIT License 6 votes vote down vote up
def main(_):
    """Entry point: build model and solver, then dispatch on FLAGS.mode.

    Fixes vs. original: Python-2-only ``print`` statement replaced with the
    ``print()`` call (valid on both 2 and 3), mixed tab/space indentation
    normalized to 4 spaces, and the duplicated directory-creation block removed.
    """
    with tf.device(FLAGS.device):
        model_save_path = 'model/' + FLAGS.model_save_path
        # Create the checkpoint directory if it does not exist.
        if not tf.gfile.Exists(model_save_path):
            tf.gfile.MakeDirs(model_save_path)
        log_dir = 'logs/' + model_save_path

        model = Model(learning_rate=0.0003, mode=FLAGS.mode)
        solver = Solver(model, model_save_path=model_save_path, log_dir=log_dir)

        if FLAGS.mode == 'train':
            solver.train()
        elif FLAGS.mode == 'test':
            solver.test(checkpoint=FLAGS.checkpoint)
        else:
            print('Unrecognized mode.')
Example #3
Source File: main.py    From dl-uncertainty with MIT License 6 votes vote down vote up
def main(_):
    """Entry point: build model and solver, then dispatch on FLAGS.mode.

    Fixes vs. original: Python-2-only ``print`` statement replaced with the
    ``print()`` call (valid on both 2 and 3), mixed tab/space indentation
    normalized to 4 spaces, and the duplicated directory-creation block removed.
    """
    with tf.device(FLAGS.device):
        model_save_path = 'model/' + FLAGS.model_save_path
        # Create the checkpoint directory if it does not exist.
        if not tf.gfile.Exists(model_save_path):
            tf.gfile.MakeDirs(model_save_path)
        log_dir = 'logs/' + model_save_path

        model = Model(learning_rate=0.0003, mode=FLAGS.mode)
        solver = Solver(model, model_save_path=model_save_path, log_dir=log_dir)

        if FLAGS.mode == 'train':
            solver.train()
        elif FLAGS.mode == 'test':
            solver.test(checkpoint=FLAGS.checkpoint)
        else:
            print('Unrecognized mode.')
Example #4
Source File: autoencoder.py    From training_results_v0.6 with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #5
Source File: main.py    From Res2Net-PoolNet with MIT License 6 votes vote down vote up
def main(config):
    """Dispatch to saliency training or testing based on config.mode."""
    if config.mode == 'train':
        loader = get_loader(config)
        # Pick the first unused run-<n> directory under the save folder.
        run_id = 0
        while os.path.exists("%s/run-%d" % (config.save_folder, run_id)):
            run_id += 1
        os.mkdir("%s/run-%d" % (config.save_folder, run_id))
        os.mkdir("%s/run-%d/models" % (config.save_folder, run_id))
        config.save_folder = "%s/run-%d" % (config.save_folder, run_id)
        Solver(loader, None, config).train()
    elif config.mode == 'test':
        config.test_root, config.test_list = get_test_info(config.sal_mode)
        loader = get_loader(config, mode='test')
        if not os.path.exists(config.test_fold):
            os.mkdir(config.test_fold)
        Solver(None, loader, config).test()
    else:
        raise IOError("illegal input!!!")
Example #6
Source File: autoencoder.py    From training_results_v0.6 with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #7
Source File: main.py    From PoolNet with MIT License 6 votes vote down vote up
def main(config):
    """Dispatch to saliency training or testing based on config.mode."""
    if config.mode == 'train':
        loader = get_loader(config)
        # Pick the first unused run-<n> directory under the save folder.
        run_id = 0
        while os.path.exists("%s/run-%d" % (config.save_folder, run_id)):
            run_id += 1
        os.mkdir("%s/run-%d" % (config.save_folder, run_id))
        os.mkdir("%s/run-%d/models" % (config.save_folder, run_id))
        config.save_folder = "%s/run-%d" % (config.save_folder, run_id)
        Solver(loader, None, config).train()
    elif config.mode == 'test':
        config.test_root, config.test_list = get_test_info(config.sal_mode)
        loader = get_loader(config, mode='test')
        if not os.path.exists(config.test_fold):
            os.mkdir(config.test_fold)
        Solver(None, loader, config).test()
    else:
        raise IOError("illegal input!!!")
Example #8
Source File: main.py    From mnist-svhn-transfer with MIT License 6 votes vote down vote up
def main(config):
    """Build the SVHN/MNIST transfer solver and run the requested mode."""
    svhn_loader, mnist_loader = get_loader(config)

    solver = Solver(config, svhn_loader, mnist_loader)
    cudnn.benchmark = True

    # Make sure both output directories exist before running.
    for path in (config.model_path, config.sample_path):
        if not os.path.exists(path):
            os.makedirs(path)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'sample':
        solver.sample()
Example #9
Source File: main.py    From domain-transfer-network with MIT License 6 votes vote down vote up
def main(_):
    """Construct the DTN solver and run pretrain/train/eval per FLAGS.mode."""
    model = DTN(mode=FLAGS.mode, learning_rate=0.0003)
    solver = Solver(model, batch_size=100, pretrain_iter=20000, train_iter=2000,
                    sample_iter=100, svhn_dir='svhn', mnist_dir='mnist',
                    model_save_path=FLAGS.model_save_path,
                    sample_save_path=FLAGS.sample_save_path)

    # Ensure both output directories exist.
    for path in (FLAGS.model_save_path, FLAGS.sample_save_path):
        if not tf.gfile.Exists(path):
            tf.gfile.MakeDirs(path)

    if FLAGS.mode == 'pretrain':
        solver.pretrain()
    elif FLAGS.mode == 'train':
        solver.train()
    else:
        solver.eval()
Example #10
Source File: autoencoder.py    From SNIPER-mxnet with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #11
Source File: main.py    From minimal-entropy-correlation-alignment with MIT License 6 votes vote down vote up
def main(_):
    """Build the logDcoral model and run train/test/tsne per FLAGS.mode.

    Fixes vs. original: Python-2-only ``print`` statement replaced with the
    ``print()`` call (valid on both 2 and 3) and tab indentation normalized
    to 4 spaces.
    """
    with tf.device(FLAGS.device):
        model_save_path = FLAGS.model_save_path + '/' + FLAGS.method + '/alpha_' + FLAGS.alpha
        log_dir = 'logs/' + FLAGS.method + '/alpha_' + FLAGS.alpha
        model = logDcoral(mode=FLAGS.mode, method=FLAGS.method, hidden_size=64,
                          learning_rate=0.0001, alpha=float(FLAGS.alpha))
        solver = Solver(model, model_save_path=model_save_path, log_dir=log_dir)

        # Create the checkpoint directory if it does not exist.
        if not tf.gfile.Exists(model_save_path):
            tf.gfile.MakeDirs(model_save_path)

        if FLAGS.mode == 'train':
            solver.train()
        elif FLAGS.mode == 'test':
            solver.test()
        elif FLAGS.mode == 'tsne':
            solver.tsne()
        else:
            print('Unrecognized mode.')
Example #12
Source File: autoencoder.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #13
Source File: autoencoder.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 6 votes vote down vote up
def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                       lr_scheduler=None, print_every=1000):
    """Greedily pre-train each autoencoder layer on X.

    Layer 0 trains on the raw data; every later layer trains on the
    features extracted by the previously trained layers.
    """
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    raw_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                 last_batch_handle='roll_over')
    for layer_idx in range(self.N):
        if layer_idx > 0:
            # Feed the previous layer's extracted features to this layer.
            feats = model.extract_feature(
                self.internals[layer_idx - 1], self.args, self.auxs, raw_iter,
                X.shape[0], self.xpu)
            layer_iter = mx.io.NDArrayIter({'data': list(feats.values())[0]},
                                           batch_size=batch_size,
                                           last_batch_handle='roll_over')
        else:
            layer_iter = raw_iter
        logging.info('Pre-training layer %d...', layer_idx)
        trainer.solve(self.xpu, self.stacks[layer_idx], self.args, self.args_grad,
                      self.auxs, layer_iter, 0, n_iter, {}, False)
Example #14
Source File: run.py    From quickNAT_pytorch with MIT License 5 votes vote down vote up
def train(train_params, common_params, data_params, net_params):
    """Train a QuickNat model from the given parameter dicts and save it."""
    train_data, test_data = load_data(data_params)

    train_loader = torch.utils.data.DataLoader(
        train_data, batch_size=train_params['train_batch_size'], shuffle=True,
        num_workers=4, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        test_data, batch_size=train_params['val_batch_size'], shuffle=False,
        num_workers=4, pin_memory=True)

    # Start from a pre-trained checkpoint when requested, otherwise fresh weights.
    if train_params['use_pre_trained']:
        quicknat_model = torch.load(train_params['pre_trained_path'])
    else:
        quicknat_model = QuickNat(net_params)

    optim_args = {"lr": train_params['learning_rate'],
                  "betas": train_params['optim_betas'],
                  "eps": train_params['optim_eps'],
                  "weight_decay": train_params['optim_weight_decay']}
    solver = Solver(quicknat_model,
                    device=common_params['device'],
                    num_class=net_params['num_class'],
                    optim_args=optim_args,
                    model_name=common_params['model_name'],
                    exp_name=train_params['exp_name'],
                    labels=data_params['labels'],
                    log_nth=train_params['log_nth'],
                    num_epochs=train_params['num_epochs'],
                    lr_scheduler_step_size=train_params['lr_scheduler_step_size'],
                    lr_scheduler_gamma=train_params['lr_scheduler_gamma'],
                    use_last_checkpoint=train_params['use_last_checkpoint'],
                    log_dir=common_params['log_dir'],
                    exp_dir=common_params['exp_dir'])

    solver.train(train_loader, val_loader)

    # Persist the final model next to the experiment's other artifacts.
    final_model_path = os.path.join(common_params['save_model_dir'],
                                    train_params['final_model_file'])
    quicknat_model.save(final_model_path)
    print("final model saved @ " + str(final_model_path))
Example #15
Source File: main.py    From FactorVAE with MIT License 5 votes vote down vote up
def main(args):
    """Instantiate the solver from CLI args and start training."""
    Solver(args).train()
Example #16
Source File: main.py    From stargan with MIT License 5 votes vote down vote up
def main(config):
    """Set up data loaders and run StarGAN training or testing."""
    # For fast training.
    cudnn.benchmark = True

    # Make sure every output directory exists.
    for directory in (config.log_dir, config.model_save_dir,
                      config.sample_dir, config.result_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Build one loader per dataset actually requested.
    celeba_loader = None
    rafd_loader = None
    if config.dataset in ['CelebA', 'Both']:
        celeba_loader = get_loader(config.celeba_image_dir, config.attr_path,
                                   config.selected_attrs, config.celeba_crop_size,
                                   config.image_size, config.batch_size,
                                   'CelebA', config.mode, config.num_workers)
    if config.dataset in ['RaFD', 'Both']:
        rafd_loader = get_loader(config.rafd_image_dir, None, None,
                                 config.rafd_crop_size, config.image_size,
                                 config.batch_size, 'RaFD', config.mode,
                                 config.num_workers)

    # Solver for training and testing StarGAN.
    solver = Solver(celeba_loader, rafd_loader, config)

    single_dataset = config.dataset in ['CelebA', 'RaFD']
    if config.mode == 'train':
        if single_dataset:
            solver.train()
        elif config.dataset in ['Both']:
            solver.train_multi()
    elif config.mode == 'test':
        if single_dataset:
            solver.test()
        elif config.dataset in ['Both']:
            solver.test_multi()
Example #17
Source File: train.py    From Conv-TasNet with MIT License 5 votes vote down vote up
def main(args):
    """Assemble datasets, model and optimizer, then hand off to the Solver."""
    # Data: training uses the configured segment length; cross-validation uses
    # batch_size=1 and full-length audio to keep GPU memory low.
    tr_dataset = AudioDataset(args.train_dir, args.batch_size,
                              sample_rate=args.sample_rate, segment=args.segment)
    cv_dataset = AudioDataset(args.valid_dir, batch_size=1,
                              sample_rate=args.sample_rate,
                              segment=-1, cv_maxlen=args.cv_maxlen)
    tr_loader = AudioDataLoader(tr_dataset, batch_size=1, shuffle=args.shuffle,
                                num_workers=args.num_workers)
    cv_loader = AudioDataLoader(cv_dataset, batch_size=1, num_workers=0)
    data = {'tr_loader': tr_loader, 'cv_loader': cv_loader}

    # Model.
    model = ConvTasNet(args.N, args.L, args.B, args.H, args.P, args.X, args.R,
                       args.C, norm_type=args.norm_type, causal=args.causal,
                       mask_nonlinear=args.mask_nonlinear)
    print(model)
    if args.use_cuda:
        model = torch.nn.DataParallel(model)
        model.cuda()

    # Optimizer.
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                    momentum=args.momentum, weight_decay=args.l2)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     weight_decay=args.l2)
    else:
        print("Not support optimizer")
        return

    # Solver.
    Solver(data, model, optimizer, args).train()
Example #18
Source File: train.py    From CARN-pytorch with MIT License 5 votes vote down vote up
def main(cfg):
    """Dynamically import the requested model module and fit it."""
    # --model selects model/<name>.py; its Net class is what gets trained.
    net = importlib.import_module("model.{}".format(cfg.model)).Net
    print(json.dumps(vars(cfg), indent=4, sort_keys=True))

    Solver(net, cfg).fit()
Example #19
Source File: main.py    From ultra-thin-PRM with MIT License 5 votes vote down vote up
def main(args):
    """Build PASCAL VOC loaders from config.yml and train or run the demo.

    Fix vs. original: on a YAML parse failure the code printed the error and
    kept going, crashing later with a NameError because ``config`` was never
    bound. We now re-raise after printing so the failure is explicit.
    """
    with open("config.yml", 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            raise  # nothing below can run without a parsed config

    train_trans = image_transform(**config['train_transform'])
    test_trans = image_transform(**config['test_transform'])

    config['dataset'].update({'transform': train_trans,
                              'target_transform': None})
    dataset = pascal_voc_classification(**config['dataset'])

    config['data_loaders']['dataset'] = dataset
    data_loader = fetch_voc(**config['data_loaders'])

    train_logger = SummaryWriter(log_dir=os.path.join(config['log'], 'train'),
                                 comment='training')

    solver = Solver(config)

    if args.train:
        solver.train(data_loader, train_logger)
    if args.run_demo:
        # Load demo images and pre-computed object proposals
        # change the idx to test different samples
        idx = 1
        raw_img = PIL.Image.open('./data/sample%d.jpg' % idx).convert('RGB')
        input_var = test_trans(raw_img).unsqueeze(0).cuda().requires_grad_()
        with open('./data/sample%d.json' % idx, 'r') as f:
            proposals = list(map(rle_decode, json.load(f)))
        solver.inference(input_var, raw_img, 19, proposals=proposals)
Example #20
Source File: main.py    From Text2Colors with MIT License 5 votes vote down vote up
def main(args):
    """Create output directories and run the requested Text2Colors phase."""
    # Create each output directory if it doesn't exist.
    output_dirs = (args.text2pal_dir, args.pal2color_dir, args.train_sample_dir,
                   os.path.join(args.test_sample_dir, args.mode))
    for directory in output_dirs:
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Solver for training and testing Text2Colors.
    solver = Solver(args)

    # Mode -> action dispatch table (train or test).
    actions = {'train_TPN': solver.train_TPN,
               'train_PCN': solver.train_PCN,
               'test_TPN': solver.test_TPN,
               'test_text2colors': solver.test_text2colors}
    action = actions.get(args.mode)
    if action is not None:
        action()
Example #21
Source File: train_model.py    From TuSimple-DUC with Apache License 2.0 5 votes vote down vote up
def train_end2end():
    """Read the config file named on the command line and fit the solver."""
    parser = ConfigParser.RawConfigParser()
    parser.read(sys.argv[1])  # first CLI argument is the config path

    Solver(parser).fit()
Example #22
Source File: autoencoder.py    From SNIPER-mxnet with Apache License 2.0 5 votes vote down vote up
def finetune(self, X, batch_size, n_iter, optimizer, l_rate, decay, lr_scheduler=None,
             print_every=1000):
    """Fine-tune the full stacked autoencoder end-to-end on X."""
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    batches = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                last_batch_handle='roll_over')
    logging.info('Fine tuning...')
    trainer.solve(self.xpu, self.loss, self.args, self.args_grad, self.auxs,
                  batches, 0, n_iter, {}, False)
Example #23
Source File: main.py    From SHN-based-2D-face-alignment with MIT License 5 votes vote down vote up
def main(config):
    """Prepare face-alignment datasets and run training or evaluation."""
    # For fast training.
    cudnn.benchmark = True

    # Make sure output directories exist.
    for directory in (config.log_dir, config.model_save_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    imgdirs_train = ['data/afw/', 'data/helen/trainset/', 'data/lfpw/trainset/']
    imgdirs_test_commomset = ['data/helen/testset/', 'data/lfpw/testset/']

    # Training data is only needed when not in test phase.
    if config.phase == 'test':
        trainset = None
        train_loader = None
    else:
        trainset = Dataset(imgdirs_train, config.phase, 'train',
                           config.rotFactor, config.res, config.gamma)
        train_loader = data.DataLoader(trainset,
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=config.num_workers,
                                       pin_memory=True)
    testset = Dataset(imgdirs_test_commomset, 'test', config.attr,
                      config.rotFactor, config.res, config.gamma)
    test_loader = data.DataLoader(testset,
                                  batch_size=config.batch_size,
                                  num_workers=config.num_workers,
                                  pin_memory=True)

    # Solver for training and testing.
    solver = Solver(train_loader, test_loader, config)
    if config.phase == 'train':
        solver.train()
    else:
        solver.load_state_dict(config.best_model)
        solver.test()
Example #24
Source File: main.py    From dl-uncertainty with MIT License 5 votes vote down vote up
def main(_):
    """Entry point: build model and solver, then dispatch on FLAGS.mode.

    Fixes vs. original: Python-2-only ``print`` statement replaced with the
    ``print()`` call (valid on both 2 and 3), mixed tab/space indentation
    normalized to 4 spaces, and the duplicated directory-creation block removed.
    """
    with tf.device(FLAGS.device):
        model_save_path = 'model/' + FLAGS.model_save_path
        # Create the checkpoint directory if it does not exist.
        if not tf.gfile.Exists(model_save_path):
            tf.gfile.MakeDirs(model_save_path)
        log_dir = 'logs/' + model_save_path

        # Only test mode needs a checkpoint path to restore from.
        if FLAGS.mode == 'test':
            checkpoint = model_save_path + '/model'

        model = Model(learning_rate=0.0003, mode=FLAGS.mode)
        solver = Solver(model, model_save_path=model_save_path,
                        log_dir=log_dir,
                        training_size=int(FLAGS.training_size))

        if FLAGS.mode == 'train':
            solver.train()
        elif FLAGS.mode == 'test':
            solver.test(checkpoint=checkpoint)
        else:
            print('Unrecognized mode.')
Example #25
Source File: autoencoder.py    From training_results_v0.6 with Apache License 2.0 5 votes vote down vote up
def finetune(self, X, batch_size, n_iter, optimizer, l_rate, decay, lr_scheduler=None,
             print_every=1000):
    """Fine-tune the full stacked autoencoder end-to-end on X."""
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    batches = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                last_batch_handle='roll_over')
    logging.info('Fine tuning...')
    trainer.solve(self.xpu, self.loss, self.args, self.args_grad, self.auxs,
                  batches, 0, n_iter, {}, False)
Example #26
Source File: main.py    From Beta-VAE with MIT License 5 votes vote down vote up
def main(args):
    """Seed every RNG for reproducibility, then train or traverse."""
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed, np.random.seed):
        seed_fn(args.seed)

    solver = Solver(args)

    if args.train:
        solver.train()
    else:
        solver.traverse()
Example #27
Source File: train_test.py    From person-reid-lib with MIT License 5 votes vote down vote up
def main():
    """Run train/test over 10 dataset splits and rank the aggregate results."""
    cur_dir = file_abs_path(__file__)
    manager = Manager(cur_dir, seed=None, mode='Train')
    logger = manager.logger
    ParseArgs(logger)
    if manager.seed is not None:
        # Seed every RNG for reproducible splits.
        random.seed(manager.seed)
        np.random.seed(manager.seed)
        torch.manual_seed(manager.seed)

    # ['iLIDS-VID', 'PRID-2011', 'LPW', 'MARS', 'VIPeR', 'Market1501', 'CUHK03', 'CUHK01', 'DukeMTMCreID', 'GRID', 'DukeMTMC-VideoReID']
    #       0            1         2      3        4          5           6         7             8           9             10
    manager.set_dataset(0)

    perf_box = {}
    for split_idx in range(10):  # repeat the run once per split
        manager.split_id = split_idx
        task = Solver(manager)
        elapsed = timer_lite(task.run)
        perf_box[str(split_idx)] = task.perf_box
        manager.store_performance(perf_box)

        logger.info('-----------Total time------------')
        logger.info('Split ID:' + str(split_idx) + '  ' + str(elapsed))
        logger.info('---------------------------------')

    compute_rank(perf_box, logger)
Example #28
Source File: train_test.py    From person-reid-lib with MIT License 5 votes vote down vote up
def main():
    """Run train/test over 10 dataset splits and rank the aggregate results."""
    cur_dir = file_abs_path(__file__)
    manager = Manager(cur_dir, seed=None, mode='Train')
    logger = manager.logger
    ParseArgs(logger)
    if manager.seed is not None:
        # Seed every RNG for reproducible splits.
        random.seed(manager.seed)
        np.random.seed(manager.seed)
        torch.manual_seed(manager.seed)

    # ['iLIDS-VID', 'PRID-2011', 'LPW', 'MARS', 'VIPeR', 'Market1501', 'CUHK03', 'CUHK01', 'DukeMTMCreID', 'GRID', 'DukeMTMC-VideoReID']
    #       0            1         2      3        4          5           6         7             8           9             10
    manager.set_dataset(4)

    perf_box = {}
    for split_idx in range(10):  # repeat the run once per split
        manager.split_id = split_idx
        task = Solver(manager)
        elapsed = timer_lite(task.run)
        perf_box[str(split_idx)] = task.perf_box
        manager.store_performance(perf_box)

        logger.info('-----------Total time------------')
        logger.info('Split ID:' + str(split_idx) + '  ' + str(elapsed))
        logger.info('---------------------------------')

    compute_rank(perf_box, logger)
Example #29
Source File: autoencoder.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 5 votes vote down vote up
def finetune(self, X, batch_size, n_iter, optimizer, l_rate, decay, lr_scheduler=None,
             print_every=1000):
    """Fine-tune the full stacked autoencoder end-to-end on X."""
    def l2_norm(label, pred):
        # Halved mean squared error, i.e. 0.5 * ||label - pred||^2 averaged.
        return np.mean(np.square(label - pred)) / 2.0

    trainer = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                     lr_scheduler=lr_scheduler)
    trainer.set_metric(mx.metric.CustomMetric(l2_norm))
    trainer.set_monitor(Monitor(print_every))
    batches = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                last_batch_handle='roll_over')
    logging.info('Fine tuning...')
    trainer.solve(self.xpu, self.loss, self.args, self.args_grad, self.auxs,
                  batches, 0, n_iter, {}, False)
Example #30
Source File: main.py    From MCD_DA with MIT License 4 votes vote down vote up
def main():
    """Run MCD domain-adaptation training or evaluation.

    Fixes vs. original: Python-2-only ``xrange`` replaced with ``range``
    (works on both 2 and 3), and the four near-identical record-filename
    expressions deduplicated into a format string + field tuple.
    """
    solver = Solver(args, source=args.source, target=args.target,
                    learning_rate=args.lr, batch_size=args.batch_size,
                    optimizer=args.optimizer, num_k=args.num_k, all_use=args.all_use,
                    checkpoint_dir=args.checkpoint_dir,
                    save_epoch=args.save_epoch)

    # usps pairs include the all_use flag in the record file names.
    if args.source == 'usps' or args.target == 'usps':
        train_fmt = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s.txt'
        test_fmt = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s_test.txt'
        fields = (args.source, args.target, args.num_k, args.all_use, args.one_step)
    else:
        train_fmt = 'record/%s_%s_k_%s_onestep_%s_%s.txt'
        test_fmt = 'record/%s_%s_k_%s_onestep_%s_%s_test.txt'
        fields = (args.source, args.target, args.num_k, args.one_step)

    # Pick the first unused record index.
    record_num = 0
    record_train = train_fmt % (fields + (record_num,))
    record_test = test_fmt % (fields + (record_num,))
    while os.path.exists(record_train):
        record_num += 1
        record_train = train_fmt % (fields + (record_num,))
        record_test = test_fmt % (fields + (record_num,))

    if not os.path.exists(args.checkpoint_dir):
        os.mkdir(args.checkpoint_dir)
    if not os.path.exists('record'):
        os.mkdir('record')

    if args.eval_only:
        solver.test(0)
    else:
        count = 0
        for t in range(args.max_epoch):
            if not args.one_step:
                num = solver.train(t, record_file=record_train)
            else:
                num = solver.train_onestep(t, record_file=record_train)
            count += num
            if t % 1 == 0:
                solver.test(t, record_file=record_test, save_model=args.save_model)
            # Stop early once enough training iterations have accumulated.
            if count >= 20000:
                break