Python utils.adjust_learning_rate() Examples
The following are 3 code examples of utils.adjust_learning_rate(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
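The exact implementation of utils.adjust_learning_rate() differs from project to project. As a point of reference, a minimal sketch of the common step-decay form matching the adjust_learning_rate(opt, optimizer, epoch) signature used in Examples #1 and #3 is shown below; the decay factor and the opt.epoch_step attribute are assumptions for illustration, not taken from any of the projects listed here.

def adjust_learning_rate(opt, optimizer, epoch):
    # Hypothetical step decay: halve the base rate every opt.epoch_step epochs.
    lr = opt.maxlr * (0.5 ** (epoch // opt.epoch_step))
    # Write the new rate into every parameter group of the optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr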
Example #1
Source File: main.py From Where-are-they-looking-PyTorch with MIT License | 4 votes |
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000

    print(opt)
    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    dataloader = ld.GazeFollow(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:", optimizer.param_groups[0]["lr"])

        if opt.testOnly == False:
            trainer.train(train_loader, epoch, opt)

        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)

        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)

        print('Best error: [{0:.3f}]\t'.format(best_err1))
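Note that this example reads the updated rate back from optimizer.param_groups[0]["lr"] immediately after the call, so utils.adjust_learning_rate() here only needs to mutate the optimizer's parameter groups in place.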
Example #2
Source File: train_splitted.py From PyTorch-BayesianCNN with MIT License | 4 votes |
def train_splitted(num_tasks, bayesian=True, net_type='lenet'):
    assert 10 % num_tasks == 0

    # Hyper Parameter settings
    train_ens = cfg.train_ens
    valid_ens = cfg.valid_ens
    n_epochs = cfg.n_epochs
    lr_start = cfg.lr_start

    if bayesian:
        ckpt_dir = f"checkpoints/MNIST/bayesian/splitted/{num_tasks}-tasks/"
    else:
        ckpt_dir = f"checkpoints/MNIST/frequentist/splitted/{num_tasks}-tasks/"
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir, exist_ok=True)

    loaders, datasets = mix_utils.get_splitmnist_dataloaders(num_tasks, return_datasets=True)
    models = mix_utils.get_splitmnist_models(num_tasks, bayesian=bayesian, pretrained=False, net_type=net_type)

    for task in range(1, num_tasks + 1):
        print(f"Training task-{task}..")
        trainset, testset, _, _ = datasets[task-1]
        train_loader, valid_loader, _ = loaders[task-1]
        net = models[task-1]
        net = net.to(device)
        ckpt_name = ckpt_dir + f"model_{net_type}_{num_tasks}.{task}.pt"
        criterion = (metrics.ELBO(len(trainset)) if bayesian else nn.CrossEntropyLoss()).to(device)
        optimizer = Adam(net.parameters(), lr=lr_start)
        valid_loss_max = np.Inf

        for epoch in range(n_epochs):  # loop over the dataset multiple times
            utils.adjust_learning_rate(optimizer, metrics.lr_linear(epoch, 0, n_epochs, lr_start))

            if bayesian:
                train_loss, train_acc, train_kl = train_bayesian(net, optimizer, criterion, train_loader, num_ens=train_ens)
                valid_loss, valid_acc = validate_bayesian(net, criterion, valid_loader, num_ens=valid_ens)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f} \ttrain_kl_div: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc, train_kl))
            else:
                train_loss, train_acc = train_frequentist(net, optimizer, criterion, train_loader)
                valid_loss, valid_acc = validate_frequentist(net, criterion, valid_loader)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc))

            # save model if validation loss has decreased
            if valid_loss <= valid_loss_max:
                print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                    valid_loss_max, valid_loss))
                torch.save(net.state_dict(), ckpt_name)
                valid_loss_max = valid_loss

        print(f"Done training task-{task}")
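In this example the schedule itself is computed by metrics.lr_linear(), and utils.adjust_learning_rate() only has to apply the precomputed rate to the optimizer. A minimal sketch of that two-argument form is given below; it is an assumption about the project's helpers based on how they are called, not the project's exact code.

def adjust_learning_rate(optimizer, lr):
    # Apply an externally computed learning rate to every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def lr_linear(epoch, start_epoch, n_epochs, lr_start):
    # Assumed behaviour of metrics.lr_linear: decay linearly from lr_start
    # toward zero over the course of training.
    return lr_start * (1 - (epoch - start_epoch) / n_epochs)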
Example #3
Source File: main.py From Deep-Expander-Networks with GNU General Public License v3.0 | 4 votes |
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    opt.lr = opt.maxlr
    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True

    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    model, criterion, optimizer = init_model.setup(model, opt)

    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_acc = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    # print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch, "Learning rate:", opt.lr)

        if opt.testOnly == False:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard:
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
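Unlike Example #1, this example prints and logs opt.lr after the call rather than reading the rate from the optimizer, which suggests that this project's utils.adjust_learning_rate(opt, optimizer, epoch) also writes the updated rate back onto opt.lr.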