Python torch.optim.lr_scheduler.StepLR() Examples
The following are 30 code examples of torch.optim.lr_scheduler.StepLR(), collected from open-source projects. The source file and project are noted above each example. You may also want to check out all available functions and classes of the module torch.optim.lr_scheduler.
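Before the project examples, here is a minimal, self-contained sketch of the API (the model and training loop are placeholders, not taken from any project below): StepLR multiplies each parameter group's learning rate by gamma once every step_size epochs, and since PyTorch 1.1 the scheduler is stepped after the optimizer, once per epoch.

import torch
from torch import nn, optim
from torch.optim.lr_scheduler import StepLR

model = nn.Linear(10, 2)                                # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = StepLR(optimizer, step_size=30, gamma=0.1)  # lr *= 0.1 every 30 epochs

for epoch in range(90):
    # ... forward pass, loss computation, loss.backward() ...
    optimizer.step()
    scheduler.step()  # after optimizer.step(), per the PyTorch 1.1+ convention

print(scheduler.get_last_lr())  # approximately [1e-4]: decays at epochs 30, 60, 90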
Example #1
Source File: networks.py From deepsaber with GNU General Public License v3.0
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler

# learning rate schedules
Example #2
Source File: imagenet_classif.py From DTC with MIT License
def train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss().cuda(device)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    for epoch in range(args.epochs):
        scheduler.step()  # stepped at the top of the epoch: the pre-1.1 PyTorch convention
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        for batch_idx, (x, label, _) in enumerate(tqdm(train_loader)):
            x, label = x.to(device), label.to(device)
            output = model(x)
            loss = criterion(output, label)
            acc = accuracy(output, label)
            acc_record.update(acc[0].item(), x.size(0))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(epoch, loss_record.avg, acc_record.avg))
        test(model, eva_loader, args)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #3
Source File: base_net.py From One_Shot_Face_Reenactment with MIT License
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
Example #4
Source File: utility.py From MSRN-PyTorch with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    return scheduler
Example #5
Source File: ssds_train.py From ssds.pytorch with MIT License
def configure_lr_scheduler(self, optimizer, cfg):
    if cfg.SCHEDULER == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'multi_step':
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'exponential':
        scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'SGDR':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)
    else:
        raise AssertionError('scheduler can not be recognized.')
    return scheduler
Example #6
Source File: training.py From freesound-classification with Apache License 2.0
def make_scheduler(params, max_steps):
    name, *args = params.split("_")
    if name == "steplr":
        step_size, gamma = args
        step_size = int(step_size)
        gamma = float(gamma)
        return partial(StepLR, step_size=step_size, gamma=gamma)
    elif name == "1cycle":
        min_lr, max_lr = args
        min_lr = float(min_lr)
        max_lr = float(max_lr)
        return partial(
            OneCycleScheduler,
            min_lr=min_lr, max_lr=max_lr, max_steps=max_steps)
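Unlike most examples here, make_scheduler returns a factory rather than a scheduler: the functools.partial still has to be applied to an optimizer by the training code. A hypothetical usage sketch of the "steplr" branch, with a placeholder model and optimizer:

from functools import partial

import torch
from torch import nn, optim
from torch.optim.lr_scheduler import StepLR

# what make_scheduler("steplr_30_0.5", max_steps) would return
scheduler_factory = partial(StepLR, step_size=30, gamma=0.5)

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = scheduler_factory(optimizer)  # the optimizer is supplied later by the caller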
Example #7
Source File: train_rels.py From VCTree-Scene-Graph-Generation with MIT License
def get_optim(lr):
    # Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
    # stabilize the models.
    fc_params = [p for n, p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
    non_fc_params = [p for n, p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
    params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
    # params = [p for n,p in detector.named_parameters() if p.requires_grad]

    if conf.adam:
        optimizer = optim.Adadelta(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
    else:
        optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)

    # scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=2, factor=0.5,
                                  verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
    return optimizer, scheduler
Example #8
Source File: utility.py From 3D_Appearance_SR with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    return scheduler
Example #9
Source File: utility.py From AWSRN with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        print(milestones)
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    elif args.decay_type == 'restart':
        scheduler = lrs.LambdaLR(my_optimizer, lambda epoch: multistep_restart(args.period, epoch))
    return scheduler
Example #10
Source File: utility.py From NTIRE2019_EDRN with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    return scheduler
Example #11
Source File: utility.py From 2018_subeesh_epsr_eccvw with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    return scheduler
Example #12
Source File: test_param_scheduler.py From ignite with BSD 3-Clause "New" or "Revised" License
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):
    model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
    init_lr = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
    scaled_lr = 0.02
    warmup_duration = 5
    step_size = 2
    gamma = 0.97

    output_simulated_values = [None] * 50
    create_lr_scheduler_with_warmup(
        torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
        warmup_start_value=0.0,
        warmup_end_value=scaled_lr,
        warmup_duration=warmup_duration,
        output_simulated_values=output_simulated_values,
    )

    assert output_simulated_values[0] == [0, 0.0]
    assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
    assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
    v = [warmup_duration + step_size, init_lr * gamma]
    assert output_simulated_values[warmup_duration + step_size] == v
Example #13
Source File: teaser.py From ignite with BSD 3-Clause "New" or "Revised" License
def initialize(config):
    model = get_model(config["model"])
    # Adapt model for distributed settings if configured
    model = idist.auto_model(model)

    optimizer = optim.SGD(
        model.parameters(),
        lr=config.get("learning_rate", 0.1),
        momentum=config.get("momentum", 0.9),
        weight_decay=config.get("weight_decay", 1e-5),
        nesterov=True,
    )
    optimizer = idist.auto_optim(optimizer)
    criterion = nn.CrossEntropyLoss().to(idist.device())

    le = config["num_iters_per_epoch"]
    lr_scheduler = StepLR(optimizer, step_size=le, gamma=0.9)

    return model, optimizer, criterion, lr_scheduler

# slide 1 ####################################################################
Example #14
Source File: utility.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler_function = lrs.StepLR
        kwargs = {'step_size': args.lr_decay, 'gamma': args.gamma}
    elif args.decay_type.find('step') >= 0:
        scheduler_function = lrs.MultiStepLR
        milestones = list(map(lambda x: int(x), args.decay_type.split('-')[1:]))
        kwargs = {'milestones': milestones, 'gamma': args.gamma}

    return scheduler_function(my_optimizer, **kwargs)
Example #15
Source File: scheduler_factory.py From kaggle-humpback with BSD 2-Clause "Simplified" License
def step(optimizer, last_epoch, step_size=80, gamma=0.1, **_):
    return lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)
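This factory forwards last_epoch, which is what lets a freshly constructed StepLR pick up a schedule mid-run when resuming (Example #24 below does the same via last_step). The usual companion is saving and restoring the scheduler's own state; a brief sketch, assuming a hypothetical checkpoint path and placeholder model:

import torch
from torch import nn, optim
from torch.optim import lr_scheduler

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = lr_scheduler.StepLR(optimizer, step_size=80, gamma=0.1)

# round-trip through a checkpoint; 'ckpt.pth' is a hypothetical path
torch.save({'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict()}, 'ckpt.pth')
state = torch.load('ckpt.pth')
optimizer.load_state_dict(state['optimizer'])
scheduler.load_state_dict(state['scheduler'])  # restores last_epoch, so the decay resumes in place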
Example #16
Source File: base_models.py From FCSR-GAN with Apache License 2.0
def _get_scheduler(self, optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
Example #17
Source File: uniaae.py From BiAAE with MIT License
def configure_optimizers(self):
    gen_params = torch.nn.ModuleList([self.enc_x, self.dec_x, self.enc_y, self.dec_y])
    discr_params = torch.nn.ModuleList([self.discr_indep, self.discr])

    gen_optim = torch.optim.Adam(gen_params.parameters(), lr=3e-4, betas=(0.5, 0.9))
    discr_optim = torch.optim.Adam(discr_params.parameters(), lr=3e-4, betas=(0.5, 0.9))

    discriminator_sched = StepLR(discr_optim, step_size=5000, gamma=0.5)

    return [gen_optim, discr_optim], [discriminator_sched]
Example #18
Source File: mnist_example.py From image-feature-learning-pytorch with MIT License
def main():
    train_dataset = datasets.MNIST(
        '../data', download=True, train=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        ))
    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers)

    model = LeNetPP(dim_hidden=args.dim_hidden)
    if args.cuda:
        model = model.cuda()

    nll_loss = nn.NLLLoss()
    if args.loss == 0:
        center_loss = CenterLoss(dim_hidden=args.dim_hidden, num_classes=10,
                                 lambda_c=args.lambda_c, use_cuda=args.cuda)
    if args.loss == 1:
        center_loss = ContrastiveCenterLoss(dim_hidden=args.dim_hidden, num_classes=10,
                                            lambda_c=args.lambda_c, use_cuda=args.cuda)
    if args.cuda:
        nll_loss, center_loss = nll_loss.cuda(), center_loss.cuda()
    criterion = [nll_loss, center_loss]

    optimizer_nn = optim.SGD(
        model.parameters(), lr=args.lr, momentum=args.momentum)
    scheduler = lr_scheduler.StepLR(optimizer_nn, step_size=50, gamma=0.2)
    optimizer_c = optim.SGD(center_loss.parameters(), lr=args.alpha)

    for epoch in range(args.epochs):
        scheduler.step()
        train(train_loader, model, criterion, [optimizer_nn, optimizer_c], epoch + 1)
Example #19
Source File: saae.py From BiAAE with MIT License
def configure_optimizers(self):
    gen_params = torch.nn.ModuleList([self.enc_x, self.dec_x, self.enc_y])
    discr_params = torch.nn.ModuleList([self.discr])

    gen_optim = torch.optim.Adam(gen_params.parameters(), lr=3e-4, betas=(0.5, 0.9))
    discr_optim = torch.optim.Adam(discr_params.parameters(), lr=3e-4, betas=(0.5, 0.9))

    discriminator_sched = StepLR(discr_optim, step_size=5000, gamma=0.5)

    return [gen_optim, discr_optim], [discriminator_sched]
Example #20
Source File: utility.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler_function = lrs.StepLR
        kwargs = {'step_size': args.lr_decay, 'gamma': args.gamma}
    elif args.decay_type.find('step') >= 0:
        scheduler_function = lrs.MultiStepLR
        milestones = list(map(lambda x: int(x), args.decay_type.split('-')[1:]))
        kwargs = {'milestones': milestones, 'gamma': args.gamma}

    return scheduler_function(my_optimizer, **kwargs)
Example #21
Source File: pcb_trainer.py From PAST-ReID with MIT License
def create_lr_scheduler_promoting(self, num_train_loader):
    if self.args.phase == 'normal':
        self.args.lr_decay_steps_iters = [num_train_loader * ep for ep in self.args.lr_decay_epochs]
        self.lr_scheduler_promoting = MultiStepLR(self.optimizer_promoting, self.args.lr_decay_steps_iters)
    else:
        self.lr_scheduler_promoting = StepLR(self.optimizer_promoting, 10)
    return self.lr_scheduler_promoting
Example #22
Source File: pcb_trainer.py From PAST-ReID with MIT License
def create_lr_scheduler(self):
    if self.args.phase == 'normal':
        self.args.lr_decay_steps = [self.args.num_train_loader * it * self.args.epochs
                                    for it in self.args.lr_decay_iters]
        self.lr_scheduler = MultiStepLR(self.optimizer, self.args.lr_decay_steps)
    else:
        self.lr_scheduler = StepLR(self.optimizer, 10)
    return self.lr_scheduler
Example #23
Source File: pix2pix_model.py From DeepMosaics with GNU General Public License v3.0
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
Example #24
Source File: solvers.py From SpatioTemporalSegmentation with MIT License
def initialize_scheduler(optimizer, config, last_step=-1):
    if config.scheduler == 'StepLR':
        return StepLR(
            optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step)
    elif config.scheduler == 'PolyLR':
        return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step)
    elif config.scheduler == 'SquaredLR':
        return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step)
    elif config.scheduler == 'ExpLR':
        return ExpLR(
            optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step)
    else:
        logging.error('Scheduler not supported')
Example #25
Source File: auto_pruners_torch.py From nni with MIT License
def get_trained_model(args, device, train_loader, val_loader, criterion):
    if args.model == 'LeNet':
        model = LeNet().to(device)
        optimizer = torch.optim.Adadelta(model.parameters(), lr=1)
        scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
        for epoch in range(args.pretrain_epochs):
            train(args, model, device, train_loader, criterion, optimizer, epoch)
            scheduler.step()
    elif args.model == 'vgg16':
        model = VGG(depth=16).to(device)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
        scheduler = MultiStepLR(
            optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
        for epoch in range(args.pretrain_epochs):
            train(args, model, device, train_loader, criterion, optimizer, epoch)
            scheduler.step()
    elif args.model == 'resnet18':
        model = models.resnet18(pretrained=False, num_classes=10).to(device)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
        scheduler = MultiStepLR(
            optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
        for epoch in range(args.pretrain_epochs):
            train(args, model, device, train_loader, criterion, optimizer, epoch)
            scheduler.step()
    elif args.model == 'mobilenet_v2':
        model = models.mobilenet_v2(pretrained=True).to(device)

    if args.save_model:
        torch.save(model.state_dict(), os.path.join(
            args.experiment_data_dir, 'model_trained.pth'))
        print('Model trained saved to %s' % args.experiment_data_dir)

    return model, optimizer
Example #26
Source File: sklearn_api.py From pt-sdae with MIT License
def __init__(
    self,
    dimensions: List[int],
    cuda: Optional[bool] = None,
    batch_size: int = 256,
    pretrain_epochs: int = 200,
    finetune_epochs: int = 500,
    corruption: Optional[float] = 0.2,
    optimiser_pretrain: Callable[
        [torch.nn.Module], torch.optim.Optimizer
    ] = lambda x: SGD(x.parameters(), lr=0.1, momentum=0.9),
    optimiser_train: Callable[
        [torch.nn.Module], torch.optim.Optimizer
    ] = lambda x: SGD(x.parameters(), lr=0.1, momentum=0.9),
    scheduler: Optional[Callable[[torch.optim.Optimizer], Any]] = lambda x: StepLR(
        x, 100, gamma=0.1
    ),
    final_activation: Optional[torch.nn.Module] = None,
) -> None:
    self.cuda = torch.cuda.is_available() if cuda is None else cuda
    self.batch_size = batch_size
    self.dimensions = dimensions
    self.pretrain_epochs = pretrain_epochs
    self.finetune_epochs = finetune_epochs
    self.optimiser_pretrain = optimiser_pretrain
    self.optimiser_train = optimiser_train
    self.scheduler = scheduler
    self.corruption = corruption
    self.autoencoder = None
    self.final_activation = final_activation
Example #27
Source File: networks.py From MADAN with MIT License
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
Example #28
Source File: utility.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler_function = lrs.StepLR
        kwargs = {'step_size': args.lr_decay, 'gamma': args.gamma}
    elif args.decay_type.find('step') >= 0:
        scheduler_function = lrs.MultiStepLR
        milestones = list(map(lambda x: int(x), args.decay_type.split('-')[1:]))
        kwargs = {'milestones': milestones, 'gamma': args.gamma}

    return scheduler_function(my_optimizer, **kwargs)
Example #29
Source File: ranker.py From pt-ranking.github.io with MIT License
def config_optimizer(self):
    if 'Adam' == self.opt:
        self.optimizer = optim.Adam(self.sf.parameters(), lr=self.lr,
                                    weight_decay=self.weight_decay)  # use regularization
    elif 'RMS' == self.opt:
        self.optimizer = optim.RMSprop(self.sf.parameters(), lr=self.lr,
                                       weight_decay=self.weight_decay)  # use regularization
    else:
        raise NotImplementedError

    self.scheduler = StepLR(self.optimizer, step_size=20, gamma=0.5)
Example #30
Source File: lr_scheduler.py From homura with Apache License 2.0
def StepLR(step_size, gamma=0.1, last_epoch=-1):
    return partial(_lr_scheduler.StepLR, **locals())
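This wrapper shadows the PyTorch class name and defers construction: **locals() captures step_size, gamma, and last_epoch, and the returned partial still expects the optimizer as its first argument. A hypothetical usage sketch, with the wrapper repeated so the snippet stands alone and a placeholder model:

from functools import partial

import torch
from torch import nn, optim
from torch.optim import lr_scheduler as _lr_scheduler

def StepLR(step_size, gamma=0.1, last_epoch=-1):
    return partial(_lr_scheduler.StepLR, **locals())

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
factory = StepLR(step_size=30, gamma=0.5)  # no optimizer needed yet
scheduler = factory(optimizer)             # constructed later, e.g. by the trainer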