Python torch.optim.lr_scheduler.LambdaLR() Examples

The following are 30 code examples of torch.optim.lr_scheduler.LambdaLR(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module torch.optim.lr_scheduler, or try the search function.
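
LambdaLR itself is a thin wrapper: it multiplies each parameter group's initial learning rate by whatever factor the supplied function returns for the current epoch. A minimal sketch of the API (the model and the 0.95 decay rate are placeholders, not taken from any example below):

import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(10, 2)                 # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# the lambda returns a multiplier, not a learning rate:
# the effective lr at epoch e is 0.1 * 0.95 ** e
scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 0.95 ** epoch)

for epoch in range(5):
    optimizer.step()    # the training step(s) would go here
    scheduler.step()
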
Example #1
Source File: networks.py    From deepsaber with GNU General Public License v3.0
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


Example #2
Source File: base_net.py    From One_Shot_Face_Reenactment with MIT License
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Example #3
Source File: main.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def create_lr_scheduler(optimizer, config):
    if config.lr_scheduler == 'cos':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
                                                   T_max=config.epochs,
                                                   eta_min=config.min_lr)
    elif config.lr_scheduler == 'multistep':
        if config.steps is None: return None
        if isinstance(config.steps, int): config.steps = [config.steps]
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             milestones=config.steps,
                                             gamma=config.gamma)
    elif config.lr_scheduler == 'exp-warmup':
        lr_lambda = exp_warmup(config.rampup_length,
                               config.rampdown_length,
                               config.epochs)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    elif config.lr_scheduler == 'none':
        scheduler = None
    else:
        raise ValueError("No such scheduler: {}".format(config.lr_scheduler))
    return scheduler 
Example #4
Source File: optimization.py    From exbert with Apache License 2.0
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1
):
    """ Create a schedule with a learning rate that decreases following the
    values of the cosine function with several hard restarts, after a warmup
    period during which it increases linearly between 0 and 1.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
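
A usage sketch for the function above; the optimizer, warmup length, and step counts are assumed values for illustration:

import torch

model = torch.nn.Linear(10, 2)                 # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

# 100 linear-warmup steps, 1000 total steps, 2 cosine cycles with hard restarts
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=1000, num_cycles=2.0)

for step in range(1000):
    optimizer.step()                           # forward/backward would go here
    scheduler.step()
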
Example #5
Source File: lr_scheduler.py    From XenonPy with BSD 3-Clause "New" or "Revised" License
def __init__(self, *, lr_lambda, last_epoch=-1):
        """Sets the learning rate of each parameter group to the initial lr
        times a given function. When last_epoch=-1, sets initial lr as lr.

        Args:
            lr_lambda (function or list): A function which computes a multiplicative
                factor given an integer parameter epoch, or a list of such
                functions, one for each group in optimizer.param_groups.
            last_epoch (int): The index of last epoch. Default: -1.

        Example:
            >>> # Assuming optimizer has two groups.
            >>> lambda1 = lambda epoch: epoch // 30
            >>> lambda2 = lambda epoch: 0.95 ** epoch
            >>> scheduler = LambdaLR(lr_lambda=[lambda1, lambda2])
            >>> scheduler(optimizer)
            >>> for epoch in range(100):
            >>>     train(...)
            >>>     validate(...)
            >>>     scheduler.step()
        """
        super().__init__(lr_scheduler.LambdaLR, lr_lambda=lr_lambda, last_epoch=last_epoch) 
Example #6
Source File: networks_pono.py    From PONO with MIT License
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler
    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
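
To put numbers on the 'linear' policy: with epoch_count=1, niter=100, and niter_decay=100 (assumed values), lambda_rule holds the factor at 1.0 for the first 100 epochs and then decays it linearly toward zero:

def lambda_rule(epoch, epoch_count=1, niter=100, niter_decay=100):
    return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay + 1)

print(lambda_rule(0))     # 1.0      full learning rate
print(lambda_rule(99))    # 1.0      last epoch before the decay starts
print(lambda_rule(149))   # ~0.505   halfway through the decay
print(lambda_rule(199))   # ~0.0099  essentially zero at the end
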
Example #7
Source File: train_rcnn.py    From PointRCNN with MIT License
def create_scheduler(optimizer, total_steps, last_epoch):
    def lr_lbmd(cur_epoch):
        cur_decay = 1
        for decay_step in cfg.TRAIN.DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.LR_DECAY
        return max(cur_decay, cfg.TRAIN.LR_CLIP / cfg.TRAIN.LR)

    def bnm_lmbd(cur_epoch):
        cur_decay = 1
        for decay_step in cfg.TRAIN.BN_DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.BN_DECAY
        return max(cfg.TRAIN.BN_MOMENTUM * cur_decay, cfg.TRAIN.BNM_CLIP)

    if cfg.TRAIN.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = lsf.OneCycle(
            optimizer, total_steps, cfg.TRAIN.LR, list(cfg.TRAIN.MOMS), cfg.TRAIN.DIV_FACTOR, cfg.TRAIN.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)

    bnm_scheduler = train_utils.BNMomentumScheduler(model, bnm_lmbd, last_epoch=last_epoch)
    return lr_scheduler, bnm_scheduler 
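
lr_lbmd above multiplies in one cfg.TRAIN.LR_DECAY factor for every decay milestone the epoch has passed, then clips so the effective rate never falls below cfg.TRAIN.LR_CLIP. A standalone trace with assumed config values:

def lr_lbmd(cur_epoch, decay_steps=(50, 100), lr_decay=0.1, lr_clip=1e-5, lr=0.01):
    cur_decay = 1
    for decay_step in decay_steps:
        if cur_epoch >= decay_step:
            cur_decay = cur_decay * lr_decay
    return max(cur_decay, lr_clip / lr)

print(lr_lbmd(10))   # 1.0    -> effective lr 0.01
print(lr_lbmd(60))   # 0.1    -> effective lr 0.001
print(lr_lbmd(120))  # ~0.01  -> effective lr ~1e-4, still above the 1e-5 clip
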
Example #8
Source File: utility.py    From AWSRN with MIT License
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(int, milestones))
        print(milestones)
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
        
    elif args.decay_type == 'restart':
        scheduler = lrs.LambdaLR(my_optimizer, lambda epoch: multistep_restart(args.period, epoch))

    return scheduler 
Example #9
Source File: lr_schedulers.py    From srgan with MIT License
def get_lr_scheduler(optimizer_conf, scheduler_name, optimizer, initial_epoch=-1):
  if scheduler_name == 'multistep':
    return lr_scheduler.MultiStepLR(optimizer,
                                    optimizer_conf.decay_steps,
                                    optimizer_conf.decay_factor,
                                    initial_epoch)
  elif scheduler_name == 'linear' or scheduler_name == 'polynomial':
    power = 1.0 if scheduler_name == 'linear' else optimizer_conf.decay_power
    lr_lambda = _get_polynomial_decay(optimizer_conf.learning_rate,
                                      optimizer_conf.end_learning_rate,
                                      optimizer_conf.decay_steps,
                                      optimizer_conf.get_attr('start_decay',
                                                              default=0),
                                      power)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda, initial_epoch)
  else:
    raise ValueError('Unknown learning rate scheduler {}'.format(scheduler_name)) 
Example #10
Source File: utils.py    From sparse-sharing with MIT License
def get_scheduler(args, optimizer):
    name, args = parse_dict_args(args.scheduler)
    if name == "fix":
        return None
    elif name == "inverse_sqrt":
        warmup = args.get('warmup', 4000)
        lr = get_lr(optimizer)
        lr_step = lr / warmup
        decay = lr * warmup ** 0.5

        def warm_decay(n):
            if n < warmup:
                return lr_step * n
            return decay * n ** -0.5

        return LambdaLR(optimizer, warm_decay)
    elif name == 'decay':
        return LambdaLR(optimizer, lambda ep: 1 / (1 + 0.05 * ep)) 
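
The two branches of warm_decay meet exactly at n = warmup: lr_step * warmup = lr, and decay * warmup ** -0.5 = lr * warmup ** 0.5 * warmup ** -0.5 = lr, so the linear ramp hands off to the inverse-square-root decay with no jump. A quick check, assuming a base optimizer lr of 1.0 so the returned value can be read directly as a learning rate (note that LambdaLR multiplies the optimizer's base lr by the returned value):

warmup, lr = 4000, 1.0
lr_step = lr / warmup
decay = lr * warmup ** 0.5

print(lr_step * warmup)              # ~1.0  end of the linear ramp
print(decay * warmup ** -0.5)        # ~1.0  start of the decay: no discontinuity
print(decay * (4 * warmup) ** -0.5)  # ~0.5  lr halves when the step count quadruples
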
Example #11
Source File: inference_network.py    From pyprob with BSD 2-Clause "Simplified" License
def _create_lr_scheduler(self, state_dict=None):
        if self._learning_rate_scheduler_type is None:  # happens when loading pre-generated network
            return
        # print('Creating new learning rate scheduler')
        learning_rate_scheduler_type = self._learning_rate_scheduler_type
        iter_end = self._total_train_traces_end
        lr_init = self._learning_rate_init
        lr_end = self._learning_rate_end

        def _poly_decay(iter, power):
            return (lr_init - lr_end) * ((1 - iter/iter_end) ** power) + lr_end

        if self._optimizer is None:
            self._learning_rate_scheduler = None
        elif learning_rate_scheduler_type == LearningRateScheduler.POLY1:
            self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=1.) / lr_init)
        elif learning_rate_scheduler_type == LearningRateScheduler.POLY2:
            self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=2.) / lr_init)
        else:
            self._learning_rate_scheduler = None
        if self._learning_rate_scheduler is not None and state_dict is not None:
            # print('Setting learning rate scheduler state')
            self._learning_rate_scheduler.load_state_dict(state_dict) 
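
Dividing _poly_decay by lr_init in the lambdas above converts the absolute learning rate into the multiplicative factor LambdaLR expects, so the effective rate starts at lr_init and lands on lr_end. A standalone sketch of the POLY1 factor with placeholder endpoints:

lr_init, lr_end, iter_end = 1e-3, 1e-5, 1000

def poly1_factor(it):
    # absolute rate (lr_init - lr_end) * (1 - it / iter_end) + lr_end, as a multiplier
    return ((lr_init - lr_end) * (1 - it / iter_end) + lr_end) / lr_init

print(poly1_factor(0) * lr_init)     # ~0.001  starts at lr_init
print(poly1_factor(1000) * lr_init)  # ~1e-05  ends at lr_end
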
Example #12
Source File: train_and_eval.py    From Pointnet2.PyTorch with MIT License
def train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f):
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def lr_lbmd(cur_epoch):
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * args.lr_decay
        return max(cur_decay, args.lr_clip / args.lr)

    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)

    total_it = 0
    for epoch in range(1, args.epochs + 1):
        lr_scheduler.step(epoch)
        total_it = train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f)

        if epoch % args.ckpt_save_interval == 0:
            with torch.no_grad():
                avg_iou = eval_one_epoch(model, eval_loader, epoch, tb_log, log_f)
                ckpt_name = os.path.join(ckpt_dir, 'checkpoint_epoch_%d' % epoch)
                save_checkpoint(model, epoch, ckpt_name) 
Example #13
Source File: train_utils.py    From segmentation-networks-benchmark with MIT License
def find_optimal_lr(model: nn.Module, criterion, optimizer: Optimizer, dataloader):
    min_lr = 1e-8
    lrs = []
    lr = min_lr
    for i in range(30):
        lrs.append(lr)
        lr *= 2.

    lrs = np.array(lrs, dtype=np.float32)
    print(lrs)

    loss = np.zeros_like(lrs)

    scheduler = LambdaLR(optimizer, lr_lambda=lambda x: lrs[x])

    with torch.set_grad_enabled(True):
        model.train()
        dataiter = iter(dataloader)
        for i, lr in enumerate(tqdm(lrs, total=len(lrs))):
            scheduler.step()
            x, y = next(dataiter)
            x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)

            y_pred = model(x)
            batch_loss = criterion(y_pred, y)

            batch_size = x.size(0)
            (batch_size * batch_loss).backward()

            optimizer.step()

            loss[i] = batch_loss.cpu().item()

    return lrs, loss 
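
The returned arrays are usually inspected on a log-scaled x-axis; a common heuristic is to pick a learning rate slightly below the point where the loss starts to diverge. A follow-up sketch (matplotlib assumed available, with model, criterion, optimizer, and dataloader already in scope):

import matplotlib.pyplot as plt

lrs, loss = find_optimal_lr(model, criterion, optimizer, dataloader)
plt.semilogx(lrs, loss)      # loss against learning rate, log-scaled x-axis
plt.xlabel('learning rate')
plt.ylabel('batch loss')
plt.show()
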
Example #14
Source File: unoMT_pytorch_model.py    From Benchmarks with MIT License
def config_optimization(self):
        args = self.args

        # Optimizers, learning rate decay, and miscellaneous
        self.update_l2regularizer(args.l2_regularization)

        self.resp_lr_decay = LambdaLR(
            optimizer=self.resp_opt, lr_lambda=lambda e: args.lr_decay_factor ** e)
        self.cl_clf_lr_decay = LambdaLR(
            optimizer=self.cl_clf_opt, lr_lambda=lambda e: args.lr_decay_factor ** e)
        self.drug_target_lr_decay = LambdaLR(
            optimizer=self.drug_target_opt, lr_lambda=lambda e: args.lr_decay_factor ** e)
        self.drug_qed_lr_decay = LambdaLR(
            optimizer=self.drug_qed_opt, lr_lambda=lambda e: args.lr_decay_factor ** e)

        self.resp_loss_func = F.l1_loss if args.resp_loss_func == 'l1' \
            else F.mse_loss
        self.drug_qed_loss_func = F.l1_loss if args.drug_qed_loss_func == 'l1' \
            else F.mse_loss
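
Each of these four schedulers applies the factor lr_decay_factor ** e, which is exactly the rule torch.optim.lr_scheduler.ExponentialLR implements; LambdaLR is presumably used here just to keep the four decays spelled uniformly. The equivalence, with an assumed gamma of 0.95:

import torch
from torch.optim.lr_scheduler import ExponentialLR, LambdaLR

p = torch.nn.Parameter(torch.zeros(1))
opt_a = torch.optim.SGD([p], lr=0.1)
opt_b = torch.optim.SGD([p], lr=0.1)

# both schedules yield lr = 0.1 * 0.95 ** epoch
sched_a = LambdaLR(opt_a, lr_lambda=lambda e: 0.95 ** e)
sched_b = ExponentialLR(opt_b, gamma=0.95)
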
Example #15
Source File: PPO.py    From bezos with MIT License
def __init__(self,
                 actor_critic,
                 lambdalr,
                 clip_param=None,
                 ppo_epoch=None,
                 num_mini_batch=None,
                 value_loss_coef=None,
                 entropy_coef=None,
                 lr=None,
                 eps=None,
                 max_grad_norm=None,
                 use_clipped_value_loss=False):

        self.actor_critic = actor_critic

        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch

        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef

        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss

        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
        if lambdalr is not None:
            self.scheduler = LambdaLR(self.optimizer, lr_lambda=[lambdalr]) 
Example #16
Source File: lr_scheduler.py    From albert_pytorch with Apache License 2.0
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=.5, last_epoch=-1):
    """ Create a schedule with a learning rate that decreases following the
    values of the cosine function between 0 and `pi * cycles` after a warmup
    period during which it increases linearly between 0 and 1.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0., 0.5 * (1. + math.cos(math.pi * float(num_cycles) * 2. * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
Example #17
Source File: lr_scheduler.py    From albert_pytorch with Apache License 2.0
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """ Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
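
Tracing the factor with num_warmup_steps=100 and num_training_steps=1000 (assumed values) makes the two phases visible:

def factor(current_step, num_warmup_steps=100, num_training_steps=1000):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(0.0, float(num_training_steps - current_step)
               / float(max(1, num_training_steps - num_warmup_steps)))

print(factor(50))    # 0.5  halfway through warmup
print(factor(100))   # 1.0  warmup complete
print(factor(550))   # 0.5  halfway down the linear decay
print(factor(1000))  # 0.0  the rate reaches zero at the end of training
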
Example #18
Source File: lr_scheduler.py    From albert_pytorch with Apache License 2.0
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """ Create a schedule with a constant learning rate preceded by a warmup
    period during which the learning rate increases linearly between 0 and 1.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) 
Example #19
Source File: lr_scheduler.py    From albert_pytorch with Apache License 2.0
def get_constant_schedule(optimizer, last_epoch=-1):
    """ Create a schedule with a constant learning rate.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) 
Example #20
Source File: pix2pix_model.py    From DeepMosaics with GNU General Public License v3.0
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Example #21
Source File: lr_schedulers.py    From Bert-Multi-Label-Text-Classification with MIT License
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1., last_epoch=-1):
    """ Create a schedule with a learning rate that decreases following the
    values of the cosine function with several hard restarts, after a warmup
    period during which it increases linearly between 0 and 1.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.:
            return 0.
        return max(0., 0.5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.))))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
Example #22
Source File: lr_schedulers.py    From Bert-Multi-Label-Text-Classification with MIT License
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=.5, last_epoch=-1):
    """ Create a schedule with a learning rate that decreases following the
    values of the cosine function between 0 and `pi * cycles` after a warmup
    period during which it increases linearly between 0 and 1.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0., 0.5 * (1. + math.cos(math.pi * float(num_cycles) * 2. * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
Example #23
Source File: lr_schedulers.py    From Bert-Multi-Label-Text-Classification with MIT License
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """ Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
Example #24
Source File: lr_schedulers.py    From Bert-Multi-Label-Text-Classification with MIT License
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """ Create a schedule with a constant learning rate preceded by a warmup
    period during which the learning rate increases linearly between 0 and 1.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) 
Example #25
Source File: lr_schedulers.py    From Bert-Multi-Label-Text-Classification with MIT License
def get_constant_schedule(optimizer, last_epoch=-1):
    """ Create a schedule with a constant learning rate.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) 
Example #26
Source File: learning_rate.py    From attention-cnn with Apache License 2.0
def linear_warmup_cosine_lr_scheduler(optimizer, warmup_time_ratio, T_max):
    T_warmup = int(T_max * warmup_time_ratio)

    def lr_lambda(epoch):
        # linear warm up
        if epoch < T_warmup:
            return epoch / T_warmup
        else:
            progress_0_1 = (epoch - T_warmup) / (T_max - T_warmup)
            cosine_decay = 0.5 * (1 + math.cos(math.pi * progress_0_1))
            return cosine_decay

    return LambdaLR(optimizer, lr_lambda=lr_lambda) 
Example #27
Source File: my_optim.py    From ACoL with MIT License
def get_optimizer(args, model):
    lr = args.lr
    # opt = optim.SGD(params=model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0001)
    opt = optim.SGD(params=[para for name, para in model.named_parameters() if 'features' not in name], lr=lr, momentum=0.9, weight_decay=0.0001)
    # lambda1 = lambda epoch: 0.1 if epoch in [85, 125, 165] else 1.0
    # scheduler = LambdaLR(opt, lr_lambda=lambda1)

    return opt 
Example #28
Source File: lr_schedulers.py    From Auto-PyTorch with Apache License 2.0
def _get_scheduler(self, optimizer, config):
        maf = config['max_factor']
        mif = config['min_factor']
        cl = config['cycle_length']
        r = maf - mif

        def l(epoch):
            # triangular wave: even cycles fall from max_factor to min_factor,
            # odd cycles climb back up
            if int(epoch // cl) % 2 == 1:
                lr = mif + (r * (float(epoch % cl) / float(cl)))
            else:
                lr = maf - (r * (float(epoch % cl) / float(cl)))
            return lr

        return lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=l, last_epoch=-1)
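
A quick trace of the factor over two cycles (config values made up for illustration) shows the triangular shape:

maf, mif, cl = 1.0, 0.1, 10     # max_factor, min_factor, cycle_length
r = maf - mif

def l(epoch):
    if int(epoch // cl) % 2 == 1:
        return mif + r * (float(epoch % cl) / float(cl))
    return maf - r * (float(epoch % cl) / float(cl))

print([round(l(e), 2) for e in range(0, 20, 5)])
# [1.0, 0.55, 0.1, 0.55] -- falls to min_factor, then climbs back toward max_factor
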
Example #29
Source File: networks.py    From angularGAN with MIT License
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Example #30
Source File: ladder.py    From vel with MIT License
def __init__(self, optimizer, ladder, last_epoch):
        self.schedule_limits = np.cumsum([x[0] for x in ladder])
        self.schedule_numbers = np.array([float(x[1]) for x in ladder])
        self.scheduler = scheduler.LambdaLR(optimizer, self.lambda_fn, last_epoch=last_epoch)
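
The snippet ends before self.lambda_fn is defined. Given schedule_limits (cumulative segment lengths) and schedule_numbers (the value for each segment), a plausible reading is a step function over the ladder — the code below is a hypothetical reconstruction, not the project's implementation:

import numpy as np

def lambda_fn(self, epoch):
    # hypothetical: pick the ladder rung whose cumulative limit the epoch falls under
    idx = int(np.searchsorted(self.schedule_limits, epoch, side='right'))
    idx = min(idx, len(self.schedule_numbers) - 1)
    return self.schedule_numbers[idx]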