Python torch.optim.lr_scheduler.CyclicLR() Examples
The following are 11 code examples of torch.optim.lr_scheduler.CyclicLR(), drawn from open-source projects.
The originating project, source file, and license are noted with each example.
You may also want to check out all available functions and classes of the torch.optim.lr_scheduler module.
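Before diving into the project code, a minimal sketch of the underlying PyTorch API may help. The model, data, and hyperparameter values below are placeholders chosen for illustration, not taken from any of the projects that follow; the key point is that CyclicLR is stepped once per batch, so the learning rate sweeps between base_lr and max_lr over step_size_up + step_size_down optimizer steps.

import torch
from torch import nn
from torch.optim.lr_scheduler import CyclicLR
from torch.utils.data import DataLoader, TensorDataset

# Placeholder model and data, just to make the sketch self-contained.
model = nn.Linear(10, 2)
dataset = TensorDataset(torch.randn(256, 10), torch.randint(0, 2, (256,)))
loader = DataLoader(dataset, batch_size=32)

optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = CyclicLR(optimizer,
                     base_lr=1e-4,       # lower bound of each cycle
                     max_lr=1e-2,        # upper bound of each cycle
                     step_size_up=4,     # batches spent rising from base_lr to max_lr
                     mode='triangular')

for inputs, targets in loader:
    optimizer.zero_grad()
    loss = nn.functional.cross_entropy(model(inputs), targets)
    loss.backward()
    optimizer.step()
    scheduler.step()                     # cyclic schedules advance once per batch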
Example #1
Source File: test_lr_scheduler.py From skorch with BSD 3-Clause "New" or "Revised" License
def test_lr_scheduler_record_batch_step(self, classifier_module, classifier_data):
    X, y = classifier_data
    batch_size = 128

    scheduler = LRScheduler(
        TorchCyclicLR,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        step_every='batch'
    )
    net = NeuralNetClassifier(
        classifier_module,
        max_epochs=1,
        lr=123.,
        batch_size=batch_size,
        callbacks=[('scheduler', scheduler)]
    )
    net.fit(X, y)
    new_lrs = scheduler.simulate(
        net.history[-1, 'train_batch_count'],
        initial_lr=123.,
    )
    assert np.all(net.history[-1, 'batches', :, 'event_lr'] == new_lrs)
Example #2
Source File: test_lr_scheduler.py From skorch with BSD 3-Clause "New" or "Revised" License
def test_cyclic_lr_with_epoch_step_warning(self, classifier_module, classifier_data):
    msg = ("The LRScheduler now makes a step every epoch by default. "
           "To have the cyclic lr scheduler update "
           "every batch set step_every='batch'")
    with pytest.warns(FutureWarning, match=msg) as record:
        scheduler = LRScheduler(
            TorchCyclicLR, base_lr=123, max_lr=999)
        net = NeuralNetClassifier(
            classifier_module,
            max_epochs=0,
            callbacks=[('scheduler', scheduler)],
        )
        net.initialize()
    assert len(record) == 1
Example #3
Source File: run.py From MobileNetV3-pytorch with MIT License
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6,
                    max_lr=8e-5, step_size=2000, mode='triangular', save_path='.'):
    model.train()
    correct1, correct5 = 0, 0
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr,
                         step_size_up=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            if scheduler is not None:
                scheduler.step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)

            optimizer.zero_grad()
            output = model(data)

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])

    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    plt.show()
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.pdf'))
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return
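The function above is a learning-rate range test: the cyclic scheduler sweeps the learning rate from min_lr to max_lr over step_size batches while per-batch accuracy is recorded, and the saved plot is then used to pick sensible base_lr/max_lr bounds for the real training schedule. A hypothetical invocation might look like the sketch below; the model, loader, optimizer, and criterion are assumed to have been built elsewhere in run.py, and the search bounds shown are illustrative only.

# Hypothetical call; model, train_loader, optimizer and criterion are assumed
# to already exist, and the search bounds below are illustrative.
find_bounds_clr(model, train_loader, optimizer, criterion,
                device=torch.device('cuda'), dtype=torch.float32,
                min_lr=1e-6, max_lr=1e-2, step_size=2000,
                mode='triangular', save_path='./results')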
Example #4
Source File: optimizer_wrapper.py From MobileNetV3-pytorch with MIT License
def batch_step(self, closure=None):
    if isinstance(self.scheduler, CyclicLR) or isinstance(self.scheduler, CosineLR):
        self.scheduler_step()
    self.optimizer_step(closure)
Example #5
Source File: optimizer_wrapper.py From MobileNetV3-pytorch with MIT License
def epoch_step(self):
    if not isinstance(self.scheduler, CyclicLR) and not isinstance(self.scheduler, CosineLR):
        self.scheduler_step()
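Taken together, batch_step and epoch_step encode a common convention: cyclic (and cosine) schedules are advanced once per batch, while every other scheduler is advanced once per epoch. The sketch below shows the same dispatch written against plain torch.optim classes; the helper name and example values are illustrative and not part of the project.

import torch
from torch import nn
from torch.optim.lr_scheduler import CyclicLR

def make_steppers(optimizer, scheduler):
    # Mirror the wrapper above: CyclicLR advances per batch,
    # any other scheduler (e.g. StepLR) advances per epoch.
    per_batch = isinstance(scheduler, CyclicLR)

    def batch_step():
        optimizer.step()
        if per_batch:
            scheduler.step()

    def epoch_step():
        if not per_batch:
            scheduler.step()

    return batch_step, epoch_step

model = nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
sched = CyclicLR(opt, base_lr=1e-3, max_lr=1e-1, step_size_up=10)
batch_step, epoch_step = make_steppers(opt, sched)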
Example #6
Source File: lr_schedulers.py From argus with MIT License
def __init__(self, base_lr, max_lr,
             step_size_up=2000, step_size_down=None,
             mode='triangular', gamma=1.,
             scale_fn=None, scale_mode='cycle',
             cycle_momentum=True, base_momentum=0.8, max_momentum=0.9,
             step_on_iteration=True):
    super().__init__(
        lambda opt: _scheduler.CyclicLR(opt,
                                        base_lr, max_lr,
                                        step_size_up=step_size_up,
                                        step_size_down=step_size_down,
                                        mode=mode,
                                        gamma=gamma,
                                        scale_fn=scale_fn,
                                        scale_mode=scale_mode,
                                        cycle_momentum=cycle_momentum,
                                        base_momentum=base_momentum,
                                        max_momentum=max_momentum),
        step_on_iteration=step_on_iteration
    )
Example #7
Source File: test_lr_scheduler.py From skorch with BSD 3-Clause "New" or "Revised" License
def test_lr_scheduler_set_params(self, classifier_module, classifier_data):
    scheduler = LRScheduler(
        TorchCyclicLR, base_lr=123, max_lr=999, step_every='batch')
    net = NeuralNetClassifier(
        classifier_module,
        max_epochs=0,
        callbacks=[('scheduler', scheduler)],
    )
    net.set_params(callbacks__scheduler__base_lr=456)
    net.fit(*classifier_data)  # we need to trigger on_train_begin
    assert net.callbacks[0][1].lr_scheduler_.base_lrs[0] == 456
Example #8
Source File: lr_scheduler.py From skorch with BSD 3-Clause "New" or "Revised" License
def initialize(self):
    self.policy_ = self._get_policy_cls()
    self.lr_scheduler_ = None
    self.batch_idx_ = 0

    # TODO: Remove this warning on 0.10 release
    if (self.policy_ == TorchCyclicLR or self.policy_ == "TorchCyclicLR"
            and self.step_every == 'epoch'):
        warnings.warn(
            "The LRScheduler now makes a step every epoch by default. "
            "To have the cyclic lr scheduler update "
            "every batch set step_every='batch'",
            FutureWarning
        )
    return self
Example #9
Source File: warmupLR.py From lidar-bonnetal with MIT License
def __init__(self, optimizer, lr, warmup_steps, momentum, decay):
    # cyclic params
    self.optimizer = optimizer
    self.lr = lr
    self.warmup_steps = warmup_steps
    self.momentum = momentum
    self.decay = decay

    # cap to one
    if self.warmup_steps < 1:
        self.warmup_steps = 1

    # cyclic lr
    self.initial_scheduler = toptim.CyclicLR(self.optimizer,
                                             base_lr=0,
                                             max_lr=self.lr,
                                             step_size_up=self.warmup_steps,
                                             step_size_down=self.warmup_steps,
                                             cycle_momentum=False,
                                             base_momentum=self.momentum,
                                             max_momentum=self.momentum)

    # our params
    self.last_epoch = -1  # fix for pytorch 1.1 and below
    self.finished = False  # am i done
    super().__init__(optimizer)
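The pattern here is to reuse CyclicLR purely as a linear warm-up: with base_lr=0, max_lr set to the target learning rate, and both half-cycles equal to warmup_steps, the first warmup_steps iterations ramp the learning rate linearly from 0 up to lr, after which the surrounding class (not shown) hands over to its own exponential decay. Below is a small standalone check of that ramp; the model and values are illustrative only.

import torch
from torch import nn
from torch.optim.lr_scheduler import CyclicLR

model = nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
warmup_steps = 5
sched = CyclicLR(opt, base_lr=0, max_lr=0.1,
                 step_size_up=warmup_steps, step_size_down=warmup_steps,
                 cycle_momentum=False)

for step in range(warmup_steps):
    opt.step()
    sched.step()
    # the learning rate rises linearly: 0.02, 0.04, 0.06, 0.08, 0.10
    print(step + 1, opt.param_groups[0]['lr'])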
Example #10
Source File: oneshot.py From bonnetal with MIT License
def __init__(self, optimizer, base_lr, max_lr, step_size_up, step_size_down,
             cycle_momentum, base_momentum, max_momentum, post_decay):
    # cyclic params
    self.optimizer = optimizer
    self.initial_lr = base_lr
    self.max_lr = max_lr
    self.step_size_up = step_size_up
    self.step_size_down = step_size_down
    self.cycle_momentum = cycle_momentum
    self.base_momentum = base_momentum
    self.max_momentum = max_momentum
    self.post_decay = post_decay

    # cap to one
    if self.step_size_up < 1:
        self.step_size_up = 1
    if self.step_size_down < 1:
        self.step_size_down = 1

    # cyclic lr
    self.initial_scheduler = toptim.CyclicLR(self.optimizer,
                                             base_lr=self.initial_lr,
                                             max_lr=self.max_lr,
                                             step_size_up=self.step_size_up,
                                             step_size_down=self.step_size_down,
                                             cycle_momentum=self.cycle_momentum,
                                             base_momentum=self.base_momentum,
                                             max_momentum=self.max_momentum)

    # our params
    self.last_epoch = -1  # fix for pytorch 1.1 and below
    self.oneshot_n = self.step_size_up + self.step_size_down  # steps to warm up for
    self.finished = False  # am i done
    super().__init__(optimizer)
Example #11
Source File: warmupLR.py From SalsaNext with MIT License
def __init__(self, optimizer, lr, warmup_steps, momentum, decay):
    # cyclic params
    self.optimizer = optimizer
    self.lr = lr
    self.warmup_steps = warmup_steps
    self.momentum = momentum
    self.decay = decay

    # cap to one
    if self.warmup_steps < 1:
        self.warmup_steps = 1

    # cyclic lr
    self.initial_scheduler = toptim.CyclicLR(self.optimizer,
                                             base_lr=0,
                                             max_lr=self.lr,
                                             step_size_up=self.warmup_steps,
                                             step_size_down=self.warmup_steps,
                                             cycle_momentum=False,
                                             base_momentum=self.momentum,
                                             max_momentum=self.momentum)

    # our params
    self.last_epoch = -1  # fix for pytorch 1.1 and below
    self.finished = False  # am i done
    super().__init__(optimizer)