Python optimizers.get_optimizer() Examples
The following are 4 code examples of optimizers.get_optimizer(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module optimizers, or try the search function.
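None of the examples below show the factory itself; in projects like these, get_optimizer is typically a small function that maps a name from the configuration onto a torch.optim class. Below is a minimal hypothetical sketch of that pattern, matching the get_optimizer(config, parameters) call style of Examples #1, #2, and #4; the config.optimizer.name and config.optimizer.params fields are assumptions for illustration, not the actual schema of any project here.

import torch.optim as optim

def get_optimizer(config, parameters):
    # Map the configured optimizer name onto a torch.optim class
    # (hypothetical config layout).
    optimizer_classes = {
        'sgd': optim.SGD,
        'adam': optim.Adam,
        'adamw': optim.AdamW,
    }
    optimizer_cls = optimizer_classes[config.optimizer.name.lower()]
    # Forward the remaining settings (lr, weight_decay, ...) as keyword arguments.
    return optimizer_cls(parameters, **config.optimizer.params)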
Example #1
Source File: train.py From kaggle-hpa with BSD 2-Clause "Simplified" License
def run(config):
    train_dir = config.train.dir

    # Build the model, loss, and optimizer from the config.
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())

    # Resume from the most recent checkpoint if one exists.
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(model, optimizer, checkpoint)
    else:
        last_epoch, step = -1, -1
    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    dataloaders = {split: get_dataloader(config, split, get_transform(config, split))
                   for split in ['train', 'val']}

    writer = SummaryWriter(config.train.dir)
    train(config, model, dataloaders, criterion, optimizer, scheduler, writer, last_epoch + 1)
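The utils.checkpoint helpers used above are project-specific and not shown. A minimal load_checkpoint consistent with how it is called here might look like the sketch below; the checkpoint dictionary keys are guesses, assuming a matching save_checkpoint stored the state dicts and the epoch/step counters.

import torch

def load_checkpoint(model, optimizer, checkpoint_path):
    # Restore model and optimizer state saved by a matching save_checkpoint.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # Return where the previous run stopped so training can resume from there.
    return checkpoint['epoch'], checkpoint['step']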
Example #2
Source File: train.py From kaggle-humpback with BSD 2-Clause "Simplified" License
def run(config):
    train_dir = config.train.dir

    # Build the task (which owns the model) and the optimizer from the config.
    task = get_task(config)
    optimizer = get_optimizer(config, task.get_model().parameters())

    # Resume from the most recent checkpoint if one exists.
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(task.get_model(), optimizer, checkpoint)
    else:
        last_epoch, step = -1, -1
    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    # Task-specific preprocessing options are forwarded to the transforms.
    preprocess_opt = task.get_preprocess_opt()
    dataloaders = {split: get_dataloader(config, split,
                                         get_transform(config, split, **preprocess_opt))
                   for split in ['train', 'dev']}

    writer = SummaryWriter(config.train.dir)
    train(config, task, dataloaders, optimizer, scheduler, writer, last_epoch + 1)
Example #3
Source File: aux_model.py From self-supervised-da with MIT License
def __init__(self, args, logger):
    self.args = args
    self.logger = logger
    self.writer = SummaryWriter(args.log_dir)
    cudnn.enabled = True

    # set up model
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model = get_aux_net(args.network.arch)(aux_classes=args.aux_classes + 1,
                                                classes=args.n_classes)
    self.model = self.model.to(self.device)

    if args.mode == 'train':
        # set up optimizer, lr scheduler and loss functions
        optimizer = get_optimizer(self.args.training.optimizer)
        optimizer_params = {k: v for k, v in self.args.training.optimizer.items()
                            if k != "name"}
        self.optimizer = optimizer(self.model.parameters(), **optimizer_params)
        self.scheduler = get_scheduler(self.optimizer, self.args.training.lr_scheduler)
        self.class_loss_func = nn.CrossEntropyLoss()

        self.start_iter = 0

        # resume
        if args.training.resume:
            self.load(args.model_dir + '/' + args.training.resume)

        cudnn.benchmark = True
    elif args.mode == 'val':
        self.load(os.path.join(args.model_dir, args.validation.model))
    else:
        self.load(os.path.join(args.model_dir, args.testing.model))
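Note that this project follows a different convention from Examples #1 and #2: here get_optimizer takes only the optimizer section of the config and returns the optimizer class, and the caller instantiates it after stripping the "name" key. A hypothetical factory matching that call pattern (the supported names are illustrative):

import torch.optim as optim

def get_optimizer(optimizer_config):
    # Return the optimizer class itself; the caller instantiates it with
    # the remaining (non-"name") config entries as keyword arguments.
    optimizer_classes = {
        'sgd': optim.SGD,
        'adam': optim.Adam,
    }
    return optimizer_classes[optimizer_config['name'].lower()]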
Example #4
Source File: augmentation_search.py From kaggle-hpa with BSD 2-Clause "Simplified" License
def search_once(config, policy):
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())
    scheduler = get_scheduler(config, optimizer, -1)

    # Only the training transform applies the augmentation policy under evaluation.
    transforms = {'train': get_transform(config, 'train', params={'policies': policy}),
                  'val': get_transform(config, 'val')}
    dataloaders = {split: get_dataloader(config, split, transforms[split])
                   for split in ['train', 'val']}

    score_dict = train(config, model, dataloaders, criterion, optimizer, scheduler, None, 0)
    return score_dict['f1_mavg']
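Since search_once trains a model under one augmentation policy and returns a single scalar (the macro-averaged F1 score), an outer loop can rank candidate policies by that score. A hypothetical driver, where candidate_policies is an assumed input rather than part of the project code:

def search(config, candidate_policies):
    # Train once per candidate policy and keep the best-scoring one.
    best_policy, best_score = None, float('-inf')
    for policy in candidate_policies:
        score = search_once(config, policy)
        if score > best_score:
            best_policy, best_score = policy, score
    return best_policy, best_score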