Python utility.timer() Examples
The following are 30 code examples of utility.timer(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utility, or try the search function.
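Every example below drives the timer through the same four calls: tic() marks a start point, toc() returns the seconds elapsed since the last tic(), and hold()/release() accumulate and then report partial timings across loop iterations. The utility module itself is not reproduced on this page, so the following is only a minimal sketch of a timer class consistent with those calls, an assumption based on the EDSR-style utility.py these projects share, not the verbatim source:

import time

class timer():
    # Hypothetical reconstruction: matches the tic()/toc()/hold()/release()
    # usage in the examples below, not necessarily the original code.
    def __init__(self):
        self.acc = 0    # seconds banked by hold() since the last release()
        self.tic()

    def tic(self):
        self.t0 = time.time()    # mark a new start point

    def toc(self):
        return time.time() - self.t0    # seconds since the last tic()

    def hold(self):
        self.acc += self.toc()    # pause: bank the elapsed time

    def release(self):
        ret = self.acc    # report the banked time, then reset
        self.acc = 0
        return ret

The train() methods below typically pair two of these: timer_data times the DataLoader fetch (tic() after the optimizer step, hold() once the batch arrives) and timer_model times the forward/backward pass, with release() emptying both accumulators whenever a progress line is logged.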
Example #1
Source File: trainer.py From NAS_public with GNU General Public License v3.0 | 6 votes |
def __init__(self, opt, model, dataset):
    self.opt = opt
    self.model = model
    self.dataset = dataset
    self.device = torch.device("cuda" if opt.use_cuda else "cpu")
    self.timer = util.timer()
    self.epoch = 0

    #build optimizer
    self.optimizer_dict = {}
    for scale, _ in model.scale_dict.items():
        self.optimizer_dict[scale] = optim.Adam(
            model.networks[model.scale_dict[scale]].parameters(),
            lr=opt.lr, weight_decay=opt.weight_decay)
    self.loss_func = self._get_loss_func(opt.loss_type)

    #load a model on a target device
    self.model = self.model.to(self.device)
Example #2
Source File: trainer.py From NAS_public with GNU General Public License v3.0 | 5 votes |
def train_one_epoch(self):
    self.timer.tic()

    #decay learning rate
    self._adjust_learning_rate(self.epoch)

    #iterate over low-resolutions
    self.model.train()
    self.dataset.setDatasetType('train')
    train_dataloader = DataLoader(dataset=self.dataset, num_workers=self.opt.num_thread, batch_size=self.opt.num_batch, pin_memory=True, shuffle=True)
    for lr in self.opt.dash_lr:
        self.dataset.setTargetLR(lr)
        scale = self.dataset.getTargetScale()
        self.model.setTargetScale(scale)

        #iterate over training image patches
        for iteration, batch in enumerate(train_dataloader, 1):
            input, target = batch[0], batch[1]
            input, target = input.to(self.device), target.to(self.device)

            self.optimizer_dict[scale].zero_grad()
            loss = self.loss_func(self.model(input), target)
            loss.backward()
            self.optimizer_dict[scale].step()

            if iteration % 10 == 0:
                util.print_progress(iteration, len(self.dataset)/self.opt.num_batch, 'Train Progress ({}p):'.format(lr), 'Complete', 1, 50)

    self.epoch += 1
    print('Epoch[{}-train](complete): {}sec'.format(self.epoch, self.timer.toc()))
Example #3
Source File: trainer.py From AWSRN with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_psnr = 0
            # eval_ssim = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_psnr += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    # eval_ssim += utility.calc_ssim(sr, hr)
                    # save_list.extend([lr, hr])
                    save_list.extend([hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_psnr / len(self.loader_test)
            # mean_ssim = eval_ssim / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best PSNR: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #4
Source File: trainer_finetune.py From 3D_Appearance_SR with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, nl, mk, hr, filename, _) in enumerate(tqdm_test):
                # print('FLAG')
                # print(filename)
                filename = filename[0]
                print(filename)
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, nl, mk, hr = self.prepare([lr, nl, mk, hr])
                else:
                    lr, nl, mk, = self.prepare([lr, nl, mk])

                sr = self.model(idx_scale, lr, nl, mk)
                sr = utility.quantize(sr, self.args.rgb_range)
                # print(sr.shape)
                b, c, h, w = sr.shape
                hr = hr[:, :, :h, :w]

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #5
Source File: trainer.py From 3D_Appearance_SR with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()
    # from IPython import embed; embed(); exit()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare([lr, hr])
        timer_data.hold()
        timer_model.tic()
        # from IPython import embed; embed(); exit()

        self.optimizer.zero_grad()
        sr = self.model(idx_scale, lr)
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.3f}+{:.3f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #6
Source File: trainer.py From 3D_Appearance_SR with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                # from IPython import embed; embed();
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]

                sr = self.model(idx_scale, lr)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #7
Source File: tester.py From NAS_public with GNU General Public License v3.0 | 4 votes |
def _analyze_baseline(self):
    timer = util.timer()
    dataloader = DataLoader(dataset=self.dataset, num_workers=self.opt.num_thread, batch_size=1, shuffle=False, pin_memory=True)
    result = {}

    #iterate over low resolutions
    for lr in opt.dash_lr:
        #setup (process & thread)
        process_list = []
        output_queue = mp.Queue()
        input_queue = mp.Queue(1)
        for _ in range(PROCESS_NUM):
            process = mp.Process(target=measure_quality, args=(input_queue, output_queue))
            process.start()
            process_list.append(process)

        #setup (variable)
        result[lr] = Result()
        result[lr].frameidx = []
        result[lr].ssim = []
        result[lr].psnr = []

        print('start analyze {}p/baseline quality'.format(lr))
        self.dataset.setTargetLR(lr)
        self.model.setTargetScale(self.dataset.getTargetScale())

        #iterate over test dataset
        for iteration, batch in enumerate(dataloader, 1):
            assert len(batch[0]) == 1
            _, upscaled, target = batch[0], batch[1], batch[2]
            upscaled_np, target_np = torch.squeeze(upscaled, 0).permute(1, 2, 0).numpy(), torch.squeeze(target, 0).permute(1, 2, 0).numpy()
            upscaled_np = upscaled.data[0].permute(1, 2, 0).numpy()
            input_queue.put((iteration, upscaled_np, target_np))

            elapsed_time = timer.toc()
            util.print_progress(iteration, len(self.dataset), 'Test Progress ({}p - {}sec):'.format(lr, round(elapsed_time, 2)), 'Complete', 1, 50)

        #terminate
        for _ in range(len(process_list)):
            input_queue.put(('end', ))

        #merge results
        quality_list = []
        for process in process_list:
            quality_list.extend(output_queue.get())

        for quality in quality_list:
            result[lr].frameidx.append(quality.idx)
            result[lr].ssim.append(quality.ssim)
            result[lr].psnr.append(quality.psnr)

        result[lr].frameidx, result[lr].ssim, result[lr].psnr = \
            [list(x) for x in zip(*sorted(zip(result[lr].frameidx, result[lr].ssim, result[lr].psnr), key=lambda pair: pair[0]))]

    #PSNR, SSIM for original images
    result[self.opt.dash_hr] = Result()
    result[self.opt.dash_hr].psnr = 100
    result[self.opt.dash_hr].ssim = 1

    return result
Example #8
Source File: tester.py From NAS_public with GNU General Public License v3.0 | 4 votes |
def _generate_sr(self, output_node=None):
    with torch.no_grad():
        timer = util.timer()
        if output_node is None:
            output_node = self.output_nodes[-1]
        dataloader = DataLoader(dataset=self.dataset, num_workers=6, batch_size=1, shuffle=False, pin_memory=True)
        target_res = self.node2res[output_node]

        process_list = []
        input_queue = mp.Queue()
        for _ in range(PROCESS_NUM):
            process = mp.Process(target=save_img, args=(input_queue, ))
            process.start()
            process_list.append(process)

        #iterate over target resolutions
        for lr in target_res:
            self.dataset.setTargetLR(lr)
            self.model.setTargetScale(self.dataset.getTargetScale())

            for iteration, batch in enumerate(dataloader, 1):
                assert len(batch[0]) == 1

                #prepare
                input, upscaled, target = batch[0], batch[1], batch[2]
                input_np, upscaled_np, target_np = (
                    torch.squeeze(input, 0).permute(1, 2, 0).numpy(),
                    torch.squeeze(upscaled, 0).permute(1, 2, 0).numpy(),
                    torch.squeeze(target, 0).permute(1, 2, 0).numpy()
                )
                input, upscaled, target = input.to(self.device), upscaled.to(self.device), target.to(self.device)

                output = self.model(input, output_node)
                torch.cuda.synchronize()

                output = torch.squeeze(torch.clamp(output, min=0, max=1.), 0).permute(1, 2, 0)
                output_np = output.to('cpu').numpy()

                '''
                misc.imsave('{}/{}_{}_output.png'.format(self.opt.result_dir, lr, iteration), output_np)
                misc.imsave('{}/{}_{}_baseline.png'.format(self.opt.result_dir, lr, iteration), upscaled_np)
                misc.imsave('{}/{}_{}_target.png'.format(self.opt.result_dir, lr, iteration), target_np)
                '''
                input_queue.put((lr, iteration, input_np, output_np, target_np))

                elapsed_time = timer.toc()
                util.print_progress(iteration, len(self.dataset), 'Test Progress ({}p - {}sec):'.format(lr, round(elapsed_time, 2)), 'Complete', 1, 50)

        #terminate
        for _ in range(len(process_list)):
            input_queue.put(('end', ))

        #get psnr, ssim of super-resolution
Example #9
Source File: trainer.py From AWSRN with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.6e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #10
Source File: trainer_finetune.py From 3D_Appearance_SR with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()
    # from IPython import embed; embed(); exit()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, nl, mk, hr, _, idx_scale) in enumerate(self.loader_train):
        # from IPython import embed; embed(); exit()
        lr, nl, mk, hr = self.prepare([lr, nl, mk, hr])
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(idx_scale, lr, nl, mk)
        # from IPython import embed; embed(); exit()
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.3f}+{:.3f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    # from IPython import embed; embed(); exit()
    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #11
Source File: trainer.py From NTIRE2019_EDRN with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        #print(hr)
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
    if self.args.train_only:
        #self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
        self.ckp.save(self, epoch, is_best=False)
Example #12
Source File: trainer.py From NTIRE2019_EDRN with MIT License | 4 votes |
def test(self):
    #if self.args.test_only:
    #    self.scheduler.step()  #just for remake the curve of psnr
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            #eval_ssim = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    #save_list.extend([lr, hr])
                    #eval_ssim += utility.calc_ssim(
                    #    sr, hr, scale
                    #)

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale, epoch)
                #del lr,hr,sr,idx_img,filename,save_list

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f}(Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )
            #del idx_scale,scale,tqdm_test

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
        #self.ckp.save(self, epoch, is_best=False)
    else:
        self.ckp.save_for_test(epoch)
Example #13
Source File: trainer.py From 2018_subeesh_epsr_eccvw with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare([lr, hr])
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #14
Source File: trainer.py From 2018_subeesh_epsr_eccvw with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]
                    print(lr.shape)

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #15
Source File: trainer.py From MSRN-PyTorch with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare([lr, hr])
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #16
Source File: trainer.py From MSRN-PyTorch with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    #self.ckp.save_results(filename, save_list, scale)
                    self.ckp.save_results_nopostfix(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s, ave time: {:.2f}s\n'.format(
            timer_test.toc(), timer_test.toc()/len(self.loader_test)),
        refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #17
Source File: trainer.py From MSRN-PyTorch with MIT License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #18
Source File: trainer.py From MSRN-PyTorch with MIT License | 4 votes |
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
Example #19
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #20
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #21
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(
        torch.zeros(1, len(self.loader_test), len(self.scale))
    )
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d
                )
                if self.args.save_gt:
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)

            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1
                )
            )

    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')

    if self.args.save_results:
        self.ckp.end_background()

    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #22
Source File: videotester.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on video:')
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        vidwri = cv2.VideoWriter(
            self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
            cv2.VideoWriter_fourcc(*'XVID'),
            vidcap.get(cv2.CAP_PROP_FPS),
            (
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            )
        )

        tqdm_test = tqdm(range(total_frames), ncols=80)
        for _ in tqdm_test:
            success, lr = vidcap.read()
            if not success:
                break

            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            lr, = self.prepare(lr.unsqueeze(0))
            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)

        vidcap.release()
        vidwri.release()

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #23
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #24
Source File: videotester.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on video:')
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        vidwri = cv2.VideoWriter(
            self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
            cv2.VideoWriter_fourcc(*'XVID'),
            vidcap.get(cv2.CAP_PROP_FPS),
            (
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            )
        )

        tqdm_test = tqdm(range(total_frames), ncols=80)
        for _ in tqdm_test:
            success, lr = vidcap.read()
            if not success:
                break

            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            lr, = self.prepare(lr.unsqueeze(0))
            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)

        vidcap.release()
        vidwri.release()

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #25
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
Example #26
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(
        torch.zeros(1, len(self.loader_test), len(self.scale))
    )
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d
                )
                if self.args.save_gt:
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)

            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1
                )
            )

    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')

    if self.args.save_results:
        self.ckp.end_background()

    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #27
Source File: videotester.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on video:')
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        vidwri = cv2.VideoWriter(
            self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
            cv2.VideoWriter_fourcc(*'XVID'),
            vidcap.get(cv2.CAP_PROP_FPS),
            (
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            )
        )

        tqdm_test = tqdm(range(total_frames), ncols=80)
        for _ in tqdm_test:
            success, lr = vidcap.read()
            if not success:
                break

            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            lr, = self.prepare(lr.unsqueeze(0))
            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)

        vidcap.release()
        vidwri.release()

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #28
Source File: videotester.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on video:')
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        vidwri = cv2.VideoWriter(
            self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
            cv2.VideoWriter_fourcc(*'XVID'),
            vidcap.get(cv2.CAP_PROP_FPS),
            (
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            )
        )

        tqdm_test = tqdm(range(total_frames), ncols=80)
        for _ in tqdm_test:
            success, lr = vidcap.read()
            if not success:
                break

            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            lr, = self.prepare(lr.unsqueeze(0))
            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)

        vidcap.release()
        vidwri.release()

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #29
Source File: videotester.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on video:')
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        vidcap = cv2.VideoCapture(self.args.dir_demo)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        vidwri = cv2.VideoWriter(
            self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
            cv2.VideoWriter_fourcc(*'XVID'),
            vidcap.get(cv2.CAP_PROP_FPS),
            (
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            )
        )

        tqdm_test = tqdm(range(total_frames), ncols=80)
        for _ in tqdm_test:
            success, lr = vidcap.read()
            if not success:
                break

            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            lr, = self.prepare(lr.unsqueeze(0))
            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

            normalized = sr * 255 / self.args.rgb_range
            ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
            vidwri.write(ndarr)

        vidcap.release()
        vidwri.release()

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )

    torch.set_grad_enabled(True)
Example #30
Source File: trainer.py From OISR-PyTorch with BSD 2-Clause "Simplified" License | 4 votes |
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(
                self.model.parameters(),
                self.args.gclip
            )
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]