Python utils.loadData() Examples
The following are 21 code examples of utils.loadData(). You can go to the original project or source file by following the link above each example, or check out the other available functions and classes of the utils module.
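All of the examples come from CRNN-style text recognition projects, where utils.loadData(v, data) copies a freshly loaded batch into a pre-allocated tensor that the training loop reuses. As a rough orientation, here is a minimal sketch of such a helper, assuming the crnn.pytorch-style signature (the individual projects may differ in detail):

```python
import torch


def loadData(v, data):
    # Resize the pre-allocated tensor `v` in place to match the incoming batch,
    # then copy the batch into it, so the same (possibly CUDA) buffer can be
    # reused on every iteration instead of allocating a new tensor.
    with torch.no_grad():
        v.resize_(data.size()).copy_(data)
```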
Example #1
Source File: train.py (from crnn.pytorch, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
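Note that trainBatch reads image, text, length, crnn, converter and train_iter from module scope; they are created once in the surrounding training script. A hypothetical setup with placeholder sizes, roughly what these examples appear to assume:

```python
import torch

# Placeholder sizes; the real scripts take them from command-line options.
batch_size, img_h, img_w = 64, 32, 100

image = torch.FloatTensor(batch_size, 1, img_h, img_w)  # reusable image buffer
text = torch.IntTensor(batch_size * 5)                  # encoded label buffer
length = torch.IntTensor(batch_size)                    # per-sample label lengths

# On GPU runs the buffers would typically be moved to CUDA once, up front.
if torch.cuda.is_available():
    image = image.cuda()
```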
Example #2
Source File: crnn_main_seg.py (from enctc.crnn, MIT License)

```python
def trainBatch(net, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    H, cost = seg_ctc_ent_cost(preds, text, preds_size, length, uni_rate=opt.uni_rate)
    h_cost = (1 - opt.h_rate) * cost - opt.h_rate * H
    cost_sum = h_cost.data.sum()
    inf = float("inf")
    if cost_sum == inf or cost_sum == -inf or cost_sum > 200 * batch_size:
        print("Warning: received an inf loss, setting loss value to 0")
        return torch.zeros(H.size()), torch.zeros(cost.size()), torch.zeros(h_cost.size())
    crnn.zero_grad()
    h_cost.backward()
    torch.nn.utils.clip_grad_norm(crnn.parameters(), opt.max_norm)
    optimizer.step()
    return H / batch_size, cost / batch_size, h_cost / batch_size
```
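Examples #2 and #21 come from enctc.crnn, where seg_ctc_ent_cost returns both an entropy term H and a segmental CTC cost, and the two are blended into the training objective with the weight opt.h_rate. An illustrative computation of that blend with placeholder numbers:

```python
import torch

# Placeholder values; in the examples H and cost come from seg_ctc_ent_cost.
H = torch.tensor(3.2)      # entropy of the alignment distribution
cost = torch.tensor(12.5)  # segmental CTC cost
h_rate = 0.2               # corresponds to opt.h_rate

# A larger h_rate shifts weight from minimizing the CTC cost toward
# maximizing entropy (note the minus sign in front of H).
h_cost = (1 - h_rate) * cost - h_rate * H
print(h_cost.item())  # 9.36
```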
Example #3
Source File: crnn_main.py (from basicOCR, GNU General Public License v3.0)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
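Every example also depends on a global converter (crnn.pytorch calls it strLabelConverter), whose encode method turns a batch of strings into one flat tensor of character codes plus a tensor of per-string lengths, which is exactly what gets passed to utils.loadData(text, t) and utils.loadData(length, l). A toy stand-in for that interface, using a made-up alphabet:

```python
import torch


class ToyLabelConverter:
    """Illustrative stand-in for crnn.pytorch's strLabelConverter."""

    def __init__(self, alphabet):
        # Index 0 is reserved for the CTC blank, so characters start at 1.
        self.char_to_index = {ch: i + 1 for i, ch in enumerate(alphabet)}

    def encode(self, texts):
        # Flat tensor of character codes for the whole batch, plus the
        # per-string lengths needed by the CTC loss.
        lengths = [len(t) for t in texts]
        flat = [self.char_to_index[ch] for t in texts for ch in t]
        return torch.IntTensor(flat), torch.IntTensor(lengths)


converter = ToyLabelConverter("abcdefghijklmnopqrstuvwxyz0123456789")
t, l = converter.encode(["hello", "ocr"])
print(t.tolist(), l.tolist())  # [8, 5, 12, 12, 15, 15, 3, 18] [5, 3]
```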
Example #4
Source File: crnn_main.py (from ctpn-crnn, MIT License)

```python
def trainBatch(net, criterion, optimizer, train_iter):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    # print('----cpu_images-----')
    # print(cpu_images.shape)
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    # print('----image-----')
    # print(image.shape)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
Example #5
Source File: crnn_main.py (from crnn, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
Example #6
Source File: train_pytorch_ctc.py (from ocr.pytorch, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    image = cpu_images.to(device)
    text, length = converter.encode(cpu_texts)
    # utils.loadData(text, t)
    # utils.loadData(length, l)
    preds = net(image)  # seqLength x batchSize x alphabet_size
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))  # seqLength x batchSize
    cost = criterion(preds.log_softmax(2).cpu(), text, preds_size, length) / batch_size
    if torch.isnan(cost):
        print(batch_size, cpu_texts)
    else:
        net.zero_grad()
        cost.backward()
        optimizer.step()
    return cost
```
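Examples #6, #7 and #8 skip utils.loadData entirely and move the batch to the device directly. Example #6 also calls preds.log_softmax(2) before the loss, which matches PyTorch's built-in torch.nn.CTCLoss: unlike the warp-ctc binding used elsewhere, it expects log-probabilities. A self-contained sketch of that call, with illustrative shapes:

```python
import torch

# Illustrative shapes: T = sequence length, N = batch size, C = alphabet size.
T, N, C = 26, 4, 37
criterion = torch.nn.CTCLoss(zero_infinity=True)

preds = torch.randn(T, N, C)                              # raw network output
targets = torch.randint(1, C, (N, 10), dtype=torch.long)  # encoded labels
preds_size = torch.full((N,), T, dtype=torch.long)        # per-sample input lengths
target_lengths = torch.full((N,), 10, dtype=torch.long)   # per-sample label lengths

# The built-in CTC loss expects log-probabilities over the class dimension.
cost = criterion(preds.log_softmax(2), targets, preds_size, target_lengths)
print(cost.item())
```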
Example #7
Source File: train_warp_ctc_v2.py (from ocr.pytorch, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    image = cpu_images.to(device)
    text, length = converter.encode(cpu_texts)
    # utils.loadData(text, t)
    # utils.loadData(length, l)
    preds = net(image)  # seqLength x batchSize x alphabet_size
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))  # seqLength x batchSize
    cost = criterion(preds, text, preds_size, length) / batch_size
    if torch.isnan(cost):
        print(batch_size, cpu_texts)
    else:
        net.zero_grad()
        cost.backward()
        optimizer.step()
    return cost
```
Example #8
Source File: train_warp_ctc.py (from ocr.pytorch, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    image = cpu_images.to(device)
    text, length = converter.encode(cpu_texts)
    # utils.loadData(text, t)
    # utils.loadData(length, l)
    preds = net(image)  # seqLength x batchSize x alphabet_size
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))  # seqLength x batchSize
    cost = criterion(preds, text, preds_size, length) / batch_size
    if torch.isnan(cost):
        print(batch_size, cpu_texts)
    else:
        net.zero_grad()
        cost.backward()
        optimizer.step()
    return cost
```
Example #9
Source File: train.py (from PAN-PSEnet, Apache License 2.0)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    optimizer.zero_grad()
    preds = crnn(image)
    preds = preds.log_softmax(2)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length)
    # crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
Example #10
Source File: train.py (from crnn-pytorch, MIT License)

```python
def train(net, criterion, optimizer, train_iter):
    for p in crnn.parameters():
        p.requires_grad = True
    crnn.train()
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    optimizer.zero_grad()
    preds = crnn(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    # crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
Example #11
Source File: train.py (from ICDAR-2019-SROIE, MIT License)

```python
def trainBatch(net, criterion, optimizer):
    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
```
Example #12
Source File: train.py (from efficientdensenet_crnn, MIT License)

```python
def trainBatch(net, criterion, optimizer, flage=False):
    n_correct = 0
    train_distance = 0
    data = train_iter.next()
    cpu_images, cpu_texts = data  # decode utf-8 to unicode
    if ifUnicode:
        cpu_texts = [clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
    for pred, target in zip(sim_preds, cpu_texts):
        if pred.strip() == target.strip():
            n_correct += 1
        train_distance += distance.nlevenshtein(pred.strip(), target.strip(), method=2)
    train_accuracy = n_correct / float(batch_size)
    train_distance = train_distance / float(batch_size)
    if flage:
        lr = 0.0001
        optimizer = optim.Adadelta(crnn.parameters(), lr=lr)
    optimizer.step()
    return cost, train_accuracy, train_distance
```
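Example #12 additionally tracks a normalized edit distance with distance.nlevenshtein, presumably from the PyPI distance package. A small usage sketch under that assumption:

```python
# Assumes the PyPI `distance` package (pip install distance).
import distance

pred, target = "he1lo world", "hello world"

# nlevenshtein returns a Levenshtein distance normalized to [0, 1];
# method=2 normalizes by the length of the shortest alignment.
err = distance.nlevenshtein(pred, target, method=2)
print(err)  # 0.0 would be an exact match, 1.0 a completely different string
```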
Example #13
Source File: train.py (from crnn.pytorch, MIT License)

```python
def val(net, dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred == target.lower():
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
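From Example #13 onward, the val() helpers also rely on utils.averager() to track the running validation loss. A minimal sketch of such a running-average helper, assuming the crnn.pytorch-style add/val/reset interface:

```python
import torch


class Averager:
    """Running average over scalars or tensors, mirroring utils.averager()."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.total = 0.0
        self.count = 0

    def add(self, v):
        # Accept either a tensor (summing all of its elements) or a plain number.
        if isinstance(v, torch.Tensor):
            self.count += v.numel()
            self.total += v.sum().item()
        else:
            self.count += 1
            self.total += float(v)

    def val(self):
        return self.total / self.count if self.count else 0.0
```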
Example #14
Source File: train.py (from ICDAR-2019-SROIE, MIT License)

```python
def val(net, dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        # size = 26, 64, 96
        # print(preds.size())
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        # size = 26, 64
        # print(preds.size())
        # preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred == target.lower():
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-30s => %-30s, gt: %-30s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
Example #15
Source File: train.py (from efficientdensenet_crnn, MIT License)

```python
def val(net, test_dataset, criterion, max_iter=2):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=opt.batchSize, num_workers=int(opt.workers),
        sampler=dataset.randomSequentialSampler(test_dataset, opt.batchSize),
        collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()
    test_distance = 0

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        if ifUnicode:
            cpu_texts = [clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        # preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred.strip() == target.strip():
                n_correct += 1
            # print(distance.levenshtein(pred.strip(), target.strip()))
            test_distance += distance.nlevenshtein(pred.strip(), target.strip(), method=2)

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    test_distance = test_distance / float(max_iter * opt.batchSize)
    testLoss = loss_avg.val()
    # print('Test loss: %f, accuray: %f' % (testLoss, accuracy))
    return testLoss, accuracy, test_distance
```
Example #16
Source File: crnn_main.py (from crnn, MIT License)

```python
def val(net, dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred == target:
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred.encode('utf-8'), pred.encode('utf-8'), gt.encode('utf-8')))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
Example #17
Source File: train.py (from PAN-PSEnet, Apache License 2.0)

```python
def val(net, dataset, criterion, max_iter=100):
    # print('Start val')
    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=params.batchSize, num_workers=int(params.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length)
        loss_avg.add(cost)

        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        # cpu_texts_decode = []
        # for i in cpu_texts:
        #     cpu_texts_decode.append(i.decode('utf-8', 'strict'))
        for pred, target in zip(sim_preds, cpu_texts):
            if pred == target:
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:params.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * params.batchSize)
    print('loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
    return loss_avg.val(), accuracy
```
Example #18
Source File: crnn_main.py (from ctpn-crnn, MIT License)

```python
def val(net, dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=params.batchSize, num_workers=int(params.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        # print('-----preds-----')
        # print(preds)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        # print('-----preds_size-----')
        # print(preds_size)
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        # print('-----preds.max(2)-----')
        # print(preds)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        # print('-----preds.transpose(1, 0)-----')
        # print(preds)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        list_1 = []
        for m in cpu_texts:
            list_1.append(m.decode('utf-8', 'strict'))
        # if (i - 1) % 10 == 0:
        #     print('-----sim_preds-----list_1-----')
        #     print(sim_preds, list_1)
        for pred, target in zip(sim_preds, list_1):
            if pred == target:
                n_correct += 1
            # else:
            #     print('%-20s, gt: %-20s' % (pred, target))

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:params.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, list_1):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    print(n_correct)
    print(max_iter * params.batchSize)
    accuracy = n_correct / float(max_iter * params.batchSize)
    print('Test loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
Example #19
Source File: crnn_main.py (from basicOCR, GNU General Public License v3.0)

```python
def val(net, dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()

    max_iter = min(max_iter, len(data_loader))
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for pred, target in zip(sim_preds, cpu_texts):
            if pred == target:
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred.encode('utf-8'), pred.encode('utf-8'), gt.encode('utf-8')))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
Example #20
Source File: train.py (from crnn-pytorch, MIT License)

```python
def val(net, criterion):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    val_iter = iter(val_loader)

    i = 0
    n_correct = 0
    loss_avg = utils.averager()  # The global loss_avg is used by train

    max_iter = len(val_loader)
    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)

        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        cpu_texts_decode = []
        for i in cpu_texts:
            cpu_texts_decode.append(i.decode('utf-8', 'strict'))
        for pred, target in zip(sim_preds, cpu_texts_decode):
            if pred == target:
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:params.n_val_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts_decode):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * params.batchSize)
    print('Val loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
```
Example #21
Source File: crnn_main_seg.py (from enctc.crnn, MIT License)

```python
def val(net, dataset, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
        p.requires_grad = False

    net.eval()
    data_loader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)

    i = 0
    n_correct = 0

    # loss averager
    avg_h_val = utils.averager()
    avg_cost_val = utils.averager()
    avg_h_cost_val = utils.averager()

    if opt.eval_all:
        max_iter = len(data_loader)
    else:
        max_iter = min(max_iter, len(data_loader))

    for i in range(max_iter):
        data = val_iter.next()
        i += 1
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)

        preds = crnn(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        H, cost = seg_ctc_ent_cost(preds, text, preds_size, length, uni_rate=opt.uni_rate)
        h_cost = (1 - opt.h_rate) * cost - opt.h_rate * H
        avg_h_val.add(H / batch_size)
        avg_cost_val.add(cost / batch_size)
        avg_h_cost_val.add(h_cost / batch_size)

        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        for idx, (pred, target) in enumerate(zip(sim_preds, cpu_texts)):
            if pred == target.lower():
                n_correct += 1

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test H: %f, Cost: %f, H Cost: %f, accuray: %f' % (
        avg_h_val.val(), avg_cost_val.val(), avg_h_cost_val.val(), accuracy))
```