Python chainer.functions.accuracy() Examples
The following are 30 code examples of chainer.functions.accuracy(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the chainer.functions module.
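Before the project examples, here is a minimal, self-contained sketch of what chainer.functions.accuracy() computes; the values are made up for illustration:

import numpy as np
import chainer.functions as F

# Three samples, four classes: rows are unnormalized scores (logits).
y = np.array([[0.1, 2.0, 0.3, 0.0],
              [1.5, 0.2, 0.1, 0.0],
              [0.0, 0.1, 0.2, 3.0]], dtype=np.float32)
t = np.array([1, 0, 2], dtype=np.int32)  # ground-truth class indices

# F.accuracy takes the argmax over axis 1 and compares it with t;
# here the first two predictions are correct, the third is not.
acc = F.accuracy(y, t)
print(acc.data)  # 0.6666667 (2 of 3 correct)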
Example #1
Source File: mdl_rgb_d.py From MultimodalDL with MIT License

def __call__(self, x, t):
    self.clear()
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
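Note that this example targets Chainer v1, where F.dropout took a train= keyword. In Chainer v2 and later the flag moved to the global configuration; a minimal sketch of the equivalent test-time call, assuming v2+ is installed:

import numpy as np
import chainer
import chainer.functions as F

x = np.random.randn(2, 10).astype(np.float32)

# Chainer v2+ removed the train= keyword; F.dropout reads chainer.config.train.
with chainer.using_config('train', False):
    h = F.dropout(x, ratio=0.5)  # acts as the identity at test time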
Example #2
Source File: copy_transformer_updater.py From models with MIT License

def update_net(self):
    batch = next(self.get_iterator('main'))
    batch = self.converter(batch, self.device)
    optimizer = self.get_optimizer('main')
    net = optimizer.target

    # for training we need one label less, since we right shift the output of the network
    predictions = net(batch['data'], batch['label'][:, :-1])
    batch_size, num_steps, vocab_size = predictions.shape
    predictions = F.reshape(predictions, (-1, vocab_size))
    labels = batch['label'][:, 1:].ravel()

    loss = F.softmax_cross_entropy(predictions, labels)
    accuracy = F.accuracy(F.softmax(predictions), labels)

    net.cleargrads()
    loss.backward()
    optimizer.update()

    chainer.reporter.report({
        "loss": loss,
        "train/accuracy": accuracy
    })
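Since F.accuracy only compares per-row argmaxes, and softmax is strictly monotonic within each row, the F.softmax call above does not change the reported accuracy; passing the raw logits would give the same value. A quick self-contained check:

import numpy as np
import chainer.functions as F

logits = np.random.randn(8, 5).astype(np.float32)
t = np.random.randint(0, 5, size=8).astype(np.int32)

# softmax preserves the argmax of every row, so both calls agree
assert F.accuracy(logits, t).data == F.accuracy(F.softmax(logits), t).data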
Example #3
Source File: fcn8s.py From portrait_matting with GNU General Public License v3.0

def __call__(self, x, t=None):
    score = self.forward(x)

    if t is None:
        assert not chainer.config.train
        return

    loss = F.softmax_cross_entropy(score, t, normalize=True)
    if np.isnan(float(loss.data)):
        raise ValueError('Loss is nan.')
    chainer.report({'loss': loss}, self)

    accuracy = F.accuracy(score, t)
    chainer.report({'accuracy': accuracy}, self)

    return loss
Example #4
Source File: pointnet_cls.py From chainer-pointnet with MIT License

def __call__(self, x, t):
    h, t1, t2 = self.calc(x)
    cls_loss = functions.softmax_cross_entropy(h, t)
    reporter.report({'cls_loss': cls_loss}, self)

    loss = cls_loss
    # Enforce the transformation as orthogonal matrix
    if self.trans and self.trans_lam1 >= 0:
        trans_loss1 = self.trans_lam1 * calc_trans_loss(t1)
        reporter.report({'trans_loss1': trans_loss1}, self)
        loss = loss + trans_loss1
    if self.trans and self.trans_lam2 >= 0:
        trans_loss2 = self.trans_lam2 * calc_trans_loss(t2)
        reporter.report({'trans_loss2': trans_loss2}, self)
        loss = loss + trans_loss2

    reporter.report({'loss': loss}, self)

    if self.compute_accuracy:
        acc = functions.accuracy(h, t)
        reporter.report({'accuracy': acc}, self)
    return loss
Example #5
Source File: transformer_text_updater.py From kiss with GNU General Public License v3.0

def update_recognizer(self):
    recognizer_optimizer = self.get_optimizer('opt_rec')

    batch = next(self.get_iterator('main'))
    batch = self.converter(batch, self.device)

    recognizer_output = self.recognizer(
        batch['image'], batch['words'].squeeze()
    )

    loss = self.recognizer.calc_loss(recognizer_output, batch['words'])

    batch_size, num_chars, num_classes = recognizer_output.shape
    recognizer_output = F.reshape(recognizer_output, (-1, num_classes))
    char_accuracy = F.accuracy(
        F.softmax(recognizer_output, axis=1), batch['words'].ravel())

    self.recognizer.cleargrads()
    loss.backward()
    recognizer_optimizer.update()

    recognizer_losses = {
        'loss': loss,
        'char_accuracy': char_accuracy,
    }

    return recognizer_losses
Example #6
Source File: test_accuracy.py From chainer with MIT License

def accuracy(x, t, ignore_label):
    x_ = numpy.rollaxis(x, 1, x.ndim).reshape(t.size, -1)
    t_ = t.ravel()

    if ignore_label is not None:
        count = 0
        for i in six.moves.range(t_.size):
            pred = x_[i].argmax()
            if t_[i] != ignore_label and pred == t_[i]:
                count += 1
        total = (t_ != ignore_label).sum()
    else:
        count = 0
        for i in six.moves.range(t_.size):
            pred = x_[i].argmax()
            if pred == t_[i]:
                count += 1
        total = t_.size

    if total == 0:
        return 0.0
    else:
        return float(count) / total
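This reference loop mirrors the ignore_label argument of chainer.functions.accuracy() itself: samples whose label equals ignore_label are excluded from both the correct count and the total. A minimal sketch:

import numpy as np
import chainer.functions as F

y = np.array([[0.0, 1.0], [2.0, 0.0], [0.0, 3.0]], dtype=np.float32)
t = np.array([1, -1, 0], dtype=np.int32)  # -1 marks padding

# The middle sample is ignored; of the remaining two, one is correct.
print(F.accuracy(y, t, ignore_label=-1).data)  # 0.5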
Example #7
Source File: alex.py From chainer-compiler with MIT License

def forward(self, x, t):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)

    # EDIT(hamaji): ONNX-chainer cannot output SoftmaxCrossEntropy.
    # loss = F.softmax_cross_entropy(h, t)
    loss = self.softmax_cross_entropy(h, t)
    if self.compute_accuracy:
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    else:
        chainer.report({'loss': loss}, self)
    return loss
Example #8
Source File: resnet50.py From chainer-compiler with MIT License

def forward(self, x, t):
    h = self.bn1(self.conv1(x))
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h)
    h = self.res3(h)
    h = self.res4(h)
    h = self.res5(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.fc(h)

    # loss = F.softmax_cross_entropy(h, t)
    loss = self.softmax_cross_entropy(h, t)
    if self.compute_accuracy:
        chainer.report({'loss': loss,
                        'accuracy': F.accuracy(h, np.argmax(t, axis=1))}, self)
    else:
        chainer.report({'loss': loss}, self)
    return loss
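Here t evidently holds one-hot (or soft) targets, so np.argmax(t, axis=1) converts them to the integer class indices that F.accuracy expects. A small illustration with made-up values:

import numpy as np
import chainer.functions as F

y = np.array([[2.0, 0.1, 0.3],
              [0.2, 0.1, 1.5]], dtype=np.float32)
t_onehot = np.array([[1, 0, 0],
                     [0, 0, 1]], dtype=np.float32)

# Collapse one-hot rows to class indices before calling F.accuracy.
t = np.argmax(t_onehot, axis=1).astype(np.int32)  # -> [0, 2]
print(F.accuracy(y, t).data)  # 1.0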
Example #9
Source File: Alex.py From chainer-compiler with MIT License

def forward(self, x, t):
    # def forward(self, x):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    # loss = h
    # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
Example #10
Source File: test_classifier.py From chainer-chemistry with MIT License

def test_report_key(self, metrics_fun, compute_metrics):
    repo = chainer.Reporter()
    link = Classifier(predictor=DummyPredictor(), metrics_fun=metrics_fun)
    link.compute_metrics = compute_metrics
    repo.add_observer('target', link)
    with repo:
        observation = {}
        with reporter.report_scope(observation):
            link(self.x, self.t)

    # print('observation ', observation)
    actual_keys = set(observation.keys())
    if compute_metrics:
        if metrics_fun is None:
            assert set(['target/loss']) == actual_keys
        elif isinstance(metrics_fun, dict):
            assert set(['target/loss', 'target/user_key']) == actual_keys
        elif callable(metrics_fun):
            assert set(['target/loss', 'target/accuracy']) == actual_keys
        else:
            raise TypeError()
    else:
        assert set(['target/loss']) == actual_keys
Example #11
Source File: Alex_with_loss.py From chainer-compiler with MIT License

def forward(self, x, t):
    # def forward(self, x):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    # loss = h
    # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss

# from https://github.com/chainer/chainer/blob/master/examples/imagenet/alex.py
Example #12
Source File: ResNet.py From gconv_experiments with MIT License

def __call__(self, x, t, train=True, finetune=False):
    h = x

    # First conv layer
    h = self[0](h)

    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)

    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])

    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #13
Source File: P4MAllCNNC.py From gconv_experiments with MIT License

def __call__(self, x, t, train=True, finetune=False):
    h = x

    h = F.dropout(h, ratio=0.2, train=train)
    h = self.l1(h, train, finetune)
    h = self.l2(h, train, finetune)
    h = self.l3(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l4(h, train, finetune)
    h = self.l5(h, train, finetune)
    h = self.l6(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l7(h, train, finetune)
    h = self.l8(h, train, finetune)
    h = self.l9(h, train, finetune)

    h = F.sum(h, axis=-1)
    h = F.sum(h, axis=-1)
    h = F.sum(h, axis=-1)
    h /= 8 * 8 * 8

    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #14
Source File: AllCNNC.py From gconv_experiments with MIT License

def __call__(self, x, t, train=True, finetune=False):
    h = x

    h = F.dropout(h, ratio=0.2, train=train)
    h = self.l1(h, train, finetune)
    h = self.l2(h, train, finetune)
    h = self.l3(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l4(h, train, finetune)
    h = self.l5(h, train, finetune)
    h = self.l6(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l7(h, train, finetune)
    h = self.l8(h, train, finetune)
    h = self.l9(h, train, finetune)

    h = F.sum(h, axis=-1)
    h = F.sum(h, axis=-1)
    h /= 8 * 8

    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #15
Source File: P4MResNet.py From gconv_experiments with MIT License

def __call__(self, x, t, train=True, finetune=False):
    # First conv layer
    h = self[0](x)

    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)

    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    n, nc, ns, nx, ny = h.data.shape
    h = F.reshape(h, (n, nc * ns, nx, ny))
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])

    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #16
Source File: P4AllCNNC.py From gconv_experiments with MIT License

def __call__(self, x, t, train=True, finetune=False):
    h = x

    h = F.dropout(h, ratio=0.2, train=train)
    h = self.l1(h, train, finetune)
    h = self.l2(h, train, finetune)
    h = self.l3(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l4(h, train, finetune)
    h = self.l5(h, train, finetune)
    h = self.l6(h, train, finetune)

    h = F.dropout(h, ratio=0.5, train=train)
    h = self.l7(h, train, finetune)
    h = self.l8(h, train, finetune)
    h = self.l9(h, train, finetune)

    h = F.sum(h, axis=-1)
    h = F.sum(h, axis=-1)
    h = F.sum(h, axis=-1)
    h /= 8 * 8 * 4

    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #17
Source File: pointnet2_cls_ssg.py From chainer-pointnet with MIT License

def __call__(self, x, t):
    h = self.calc(x)
    cls_loss = functions.softmax_cross_entropy(h, t)
    # reporter.report({'cls_loss': cls_loss}, self)
    loss = cls_loss
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = functions.accuracy(h, t)
        reporter.report({'accuracy': acc}, self)
    return loss
Example #18
Source File: pointnet2_seg_ssg.py From chainer-pointnet with MIT License

def __call__(self, x, t):
    h = self.calc(x)
    bs, ch, n = h.shape
    h = functions.reshape(functions.transpose(h, (0, 2, 1)), (bs * n, ch))
    t = functions.reshape(t, (bs * n,))
    cls_loss = functions.softmax_cross_entropy(h, t)
    # reporter.report({'cls_loss': cls_loss}, self)
    loss = cls_loss
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = functions.accuracy(h, t)
        reporter.report({'accuracy': acc}, self)
    return loss
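For segmentation, per-point scores of shape (batch, classes, points) are flattened so that every point counts as one sample; this mirrors what the reference implementation in Example #6 does by rolling the class axis. A small sketch with random data:

import numpy as np
import chainer.functions as F

bs, ch, n = 2, 5, 7  # batch size, classes, points
h = np.random.randn(bs, ch, n).astype(np.float32)
t = np.random.randint(0, ch, size=(bs, n)).astype(np.int32)

# Move the class axis last, then flatten batch and point axes together.
h_flat = F.reshape(F.transpose(h, (0, 2, 1)), (bs * n, ch))
t_flat = F.reshape(t, (bs * n,))
print(F.accuracy(h_flat, t_flat).data)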
Example #19
Source File: trainer.py From GUINNESS with GNU General Public License v2.0

def __forward(self, batch_x, batch_t, train=True):
    xp = self.xp
    x = Variable(xp.asarray(batch_x), volatile=not train)
    t = Variable(xp.asarray(batch_t), volatile=not train)
    y = self.net(x, train=train)
    # print(type(y.data))
    # print(type(t.data))
    loss = F.softmax_cross_entropy(y, t)
    acc = F.accuracy(y, t)
    return loss, acc
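The volatile= keyword here is again Chainer v1 API; v2 and later removed it, and inference is instead wrapped in chainer.no_backprop_mode(). A minimal sketch of the modern equivalent, using a throwaway linear model:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

model = L.Linear(4, 3)
x = np.zeros((2, 4), dtype=np.float32)
t = np.zeros(2, dtype=np.int32)

# Chainer v2+: no volatile Variables; disable graph construction instead.
with chainer.no_backprop_mode(), chainer.using_config('train', False):
    y = model(x)
    acc = F.accuracy(y, t)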
Example #20
Source File: mdl_rgb_d.py From MultimodalDL with MIT License

def clear(self):
    self.loss = None
    self.accuracy = None
Example #21
Source File: ram.py From ram with MIT License

def __call__(self, x, t, train=True):
    x = chainer.Variable(self.xp.asarray(x), volatile=not train)
    t = chainer.Variable(self.xp.asarray(t), volatile=not train)
    bs = x.data.shape[0]  # batch size
    self.clear(bs, train)

    # init mean location
    l = np.random.uniform(-1, 1, size=(bs, 2)).astype(np.float32)
    l = chainer.Variable(self.xp.asarray(l), volatile=not train)

    # forward n_steps times
    sum_ln_pi = 0
    self.forward(x, train, action=False, init_l=l)
    for i in range(1, self.n_steps):
        action = True if (i == self.n_steps - 1) else False
        l, ln_pi, y, b = self.forward(x, train, action)
        if train:
            sum_ln_pi += ln_pi

    # loss with softmax cross entropy
    self.loss_action = F.softmax_cross_entropy(y, t)
    self.loss = self.loss_action
    self.accuracy = F.accuracy(y, t)

    if train:
        # reward
        conditions = self.xp.argmax(y.data, axis=1) == t.data
        r = self.xp.where(conditions, 1., 0.).astype(np.float32)

        # squared error between reward and baseline
        self.loss_base = F.mean_squared_error(r, b)
        self.loss += self.loss_base

        # loss with REINFORCE rule
        mean_ln_pi = sum_ln_pi / (self.n_steps - 1)
        self.loss_reinforce = F.sum(-mean_ln_pi * (r - b)) / bs
        self.loss += self.loss_reinforce

    return self.loss
Example #22
Source File: ram.py From ram with MIT License

def clear(self, bs, train):
    self.loss = None
    self.accuracy = None

    # init internal state of core RNN
    if self.use_lstm:
        self.core_lstm.reset_state()
    else:
        self.h = self.xp.zeros(shape=(bs, self.d_core), dtype=np.float32)
        self.h = chainer.Variable(self.h, volatile=not train)
Example #23
Source File: modeling.py From models with MIT License

def compute_loss(self, input_ids, input_mask, token_type_ids,
                 start_positions, end_positions):
    (start_logits, end_logits) = self.__call__(
        input_ids, input_mask, token_type_ids)
    start_loss = F.softmax_cross_entropy(start_logits, start_positions)
    end_loss = F.softmax_cross_entropy(end_logits, end_positions)
    total_loss = (start_loss + end_loss) / 2.0
    chainer.report({'loss': total_loss.array}, self)
    accuracy = (check_answers(start_logits, start_positions) *
                check_answers(end_logits, end_positions, start_positions)).mean()
    chainer.report({'accuracy': accuracy}, self)
    return total_loss
Example #24
Source File: modeling.py From models with MIT License

def __call__(self, input_ids, input_mask, token_type_ids, labels):
    output_layer = self.bert.get_pooled_output(
        input_ids, input_mask, token_type_ids)
    output_layer = F.dropout(output_layer, 0.1)
    logits = self.output(output_layer)
    loss = F.softmax_cross_entropy(logits, labels)
    chainer.report({'loss': loss.array}, self)
    chainer.report({'accuracy': F.accuracy(logits, labels)}, self)
    return loss

# For showing SQuAD accuracy with heuristics
Example #25
Source File: images.py From models with MIT License

def __call__(self, x, t, get_accuracy=False):
    logit = self.logit(x)
    loss = F.softmax_cross_entropy(logit, t)
    acc = F.accuracy(logit, t).item()
    if get_accuracy:
        return loss, acc
    else:
        return loss
Example #26
Source File: train_mnist.py From see with GNU General Public License v3.0

def mnist_accuracy(x, t):
    xp = cuda.get_array_module(x[0].data, t.data)
    batch_predictions, _, _ = x
    accuracies = []

    for predictions, labels in zip(
            F.split_axis(batch_predictions, args.timesteps, axis=1),
            F.separate(t, axis=1)):
        batch_size, _, num_classes = predictions.data.shape
        predictions = F.reshape(
            F.flatten(predictions), (batch_size, num_classes))
        accuracies.append(F.accuracy(predictions, labels))

    return sum(accuracies) / max(len(accuracies), 1)
Example #27
Source File: multi_accuracy_classifier.py From see with GNU General Public License v3.0

def __call__(self, *args):
    """Computes the loss value for an input and label pair.

    It also computes accuracy and stores it to the attribute.

    Args:
        args (list of ~chainer.Variable): Input minibatch. All elements
            of ``args`` but the last one are features, and the last
            element corresponds to the ground truth labels. The features
            are fed to the predictor and the result is compared with the
            ground truth labels.

    Returns:
        ~chainer.Variable: Loss value.
    """
    assert len(args) >= 2
    x = args[:-1]
    t = args[-1]
    self.y = None
    self.loss = None

    if self.provide_label_during_forward:
        self.y = self.predictor(*x, t)
    else:
        self.y = self.predictor(*x)
    self.loss = self.lossfun(self.y, t)
    reporter.report({'loss': self.loss}, self)

    if self.compute_accuracy:
        reported_accuracies = self.accfun(self.y, t)
        if len(self.accuracy_types) == 1:
            reported_accuracies = reported_accuracies,
        report = {accuracy_type: reported_accuracy
                  for accuracy_type, reported_accuracy
                  in zip(self.accuracy_types, reported_accuracies)}
        reporter.report(report, self)
    return self.loss
Example #28
Source File: multi_accuracy_classifier.py From see with GNU General Public License v3.0

def __init__(self, predictor, accuracy_types,
             lossfun=softmax_cross_entropy.softmax_cross_entropy,
             accfun=accuracy,
             provide_label_during_forward=False):
    super(Classifier, self).__init__(predictor, lossfun=lossfun, accfun=accfun)
    assert type(accuracy_types) is tuple, \
        "accuracy_types must be a tuple of strings"
    self.accuracy_types = accuracy_types
    self.provide_label_during_forward = provide_label_during_forward
Example #29
Source File: lstm_per_step_metrics.py From see with GNU General Public License v3.0

def calc_accuracy(self, x, t):
    batch_predictions, _, _ = x
    self.xp = cuda.get_array_module(batch_predictions[0], t)
    accuracies = []

    for prediction, label in zip(batch_predictions, F.separate(t, axis=1)):
        recognition_accuracy = F.accuracy(prediction, label)
        accuracies.append(recognition_accuracy)

    return sum(accuracies) / len(accuracies)
Example #30
Source File: test_accuracy.py From chainer with MIT License

def forward_expected(self, inputs):
    x, t = inputs
    expected = accuracy(x, t, self.ignore_label)
    expected = force_array(expected, self.dtype)
    return expected,