Python chainer.functions.sigmoid_cross_entropy() Examples
The following are 17 code examples of chainer.functions.sigmoid_cross_entropy(), collected from open-source projects. The function computes the element-wise sigmoid (binary) cross entropy between real-valued logits x and integer targets t of the same shape; target entries of -1 are ignored. The original project and source file for each example are noted above it. You may also want to check out the other available functions and classes of the chainer.functions module.
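A minimal sketch of the basic call, assuming NumPy inputs (shapes and values invented for illustration): x holds raw logits and t holds {0, 1} labels of the same shape, with -1 marking ignored entries.

import numpy as np
import chainer.functions as F

x = np.array([[1.2, -0.5], [0.3, 2.0]], dtype=np.float32)  # logits
t = np.array([[1, 0], [-1, 1]], dtype=np.int32)            # -1 is ignored
mean_loss = F.sigmoid_cross_entropy(x, t)                  # scalar mean
per_elem = F.sigmoid_cross_entropy(x, t, reduce='no')      # same shape as x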
Example #1
Source File: MyUpdater.py From HFT-CNN with MIT License
def update_core(self):
    batch = self._iterators['main'].next()
    x = chainer.cuda.to_gpu(np.array([i[0] for i in batch]))
    labels = [l[1] for l in batch]
    row_idx, col_idx, val_idx = [], [], []
    for i in range(len(labels)):
        l_list = list(set(labels[i]))
        for y in l_list:
            row_idx.append(i)
            col_idx.append(y)
            val_idx.append(1)
    m = len(labels)
    n = self.class_dim
    t = sp.csr_matrix((val_idx, (row_idx, col_idx)),
                      shape=(m, n), dtype=np.int8).todense()
    t = chainer.cuda.to_gpu(t)
    optimizer = self._optimizers['main']
    optimizer.target.cleargrads()
    loss = F.sigmoid_cross_entropy(optimizer.target(x), t)
    chainer.reporter.report({'main/loss': loss})
    loss.backward()
    optimizer.update()
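The scipy-based target construction above is easier to see on a toy case. A small sketch (sizes and labels invented) building the same kind of dense multi-hot matrix:

import numpy as np
import scipy.sparse as sp

# samples 0 and 1 carry class sets {0, 2} and {1}
row_idx, col_idx, val_idx = [0, 0, 1], [0, 2, 1], [1, 1, 1]
t = sp.csr_matrix((val_idx, (row_idx, col_idx)),
                  shape=(2, 3), dtype=np.int8).todense()
# t == [[1, 0, 1],
#       [0, 1, 0]]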
Example #2
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License
def check_forward_no_reduction(self, x_data, t_data):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    loss = functions.sigmoid_cross_entropy(
        x_val, t_val, self.normalize, reduce='no')
    self.assertEqual(loss.data.shape, self.x.shape)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    # Compute expected value
    if not getattr(self, 'ignore_all', False):
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                if td == -1:
                    loss_expect = 0
                else:
                    loss_expect = -(
                        xd * (td - (xd >= 0)) -
                        math.log(1 + math.exp(-numpy.abs(xd))))
                self.assertAlmostEqual(
                    loss_expect, loss_value[i, j], places=self.places)
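For reference, the expected value computed in the loop above is the numerically stable form of sigmoid cross entropy: max(x, 0) - x*t + log(1 + exp(-|x|)), which is algebraically equal to -(x*(t - (x >= 0)) - log(1 + exp(-|x|))). A standalone NumPy restatement (not part of the test suite):

import numpy as np

def stable_sigmoid_ce(x, t):
    # elementwise; avoids overflow in exp() for large |x|
    return np.maximum(x, 0) - x * t + np.log1p(np.exp(-np.abs(x)))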
Example #3
Source File: updater.py From Guided-Attention-Inference-Network with MIT License
def update_core(self):
    image, labels = self.converter(self.get_iterator('main').next())
    assert image.shape[0] == 1, "Batchsize of only 1 is allowed for now"
    image = Variable(image)
    if self.device >= 0:
        image.to_gpu(self.device)
    cl_output = self._optimizers['main'].target.classify(image)
    xp = get_array_module(cl_output.data)
    target = xp.asarray([[0] * (self.no_of_classes)] * cl_output.shape[0])
    for i in range(labels.shape[0]):
        gt_labels = np.unique(labels[i]).astype(np.int32)[2:] - 1  # Not considering -1 & 0
        target[i][gt_labels] = 1
    loss = F.sigmoid_cross_entropy(cl_output, target, normalize=True)
    report({'Loss': loss}, self.get_optimizer('main').target)
    self._optimizers['main'].target.cleargrads()
    loss.backward()
    self._optimizers['main'].update()
Example #4
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License
def check_double_backward(self, x_data, t_data, y_grad, gx_grad,
                          normalize=True, reduce='mean'):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    if reduce == 'mean':
        y_grad = utils.force_array(y_grad.sum())

    def f(x, t):
        return chainer.functions.sigmoid_cross_entropy(
            x, t, normalize=normalize, reduce=reduce)

    gradient_check.check_double_backward(
        f, (x_data, t_data), y_grad, (gx_grad,),
        **self.check_double_backward_options)
Example #5
Source File: multi_label_classifier.py From models with MIT License
def __call__(self, x, labels):
    x = BatchTransform(self.model.mean)(x)
    x = self.xp.array(x)
    scores = self.model(x)
    B, n_class = scores.shape[:2]
    one_hot_labels = self.xp.zeros((B, n_class), dtype=np.int32)
    for i, label in enumerate(labels):
        one_hot_labels[i, label] = 1
    # sigmoid_cross_entropy normalizes the loss
    # by the size of batch and the number of classes.
    # It works better to remove the normalization factor
    # of the number of classes.
    loss = self.loss_scale * F.sigmoid_cross_entropy(
        scores, one_hot_labels)

    result = calc_accuracy(scores, labels)
    reporter.report({'loss': loss}, self)
    reporter.report({'accuracy': result['accuracy']}, self)
    reporter.report({'n_pred': result['n_pred']}, self)
    reporter.report({'n_pos': result['n_pos']}, self)
    return loss
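The normalization noted in the comment can be checked on toy data: with no ignored labels, normalize=True divides the summed loss by B * n_class, while normalize=False divides by B only (compare the expected-value branches in Example #15), so setting loss_scale to the number of classes would undo the per-class factor. A small sketch with invented shapes:

import numpy as np
import chainer.functions as F

B, n_class = 2, 5
scores = np.random.randn(B, n_class).astype(np.float32)
targets = np.random.randint(0, 2, size=(B, n_class)).astype(np.int32)

mean_all = F.sigmoid_cross_entropy(scores, targets)                     # sum / (B * n_class)
per_sample = F.sigmoid_cross_entropy(scores, targets, normalize=False)  # sum / B
# n_class * mean_all.array is (approximately) per_sample.array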
Example #6
Source File: test_loss.py From chainer with MIT License
def forward_chainerx(self, inputs):
    x, = inputs
    # TODO(aksub99): Improve implementation to avoid non-differentiability
    # wrt targets
    t = self.backend_config.get_array(self.t)
    out = chainerx.sigmoid_cross_entropy(x, t)
    return out,
Example #7
Source File: MyEvaluator.py From HFT-CNN with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()
    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            row_idx, col_idx, val_idx = [], [], []
            x = cuda.to_gpu(np.array([i[0] for i in batch]))
            labels = [l[1] for l in batch]
            for i in range(len(labels)):
                l_list = list(set(labels[i]))
                for y in l_list:
                    row_idx.append(i)
                    col_idx.append(y)
                    val_idx.append(1)
            m = len(labels)
            n = self.class_dim
            t = sp.csr_matrix((val_idx, (row_idx, col_idx)),
                              shape=(m, n), dtype=np.int8).todense()
            t = cuda.to_gpu(t)
            with function.no_backprop_mode():
                loss = F.sigmoid_cross_entropy(eval_func(x), t)
                summary.add({MyEvaluator.default_name + '/main/loss': loss})
        summary.add(observation)
    return summary.compute_mean()
Example #8
Source File: updater.py From Guided-Attention-Inference-Network with MIT License
def update_core(self):
    image, labels = self.converter(self.get_iterator('main').next())
    image = Variable(image)
    assert image.shape[0] == 1, "Batchsize of only 1 is allowed for now"
    if self.device >= 0:
        image.to_gpu(self.device)
    xp = get_array_module(image.data)
    to_substract = np.array((-1, 0))
    noise_classes = np.unique(labels[0]).astype(np.int32)
    target = xp.asarray([[0] * (self.no_of_classes)])
    gt_labels = np.setdiff1d(noise_classes, to_substract) - 1  # np.unique(labels[0]).astype(np.int32)[2:] - 1
    target[0][gt_labels] = 1

    gcam, cl_scores, class_id = self._optimizers['main'].target.stream_cl(image, gt_labels)
    mask = self._optimizers['main'].target.get_mask(gcam)
    masked_image = self._optimizers['main'].target.mask_image(image, mask)
    masked_output = self._optimizers['main'].target.stream_am(masked_image)
    masked_output = F.sigmoid(masked_output)

    cl_loss = F.sigmoid_cross_entropy(cl_scores, target, normalize=True)
    am_loss = masked_output[0][class_id][0]

    labels = Variable(labels)
    if self.device >= 0:
        labels.to_gpu(self.device)
    segment_loss = self._optimizers['main'].target(image, labels)

    total_loss = self.lambd1 * cl_loss + self.lambd2 * am_loss + self.lambd3 * segment_loss

    report({'AM_Loss': am_loss}, self.get_optimizer('main').target)
    report({'CL_Loss': cl_loss}, self.get_optimizer('main').target)
    report({'SG_Loss': segment_loss}, self.get_optimizer('main').target)
    report({'TotalLoss': total_loss}, self.get_optimizer('main').target)

    self._optimizers['main'].target.cleargrads()
    total_loss.backward()
    self._optimizers['main'].update()
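The target construction above can be traced on invented values: np.setdiff1d drops the -1 and 0 entries before the remaining class ids are shifted down by one.

import numpy as np

labels = np.array([[-1, 0, 3, 7, 3]])                  # toy label map
noise_classes = np.unique(labels[0]).astype(np.int32)  # [-1, 0, 3, 7]
gt_labels = np.setdiff1d(noise_classes, np.array((-1, 0))) - 1  # [2, 6]
target = np.zeros((1, 10), dtype=np.int32)
target[0][gt_labels] = 1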
Example #9
Source File: tgan_updater_vanilla.py From tgan with MIT License
def update_core(self):
    xp = self.fsgen.xp
    fsgen_optimizer = self.get_optimizer('fsgen')
    vgen_optimizer = self.get_optimizer('vgen')
    vdis_optimizer = self.get_optimizer('vdis')

    real_video, fake_video, dis_fake, dis_real = self.forward()
    batchsize = real_video.shape[0]

    loss_dis_fake = F.sigmoid_cross_entropy(
        dis_fake, xp.ones((batchsize, 1, 1, 1), dtype="i"))
    loss_dis_real = F.sigmoid_cross_entropy(
        dis_real, xp.zeros((batchsize, 1, 1, 1), dtype="i"))
    loss_gen = F.sigmoid_cross_entropy(
        dis_fake, xp.zeros((batchsize, 1, 1, 1), dtype="i"))
    chainer.report({'loss_dis_fake': loss_dis_fake}, self.vdis)
    chainer.report({'loss_dis_real': loss_dis_real}, self.vdis)

    fsgen_optimizer.target.zerograds()
    vgen_optimizer.target.zerograds()
    loss_gen.backward()
    fsgen_optimizer.update()
    vgen_optimizer.update()

    fake_video.unchain_backward()
    vdis_optimizer.target.zerograds()
    (loss_dis_fake + loss_dis_real).backward()
    vdis_optimizer.update()
Example #10
Source File: mask_head.py From chainercv with MIT License
def mask_head_loss_post(segms, mask_roi_indices, gt_segms, gt_mask_labels,
                        batchsize):
    """Loss function for Mask Head (post).

    Args:
        segms (array): An array whose shape is :math:`(R, n\_class, M, M)`,
            where :math:`R` is the total number of RoIs in the given batch.
        mask_roi_indices (array): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_segms (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_mask_labels (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        batchsize (int): The size of batch.

    Returns:
        chainer.Variable: Mask loss.
    """
    xp = cuda.get_array_module(segms.array)

    mask_roi_indices = xp.hstack(mask_roi_indices).astype(np.int32)
    gt_segms = xp.vstack(gt_segms)
    gt_mask_labels = xp.hstack(gt_mask_labels).astype(np.int32)

    mask_loss = F.sigmoid_cross_entropy(
        segms[np.arange(len(gt_mask_labels)), gt_mask_labels],
        gt_segms.astype(np.int32))
    return mask_loss
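The advanced indexing in the loss above selects, for each RoI, the predicted mask of its ground-truth class. A toy sketch of just that step (shapes invented):

import numpy as np

R, n_class, M = 3, 4, 2
segms = np.random.randn(R, n_class, M, M).astype(np.float32)
gt_mask_labels = np.array([1, 0, 3])
per_roi_masks = segms[np.arange(R), gt_mask_labels]  # shape (R, M, M)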
Example #11
Source File: test_loss.py From chainer with MIT License
def forward_chainer(self, inputs):
    x, = inputs
    t = self.t
    out = F.sigmoid_cross_entropy(x, t, normalize=False, reduce='no')
    return out,
Example #12
Source File: MnihCNN_single.py From ssai-cnn with MIT License
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    h = F.dropout(F.relu(self.fc4(h)), train=self.train)
    h = self.fc5(h)
    self.pred = F.reshape(h, (x.data.shape[0], 16, 16))

    if t is not None:
        self.loss = F.sigmoid_cross_entropy(self.pred, t, normalize=False)
        return self.loss
    else:
        self.pred = F.sigmoid(self.pred)
        return self.pred
Example #13
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License
def check_backward_no_reduction(self, x_data, t_data, y_grad):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    def f(x, t):
        return chainer.functions.sigmoid_cross_entropy(x, t, reduce='no')

    gradient_check.check_backward(
        f, (x_data, t_data), y_grad, **self.check_backward_options)
Example #14
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License
def check_backward(self, x_data, t_data):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    gradient_check.check_backward(
        functions.sigmoid_cross_entropy,
        (x_data, t_data), None, **self.check_backward_options)
Example #15
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License
def check_forward(self, x_data, t_data, use_cudnn='always'):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        loss = functions.sigmoid_cross_entropy(x_val, t_val, self.normalize)
    self.assertEqual(loss.data.shape, ())
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = float(cuda.to_cpu(loss.data))

    # Compute expected value
    loss_expect = 0
    non_ignore_count = 0
    for i in six.moves.range(self.x.shape[0]):
        for j in six.moves.range(self.x.shape[1]):
            xd, td = self.x[i, j], self.t[i, j]
            if td == -1:
                continue
            loss_expect -= xd * (td - (xd >= 0)) \
                - math.log(1 + math.exp(-numpy.abs(xd)))
            non_ignore_count += 1
    if non_ignore_count == 0:
        loss_expect = 0
    elif self.normalize:
        loss_expect /= non_ignore_count
    else:
        loss_expect /= self.t.shape[0]
    self.assertAlmostEqual(loss_expect, loss_value, places=self.places)
Example #16
Source File: models.py From EEND with MIT License
def pit_loss(pred, label, label_delay=0):
    """
    Permutation-invariant training (PIT) cross entropy loss function.

    Args:
      pred:  (T,C)-shaped pre-activation values
      label: (T,C)-shaped labels in {0,1}
      label_delay: if label_delay == 5:
           pred: 0 1 2 3 4 | 5 6 ... 99 100 |
          label: x x x x x | 0 1 ... 94  95 | 96 97 98 99 100
          calculated area: | <------------> |

    Returns:
      min_loss: (1,)-shape mean cross entropy
      label_perms[min_index]: permutated labels
    """
    # label permutations along the speaker axis
    label_perms = [label[..., list(p)] for p
                   in permutations(range(label.shape[-1]))]
    losses = F.stack(
        [F.sigmoid_cross_entropy(
            pred[label_delay:, ...],
            l[:len(l) - label_delay, ...]) for l in label_perms])
    xp = cuda.get_array_module(losses)
    min_loss = F.min(losses) * (len(label) - label_delay)
    min_index = cuda.to_cpu(xp.argmin(losses.data))
    return min_loss, label_perms[min_index]
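A hypothetical toy call (shapes follow the docstring; values are random):

import numpy as np

T, C = 100, 2
pred = np.random.randn(T, C).astype(np.float32)
label = np.random.randint(0, 2, size=(T, C)).astype(np.int32)
min_loss, best_label = pit_loss(pred, label, label_delay=5)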
Example #17
Source File: mask_head.py From chainer-compiler with MIT License
def mask_head_loss_post(segms, mask_roi_indices, gt_segms, gt_mask_labels,
                        batchsize):
    """Loss function for Mask Head (post).

    Args:
        segms (array): An array whose shape is :math:`(R, n\_class, M, M)`,
            where :math:`R` is the total number of RoIs in the given batch.
        mask_roi_indices (array): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_segms (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_mask_labels (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        batchsize (int): The size of batch.

    Returns:
        chainer.Variable: Mask loss.
    """
    xp = cuda.get_array_module(segms.array)

    mask_roi_indices = xp.hstack(mask_roi_indices).astype(np.int32)
    gt_segms = xp.vstack(gt_segms)
    gt_mask_labels = xp.hstack(gt_mask_labels).astype(np.int32)

    mask_loss = F.sigmoid_cross_entropy(
        segms[np.arange(len(gt_mask_labels)), gt_mask_labels],
        gt_segms.astype(np.int32))
    return mask_loss