Python chainer.functions.absolute() Examples
The following are 16 code examples of chainer.functions.absolute().
You may also want to check out all available functions and classes of the chainer.functions module.
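Before the project examples, here is a minimal, self-contained sketch (assuming only NumPy and Chainer are installed) of what the function computes: the elementwise absolute value, returned as a differentiable chainer.Variable.

import numpy as np
import chainer.functions as F

x = np.array([[-1.5, 0.0, 2.5]], dtype=np.float32)
y = F.absolute(x)   # elementwise |x|, returns a chainer.Variable
print(y.array)      # [[1.5 0.  2.5]]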
Example #1
Source File: block.py From Deep_VoiceChanger with MIT License
def __call__(self, x):
    if self.dr:
        with chainer.using_config('train', True):
            x = F.dropout(x, self.dr)
    if self.gap:
        x = F.sum(x, axis=(2, 3))
    N = x.shape[0]
    # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
    feature = F.reshape(F.leaky_relu(x), (N, -1))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    h = self.l(h)
    return h
Example #2
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x):
    N = x.data.shape[0]
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    feature = F.reshape(F.leaky_relu(self.c3_0(h)), (N, 8192))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    return self.l4(h)
Example #3
Source File: utils.py From kiss with GNU General Public License v3.0
def calc_loss(self, grids, image_size, **kwargs):
    normalize = kwargs.get('normalize', True)
    corner_coordinates = self.get_corners(grids, image_size, scale_to_image_size=False)
    # determine whether a point is out of the image, image range is [-1, 1]
    # everything outside of this increases the loss!
    bbox = F.concat(corner_coordinates, axis=0)
    top_loss = bbox + 1.5
    bottom_loss = bbox - 1.5

    # do not penalize anything inside the image
    top_loss = F.absolute(F.minimum(top_loss, self.xp.zeros_like(top_loss.array)))
    top_loss = F.reshape(top_loss, (len(corner_coordinates), -1))

    bottom_loss = F.maximum(bottom_loss, self.xp.zeros_like(bottom_loss.array))
    bottom_loss = F.reshape(bottom_loss, (len(corner_coordinates), -1))

    loss = F.sum(F.concat([top_loss, bottom_loss], axis=0), axis=0)
    if normalize:
        loss = F.sum(loss)
    return loss
Example #4
Source File: block_1d.py From Deep_VoiceChanger with MIT License
def __call__(self, x):
    N = x.shape[0]
    # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
    feature = F.reshape(x, (N, -1))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    h = self.l(h)
    return h
Example #5
Source File: dqn.py From chainerrl with MIT License
def _compute_loss(self, exp_batch, errors_out=None):
    """Compute the Q-learning loss for a batch of experiences

    Args:
        exp_batch (dict): A dict of batched arrays of transitions

    Returns:
        Computed loss from the minibatch of experiences
    """
    y, t = self._compute_y_and_t(exp_batch)

    if errors_out is not None:
        del errors_out[:]
        delta = F.absolute(y - t)
        if delta.ndim == 2:
            delta = F.sum(delta, axis=1)
        delta = cuda.to_cpu(delta.array)
        for e in delta:
            errors_out.append(e)

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            y, t, exp_batch['weights'],
            clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(
            y, t, clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator)
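In the loss above, F.absolute(y - t) yields the per-sample absolute TD error that is exported through errors_out, typically as priorities for prioritized replay. A minimal sketch with toy values (the shapes here are illustrative assumptions, not taken from the project):

import numpy as np
import chainer.functions as F

y = np.array([[1.0], [0.2]], dtype=np.float32)  # predicted Q-values
t = np.array([[0.5], [0.6]], dtype=np.float32)  # bootstrapped targets
delta = F.absolute(y - t)     # elementwise |y - t|
delta = F.sum(delta, axis=1)  # collapse the trailing axis, as above
print(delta.array)            # approximately [0.5 0.4]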
Example #6
Source File: MathMisc.py From chainer-compiler with MIT License
def forward(self, x):
    y1 = F.absolute(x)
    return y1
Example #7
Source File: MathMisc.py From chainer-compiler with MIT License
def forward(self, x):
    y1 = np.absolute(x)
    return y1
Example #8
Source File: light_head_rcnn_train_chain.py From chainercv with MIT License
def _smooth_l1_loss_base(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y, axis=1)
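Examples #8, #9, #10, and #15 all build the same smooth L1 (Huber-style) loss from F.absolute: quadratic where |diff| < 1/sigma^2 and linear beyond that threshold, which keeps gradients bounded for large residuals. A standalone sketch of the same piecewise rule (toy values, without the in_weight masking):

import numpy as np
import chainer.functions as F

def smooth_l1(diff, sigma=1.0):
    sigma2 = sigma ** 2
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    return (flag * (sigma2 / 2.) * F.square(diff)
            + (1 - flag) * (abs_diff - 0.5 / sigma2))

diff = np.array([0.2, -3.0], dtype=np.float32)
print(smooth_l1(diff).array)  # [0.02 2.5] -- quadratic branch, then linear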
Example #9
Source File: faster_rcnn_train_chain.py From chainercv with MIT License
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y)
Example #10
Source File: mask_rcnn_train_chain.py From chainer-mask-rcnn with MIT License
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.data < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y)
Example #11
Source File: megnet_softplus.py From chainer-chemistry with MIT License
def megnet_softplus(x):
    """Modified softplus function used by MEGNet

    The original implementation is below.
    https://github.com/materialsvirtuallab/megnet/blob/f91773f0f3fa8402b494638af9ef2ed2807fcba7/megnet/activations.py#L6

    Args:
        x (Variable): Input variable

    Returns:
        output (Variable): Output variable whose shape is the same as `x`
    """
    return functions.relu(x) + \
        functions.log(0.5 * functions.exp(-functions.absolute(x)) + 0.5)
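Algebraically, relu(x) + log(0.5 * exp(-|x|) + 0.5) equals the shifted softplus log(1 + exp(x)) - log(2), but the exp() never sees a large positive argument, so it cannot overflow. A small numerical check (assuming NumPy; the test values are arbitrary):

import numpy as np
import chainer.functions as F

def megnet_softplus(x):
    return F.relu(x) + F.log(0.5 * F.exp(-F.absolute(x)) + 0.5)

x = np.array([-100.0, 0.0, 100.0], dtype=np.float32)
print(megnet_softplus(x).array)           # finite: [-0.693  0.  99.307] (approx.)
print(np.log1p(np.exp(x)) - np.log(2.0))  # naive form overflows to inf at x=100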
Example #12
Source File: coupling.py From graph-nvp with MIT License
def __call__(self, adj):
    masked_adj = adj[:, :, self.mask]
    log_s, t = self._s_t_functions(masked_adj)
    t = F.broadcast_to(t, adj.shape)
    s = F.sigmoid(log_s + 2)
    s = F.broadcast_to(s, adj.shape)
    adj = adj * self.mask + adj * (s * ~self.mask) + t * (~self.mask)
    log_det_jacobian = F.sum(F.log(F.absolute(s)), axis=(1, 2, 3))
    return adj, log_det_jacobian
Example #13
Source File: coupling.py From graph-nvp with MIT License
def __call__(self, x, adj):
    masked_x = self.mask * x
    s, t = self._s_t_functions(masked_x, adj)
    x = masked_x + x * (s * ~self.mask) + t * ~self.mask
    log_det_jacobian = F.sum(F.log(F.absolute(s)), axis=(1, 2))
    return x, log_det_jacobian
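In both coupling layers, F.absolute appears inside the log-determinant of the Jacobian: an elementwise affine map y = s * x + t has a diagonal Jacobian, so log|det J| reduces to a sum of log|s| over the transformed dimensions. A toy illustration of that identity (the shapes and values are made up for the example):

import numpy as np
import chainer.functions as F

s = np.array([[0.5, 2.0], [1.5, 0.25]], dtype=np.float32)
log_det = F.sum(F.log(F.absolute(s)), axis=1)
print(log_det.array)                    # [ 0.     -0.981] (approx.)
print(np.log(np.abs(s.prod(axis=1))))   # matches: log|prod(s)| per sample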
Example #14
Source File: dqfd.py From baselines with MIT License
def _compute_ddqn_losses(self, exp_batch, errors_out=None):
    """Compute the Q-learning losses for a batch of experiences

    Args:
        exp_batch (dict): A dict of batched arrays of transitions

    Returns:
        Computed loss from the minibatch of experiences
    """
    y, t = self._compute_y_and_ts(exp_batch)
    n_branches = exp_batch['action'].shape[1]

    # Calculate the errors_out for priorities with the 1-step err
    del errors_out[:]
    delta = F.absolute(y - t)
    if delta.ndim == 2:
        delta = F.sum(delta, axis=1)
    delta = cuda.to_cpu(delta.array)
    for e in delta:
        errors_out.append(e)

    is_1_step = self.xp.abs(1. - exp_batch["is_n_step"]).reshape(-1, 1)
    is_1_step = self.xp.tile(is_1_step, (1, n_branches)).reshape(-1)
    is_n_step = exp_batch['is_n_step'].reshape(-1, 1)
    is_n_step = self.xp.tile(is_n_step, (1, n_branches)).reshape(-1)
    weights = exp_batch['weights'].reshape(-1, 1)
    weights = F.tile(weights, (1, n_branches)).reshape(-1)

    loss_1step = compute_weighted_value_loss(
        y, t, weights, mask=is_1_step,
        clip_delta=self.clip_delta,
        batch_accumulator=self.batch_accumulator)
    loss_nstep = compute_weighted_value_loss(
        y, t, weights, mask=is_n_step,
        clip_delta=self.clip_delta,
        batch_accumulator=self.batch_accumulator)
    return loss_nstep, loss_1step
Example #15
Source File: fcis_train_chain.py From chainer-fcis with MIT License
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y)
Example #16
Source File: train.py From style_transfer_3d with MIT License
def compute_tv_loss(images, masks):
    # s1 = cf.absolute(images[:, :, 1:, :-1] - images[:, :, :-1, :-1])
    # s2 = cf.absolute(images[:, :, :-1, 1:] - images[:, :, :-1, :-1])
    s1 = cf.square(images[:, :, 1:, :-1] - images[:, :, :-1, :-1])
    s2 = cf.square(images[:, :, :-1, 1:] - images[:, :, :-1, :-1])
    masks = cf.broadcast_to(masks[:, None, :-1, :-1], s1.shape)
    masks = masks.data == 1
    return cf.sum(masks * (s1 + s2))