Python chainer.functions.sqrt() Examples

The following are 30 code examples of chainer.functions.sqrt(), collected from open-source projects. Each example notes its original project and source file. You may also want to browse the other functions and classes available in the chainer.functions module.
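As a quick orientation before the examples, here is a minimal sketch of what chainer.functions.sqrt() does: it applies an elementwise, differentiable square root to an array-like input (the variable names below are illustrative):

import numpy as np
import chainer.functions as F
from chainer import Variable

x = Variable(np.array([1.0, 4.0, 9.0], dtype=np.float32))
y = F.sqrt(x)                   # elementwise square root -> [1., 2., 3.]
y.grad = np.ones_like(y.array)
y.backward()                    # d/dx sqrt(x) = 1 / (2 * sqrt(x))
print(x.grad)                   # [0.5, 0.25, 0.1667]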
Example #1
Source File: net.py    From chainer-gan-lib with MIT License
def __init__(self, in_ch, out_ch):
        w = chainer.initializers.Normal(1.0) # equalized learning rate
        self.inv_c = np.sqrt(2.0/in_ch)
        super(EqualizedLinear, self).__init__()
        with self.init_scope():
            self.c = L.Linear(in_ch, out_ch, initialW=w) 
Example #2
Source File: lifted_struct_loss.py    From deep_metric_learning with MIT License
def lifted_struct_loss(f_a, f_p, alpha=1.0):
    """Lifted struct loss function.

    Args:
        f_a (~chainer.Variable): Feature vectors of anchor examples.
            All examples must belong to different classes from one another.
        f_p (~chainer.Variable): Positive examples corresponding to f_a.
            Each example must belong to the same class as the corresponding
            example in f_a.
        alpha (float): The margin parameter.

    Returns:
        ~chainer.Variable: Loss value.

    See: `Deep Metric Learning via Lifted Structured Feature Embedding \
        <http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/\
        Song_Deep_Metric_Learning_CVPR_2016_paper.pdf>`_

    """
    assert f_a.shape == f_p.shape, 'f_a and f_p must have same shape.'
    n = 2 * f_a.shape[0]  # use shape[0] because len(Variable) returns its total size
    f = F.vstack((f_a, f_p))
    D_sq = squared_distance_matrix(f)

    pairs_p = np.arange(n).reshape(2, -1)  # indexes of positive pairs
    row = []
    col = []
    for i, j in pairs_p.T:
        row.append([i] * (n - 2) + [j] * (n - 2))
        col.append(np.tile(np.delete(np.arange(n), (i, j)), 2))
    row = np.ravel(row)
    col = np.ravel(col)
    pairs_n = np.vstack((row, col))

    distances_p = F.sqrt(D_sq[pairs_p[0], pairs_p[1]])
    distances_n = F.sqrt(D_sq[pairs_n[0], pairs_n[1]])
    distances_n = distances_n.reshape((n // 2, -1))
    loss_ij = F.logsumexp(alpha - distances_n, axis=1) + distances_p
    return F.sum(F.relu(loss_ij) ** 2) / n 
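The helper squared_distance_matrix comes from elsewhere in the same source file and is not shown here. A minimal sketch of the quantity it is assumed to compute, D_sq[i, j] = ||f_i - f_j||^2, could look like this (hypothetical reconstruction, not the project's actual code):

def squared_distance_matrix(f):
    # D_sq[i, j] = ||f_i||^2 + ||f_j||^2 - 2 * <f_i, f_j>
    sq = F.sum(f * f, axis=1)                           # (n,)
    n = f.shape[0]
    sq_i = F.broadcast_to(F.expand_dims(sq, 1), (n, n))
    sq_j = F.broadcast_to(F.expand_dims(sq, 0), (n, n))
    return sq_i + sq_j - 2.0 * F.matmul(f, f, transb=True)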
Example #3
Source File: updater.py    From 3dpose_gan with MIT License
def calculate_rotation(xy_real, z_pred):
        xy_split = F.split_axis(xy_real, xy_real.data.shape[1], axis=1)
        z_split = F.split_axis(z_pred, z_pred.data.shape[1], axis=1)
        # Vector v0 (neck -> nose) on the zx-plane. v0 = (a0, b0).
        a0 = z_split[9] - z_split[8]
        b0 = xy_split[9 * 2] - xy_split[8 * 2]
        n0 = F.sqrt(a0 * a0 + b0 * b0)
        # Vector v1 (right shoulder -> left shoulder) on the zx-plane. v1 = (a1, b1).
        a1 = z_split[14] - z_split[11]
        b1 = xy_split[14 * 2] - xy_split[11 * 2]
        n1 = F.sqrt(a1 * a1 + b1 * b1)
        # Return the sine of the signed angle between v0 and v1:
        # the 2D cross product divided by the product of the norms.
        return (a0 * b1 - a1 * b0) / (n0 * n1) 
Example #4
Source File: main.py    From deep_dream_3d with MIT License
def get_var_line_length_loss(vertices, faces):
    vertices = vertices[faces]
    num_faces = vertices.shape[0]
    v01 = vertices[:, 1] - vertices[:, 0]
    v12 = vertices[:, 2] - vertices[:, 1]
    v20 = vertices[:, 0] - vertices[:, 2]
    n01_square = cf.sum(cf.square(v01), axis=1)
    n12_square = cf.sum(cf.square(v12), axis=1)
    n20_square = cf.sum(cf.square(v20), axis=1)
    n01 = cf.sqrt(n01_square)
    n12 = cf.sqrt(n12_square)
    n20 = cf.sqrt(n20_square)
    mean_of_square = (cf.sum(n01_square) + cf.sum(n12_square) + cf.sum(n20_square)) / (3. * num_faces)
    square_of_mean = cf.square((cf.sum(n01) + cf.sum(n12) + cf.sum(n20)) / (3. * num_faces))
    return (mean_of_square - square_of_mean) * num_faces 
Example #5
Source File: train_own_dataset.py    From chainer-chemistry with MIT License
def rmse(x0, x1):
    return F.sqrt(F.mean_squared_error(x0, x1)) 
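A quick usage sketch with illustrative values (assumes import numpy as np and chainer.functions as F, as in the source file):

x0 = np.array([[0.5], [1.5]], dtype=np.float32)
x1 = np.array([[1.0], [1.0]], dtype=np.float32)
print(rmse(x0, x1))   # mean squared error is 0.25, so RMSE = 0.5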
Example #6
Source File: train_qm9.py    From chainer-chemistry with MIT License
def rmse(x0, x1):
    return F.sqrt(F.mean_squared_error(x0, x1)) 
Example #7
Source File: molnet_config.py    From chainer-chemistry with MIT License
def rmse(x, t):
    return F.sqrt(mse(x, t)) 
Example #8
Source File: utils.py    From kiss with GNU General Public License v3.0
def get_bbox_side_lengths(self, grids, image_size):
        x0, x1, x2, _, y0, y1, y2, _ = self.get_corners(grids, image_size)

        width = F.sqrt(
            F.square(x1 - x0) + F.square(y1 - y0)
        )

        height = F.sqrt(
            F.square(x2 - x0) + F.square(y2 - y0)
        )
        return width, height 
Example #9
Source File: nets.py    From dynamic_routing_between_capsules with MIT License
def get_norm(vs):
    return F.sqrt(F.sum(vs ** 2, axis=1)) 
Example #10
Source File: nets.py    From dynamic_routing_between_capsules with MIT License
def squash(ss):
    ss_norm2 = F.sum(ss ** 2, axis=1, keepdims=True)
    # Naive formulation, kept for reference:
    #   ss_norm2 = F.broadcast_to(ss_norm2, ss.shape)
    #   vs = ss_norm2 / (1. + ss_norm2) * ss / F.sqrt(ss_norm2)
    # The efficient version below folds both divisions into a single factor,
    # ||s|| / (1 + ||s||^2), before broadcasting.
    norm_div_1pnorm2 = F.sqrt(ss_norm2) / (1. + ss_norm2)
    norm_div_1pnorm2 = F.broadcast_to(norm_div_1pnorm2, ss.shape)
    vs = norm_div_1pnorm2 * ss
    # vs shape: (batchsize, 16, 10)
    return vs 
Example #11
Source File: net.py    From chainer-gan-lib with MIT License
def minibatch_std(x):
    m = F.mean(x, axis=0, keepdims=True)
    v = F.mean((x - F.broadcast_to(m, x.shape))*(x - F.broadcast_to(m, x.shape)), axis=0, keepdims=True)
    std = F.mean(F.sqrt(v + 1e-8), keepdims=True)
    std = F.broadcast_to(std, (x.shape[0], 1, x.shape[2], x.shape[3]))
    return F.concat([x, std], axis=1) 
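A shape-level usage sketch (illustrative): the function appends one extra channel holding the scalar minibatch standard deviation, broadcast over all spatial positions (assumes import numpy as np and import chainer):

x = chainer.Variable(np.random.randn(4, 3, 8, 8).astype(np.float32))
y = minibatch_std(x)   # y.shape == (4, 4, 8, 8); y[:, 3] is the shared std map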
Example #12
Source File: net.py    From chainer-gan-lib with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad):
        w = chainer.initializers.Normal(1.0) # equalized learning rate
        self.inv_c = np.sqrt(2.0/(in_ch))
        super(EqualizedDeconv2d, self).__init__()
        with self.init_scope():
            self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad, initialW=w) 
Example #13
Source File: pggan.py    From chainer-stylegan with MIT License
def feature_vector_normalization(x, eps=1e-8):
    # x: (B, C, H, W)
    alpha = 1.0 / F.sqrt(F.mean(x * x, axis=1, keepdims=True) + eps)
    return F.broadcast_to(alpha, x.data.shape) * x 
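This is the pixelwise feature-vector normalization ("PixelNorm") used in progressive-growing GANs: each spatial position's channel vector is rescaled so the mean of its squared entries is approximately 1. A quick check (illustrative):

x = chainer.Variable(np.random.randn(2, 8, 4, 4).astype(np.float32))
y = feature_vector_normalization(x)
# F.mean(y * y, axis=1) is close to 1 at every (batch, H, W) position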
Example #14
Source File: net.py    From chainer-gan-lib with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad):
        w = chainer.initializers.Normal(1.0) # equalized learning rate
        self.inv_c = np.sqrt(2.0/(in_ch*ksize**2))
        super(EqualizedConv2d, self).__init__()
        with self.init_scope():
            self.c = L.Convolution2D(in_ch, out_ch, ksize, stride, pad, initialW=w) 
Example #15
Source File: net.py    From chainer-gan-lib with MIT License
def feature_vector_normalization(x, eps=1e-8):
    # x: (B, C, H, W)
    alpha = 1.0 / F.sqrt(F.mean(x*x, axis=1, keepdims=True) + eps)
    return F.broadcast_to(alpha, x.data.shape) * x 
Example #16
Source File: pggan.py    From chainer-stylegan with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad, nobias=False, gain=np.sqrt(2), lrmul=1):
        w = chainer.initializers.Normal(1.0/lrmul)  # equalized learning rate
        self.inv_c = gain * np.sqrt(1.0 / (in_ch * ksize ** 2))
        self.inv_c = self.inv_c * lrmul
        super(EqualizedConv2d, self).__init__()
        with self.init_scope():
            self.c = L.Convolution2D(in_ch, out_ch, ksize, stride, pad, initialW=w, nobias=nobias) 
Example #17
Source File: pggan.py    From chainer-stylegan with MIT License
def minibatch_std(x):
    m = F.mean(x, axis=0, keepdims=True)
    v = F.mean((x - F.broadcast_to(m, x.shape)) * (x - F.broadcast_to(m, x.shape)), axis=0, keepdims=True)
    std = F.mean(F.sqrt(v + 1e-8), keepdims=True)
    std = F.broadcast_to(std, (x.shape[0], 1, x.shape[2], x.shape[3]))
    return F.concat([x, std], axis=1) 
Example #18
Source File: MathMisc.py    From chainer-compiler with MIT License
def forward(self, x):
        y1 = F.sqrt(x)
        y2 = np.sqrt(x)
        return y1, y2 
Example #19
Source File: MathMisc.py    From chainer-compiler with MIT License
def main():
    np.random.seed(314)

    x = np.random.rand(6, 4).astype(np.float32)
    s_int = np.array(-10)
    s_float = np.array(10.0)

    testtools.generate_testcase(Sin(), [x], subname='sin')
    testtools.generate_testcase(Sinh(), [x], subname='sinh')
    testtools.generate_testcase(Sign(), [x], subname='sign')
    testtools.generate_testcase(Cos(), [x], subname='cos')
    testtools.generate_testcase(Cosh(), [x], subname='cosh')
    testtools.generate_testcase(Tan(), [x], subname='tan')
    testtools.generate_testcase(Tanh(), [x], subname='tanh')
    testtools.generate_testcase(ArcSin(), [x], subname='arcsin')
    testtools.generate_testcase(ArcCos(), [x], subname='arccos')
    testtools.generate_testcase(ArcTan(), [x], subname='arctan')
    testtools.generate_testcase(Exp(), [x], subname='exp')
    testtools.generate_testcase(Log(), [x], subname='log')
    testtools.generate_testcase(Clip(), [x], subname='clip')
    testtools.generate_testcase(ClipNp(), [x], subname='clip_np')
    testtools.generate_testcase(Abs(), [x], subname='abs')
    testtools.generate_testcase(AbsNp(), [x], subname='abs_np')
    testtools.generate_testcase(Sqrt(), [x], subname='sqrt')
    testtools.generate_testcase(Round(), [x], subname='round')
    testtools.generate_testcase(AbsBuiltin(), [x], subname='abs_builtin')
    testtools.generate_testcase(AbsBuiltin(), [s_float], subname='abs_builtin_scalar_float')
    testtools.generate_testcase(AbsBuiltin(), [s_int], subname='abs_builtin_scalar_int') 
Example #20
Source File: model_py.py    From models with MIT License
def _attn(self, q, k, v):
        w = F.batch_matmul(q.reshape(-1, *q.shape[-2:]),
                           k.reshape(-1, *k.shape[-2:]))
        if self.scale:
            w = w / math.sqrt(v.shape[-1])
        # TF implementation method: mask_attn_weights
        w = w * self.b.array[0] + -1e9 * (1 - self.b.array[0])
        w = F.softmax(w, axis=2)
        w = self.attn_dropout(w)
        return F.batch_matmul(w, v.reshape(-1, *v.shape[-2:]))\
                .reshape(v.shape[0], v.shape[1], v.shape[2], -1) 
Example #21
Source File: test_sqrt.py    From chainer with MIT License
def rsqrt(x, dtype):
    return numpy.reciprocal(numpy.sqrt(x, dtype=dtype), dtype=dtype) 
Example #22
Source File: loss_metrics.py    From see with GNU General Public License v3.0
def get_bbox_side_lengths(self, grids):
        x0, x1, x2, y0, y1, y2 = self.get_corners(grids)

        width = F.sqrt(
            F.square(x1 - x0) + F.square(y1 - y0)
        )

        height = F.sqrt(
            F.square(x2 - x0) + F.square(y2 - y0)
        )
        return width, height 
Example #23
Source File: pggan.py    From chainer-stylegan with MIT License
def __init__(self, in_ch, out_ch, initial_bias=None, nobias=False, gain=np.sqrt(2), lrmul=1):
        w = chainer.initializers.Normal(1.0/lrmul) # equalized learning rate
        self.inv_c = gain * np.sqrt(1.0 / in_ch)
        self.inv_c = self.inv_c * lrmul
        super(EqualizedLinear, self).__init__()
        with self.init_scope():
            self.c = L.Linear(in_ch, out_ch, initialW=w, initial_bias=initial_bias, nobias=nobias) 
Example #24
Source File: pggan.py    From chainer-stylegan with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad, nobias=False, gain=np.sqrt(2), lrmul=1):
        w = chainer.initializers.Normal(1.0/lrmul)  # equalized learning rate
        self.inv_c = gain * np.sqrt(1.0 / (in_ch))
        self.inv_c = self.inv_c * lrmul
        super(EqualizedDeconv2d, self).__init__()
        with self.init_scope():
            self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad, initialW=w, nobias=nobias) 
Example #25
Source File: net.py    From models with MIT License
def get_normalized_vector(d, xp=None, shape=None):
    if shape is None:
        shape = tuple(range(1, len(d.shape)))
    d_norm = d
    if xp is not None:
        d_norm = d / (1e-12 + xp.max(xp.abs(d), shape, keepdims=True))
        d_norm = d_norm / xp.sqrt(1e-6 + xp.sum(d_norm ** 2, shape, keepdims=True))
    else:
        d_term = 1e-12 + F.max(F.absolute(d), shape, keepdims=True)
        d_norm = d / F.broadcast_to(d_term, d.shape)
        # chain the second normalization onto the first, mirroring the xp branch
        d_term = F.sqrt(1e-6 + F.sum(d_norm ** 2, shape, keepdims=True))
        d_norm = d_norm / F.broadcast_to(d_term, d.shape)
    return d_norm 
Example #26
Source File: lm_nets.py    From models with MIT License
def get_normalized_vector(d, xp=None):
    shape = tuple(range(1, len(d.shape)))
    if xp is not None:
        d /= (1e-12 + xp.max(xp.abs(d), shape, keepdims=True))
        d /= xp.sqrt(1e-6 + xp.sum(d ** 2, shape, keepdims=True))
    else:
        d_term = 1e-12 + F.max(F.absolute(d), shape, keepdims=True)
        d /= F.broadcast_to(d_term, d.shape)
        d_term = F.sqrt(1e-6 + F.sum(d ** 2, shape, keepdims=True))
        d /= F.broadcast_to(d_term, d.shape)
    return d 
Example #27
Source File: lm_nets.py    From models with MIT License
def norm_by_freq(self, freq):
        word_embs = self.W
        mean = F.sum(freq * word_embs, axis=0, keepdims=True)
        mean = F.broadcast_to(mean, word_embs.shape)
        var = F.sum(freq * ((word_embs - mean) ** 2), axis=0, keepdims=True)
        var = F.broadcast_to(var, word_embs.shape)

        stddev = F.sqrt(1e-6 + var)
        word_embs_norm = (word_embs - mean) / stddev
        return word_embs_norm 
Example #28
Source File: mdn.py    From models with MIT License
def negative_log_likelihood(self, x, y):
        pi, mu, log_var = self.get_gaussian_params(x)

        # Likelihood over different Gaussians
        y = F.tile(y[:, None, :], (1, self.gaussian_mixtures, 1))
        pi = F.tile(F.expand_dims(pi, 2), (1, 1, self.input_dim))
        
        squared_sigma = F.exp(log_var)
        sigma = F.sqrt(squared_sigma)
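        # equivalently sigma = F.exp(0.5 * log_var); sqrt of the variance gives the stddev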
        prob = F.sum(pi * distributions.Normal(mu, sigma).prob(y), axis=1)
        
        negative_log_likelihood = -F.log(prob)
        return F.mean(negative_log_likelihood) 
Example #29
Source File: model_py.py    From models with MIT License
def gelu(x):
    return 0.5 * x * (1 + F.tanh(math.sqrt(2 / math.pi)
                                 * (x + 0.044715 * (x ** 3)))) 
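This is the widely used tanh approximation of GELU (Hendrycks & Gimpel, 2016): GELU(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).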
Example #30
Source File: updater.py    From chainer-PGGAN with MIT License
def update_core(self):
        opt_g = self.get_optimizer('gen')
        opt_d = self.get_optimizer('dis')

        xp = self.gen.xp

        # update discriminator
        x = self.get_iterator('main').next()
        x = xp.array(x)
        m = len(x)

        z = self.gen.z(m)
        x_tilde = self.gen(z, self.alpha).data
        
        epsilon = xp.random.rand(m, 1, 1, 1).astype('f')
        x_hat = Variable(epsilon * x + (1 - epsilon) * x_tilde)

        dis_x = self.dis(x, self.alpha)
        
        loss_d = self.dis(x_tilde, self.alpha) - dis_x

        g_d, = chainer.grad([self.dis(x_hat, self.alpha)], [x_hat], enable_double_backprop=True)
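        # the 1e-6 inside the sqrt keeps its gradient finite when the squared norm is ~0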
        g_d_norm = F.sqrt(F.batch_l2_norm_squared(g_d) + 1e-6)
        g_d_norm_delta = g_d_norm - 1
        loss_l = self.lam * g_d_norm_delta * g_d_norm_delta
        
        loss_dr = self.epsilon_drift * dis_x * dis_x

        dis_loss = F.mean(loss_d + loss_l + loss_dr)

        self.dis.cleargrads()
        dis_loss.backward()
        opt_d.update()
        
        # update generator
        z = self.gen.z(m)
        x = self.gen(z, self.alpha)
        gen_loss = F.average(-self.dis(x, self.alpha))

        self.gen.cleargrads()
        gen_loss.backward()
        opt_g.update()

        reporter.report({'loss_d': F.mean(loss_d), 'loss_l': F.mean(loss_l), 'loss_dr': F.mean(loss_dr), 'dis_loss': dis_loss, 'gen_loss': gen_loss, 'alpha': self.alpha})

        self.alpha = self.alpha + self.delta