Python chainer.functions.exp() Examples

The following are 29 code examples of chainer.functions.exp(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.functions.
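Before the examples, a minimal sketch of the function itself (assuming a standard Chainer + NumPy setup): chainer.functions.exp applies the elementwise exponential and participates in autograd like any other Chainer function.

import numpy as np
import chainer.functions as F
from chainer import Variable

x = Variable(np.array([[0.0, 1.0], [-1.0, 2.0]], dtype=np.float32))
y = F.exp(x)                      # elementwise e**x; returns a Variable of the same shape
y.grad = np.ones_like(y.array)    # seed the backward pass
y.backward()                      # d/dx exp(x) = exp(x), so x.grad equals y.array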
Example #1
Source File: yolov2.py    From YOLOv2 with MIT License
def predict(self, input_x):
        output = self.predictor(input_x)
        batch_size, input_channel, input_h, input_w = input_x.shape
        batch_size, _, grid_h, grid_w = output.shape
        x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
        x = F.sigmoid(x) # activation for x
        y = F.sigmoid(y) # activation for y
        conf = F.sigmoid(conf) # activation for conf
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob) # activation for prob
        prob = F.transpose(prob, (0, 2, 1, 3, 4))

        # convert x, y, w, h to absolute coordinates
        x_shift = Variable(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
        y_shift = Variable(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
        w_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape))
        h_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape))
        #x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
        box_x = (x + x_shift) / grid_w
        box_y = (y + y_shift) / grid_h
        box_w = F.exp(w) * w_anchor / grid_w
        box_h = F.exp(h) * h_anchor / grid_h

        return box_x, box_y, box_w, box_h, conf, prob 
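A design note on this example: the exponentials in box_w and box_h are the standard YOLOv2 box parameterization. The network predicts a log-scale offset relative to each anchor box, and F.exp maps it back to a strictly positive multiplicative factor, so decoded widths and heights can never be negative.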
Example #2
Source File: state_q_functions.py    From chainerrl with MIT License
def __call__(self, state):
        h = state
        for layer in self.hidden_layers:
            h = F.relu(layer(h))
        v = self.v(h)
        mu = self.mu(h)

        if self.scale_mu:
            mu = scale_by_tanh(mu, high=self.action_space.high,
                               low=self.action_space.low)

        mat_diag = F.exp(self.mat_diag(h))
        if hasattr(self, 'mat_non_diag'):
            mat_non_diag = self.mat_non_diag(h)
            tril = lower_triangular_matrix(mat_diag, mat_non_diag)
            mat = F.matmul(tril, tril, transb=True)
        else:
            mat = F.expand_dims(mat_diag ** 2, axis=2)
        return QuadraticActionValue(
            mu, mat, v, min_action=self.action_space.low,
            max_action=self.action_space.high) 
Example #3
Source File: state_q_functions.py    From chainerrl with MIT License
def __call__(self, state):
        h = self.hidden_layers(state)
        v = self.v(h)
        mu = self.mu(h)

        if self.scale_mu:
            mu = scale_by_tanh(mu, high=self.action_space.high,
                               low=self.action_space.low)

        mat_diag = F.exp(self.mat_diag(h))
        if hasattr(self, 'mat_non_diag'):
            mat_non_diag = self.mat_non_diag(h)
            tril = lower_triangular_matrix(mat_diag, mat_non_diag)
            mat = F.matmul(tril, tril, transb=True)
        else:
            mat = F.expand_dims(mat_diag ** 2, axis=2)
        return QuadraticActionValue(
            mu, mat, v, min_action=self.action_space.low,
            max_action=self.action_space.high) 
Example #4
Source File: coupling.py    From graph-nvp with MIT License
def _s_t_functions(self, x, adj):
        y = self.rgcn(x, adj)
        batch_size = x.shape[0]
        if self.apply_batch_norm:
            y = self.batch_norm(y)
        y = self.lin1(y)
        y = F.tanh(y)
        y = self.lin2(y) * F.exp(self.scale_factor*2)
        s = y[:, :self.out_size]
        t = y[:, self.out_size:]
        s = F.sigmoid(s + 2)

        t = F.reshape(t, [batch_size, 1, self.out_size])
        t = F.broadcast_to(t, [batch_size, int(self.num_nodes / self.num_masked_cols), self.out_size])
        s = F.reshape(s, [batch_size, 1, self.out_size])
        s = F.broadcast_to(s, [batch_size, int(self.num_nodes / self.num_masked_cols), self.out_size])
        return s, t 
Example #5
Source File: agents.py    From EPG with MIT License
def _pi_logp(self, obs, acts):
        mean, logstd = self._pi_f(obs)
        return (
                - 0.5 * np.log(2.0 * np.pi) * acts.shape[1]
                - 0.5 * F.sum(F.square((acts - mean) / (F.exp(logstd) + 1e-8)), axis=1)
                - F.sum(logstd, axis=1)
        ) 
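For reference, this evaluates the log-density of a diagonal Gaussian policy (the 1e-8 term guards against division by a near-zero standard deviation):

$$\log \pi(a \mid s) = -\frac{k}{2}\log(2\pi) - \sum_{i=1}^{k} \log \sigma_i - \frac{1}{2}\sum_{i=1}^{k}\left(\frac{a_i - \mu_i}{\sigma_i}\right)^2,$$

where $\mu$ and $\log\sigma$ are the mean and logstd returned by self._pi_f and $k$ is acts.shape[1].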
Example #6
Source File: coupling.py    From graph-nvp with MIT License
def _s_t_functions(self, x, adj):
        y = self.rgcn(x, adj)
        if self.apply_batch_norm:
            y = self.batch_norm(y)
        y = self.lin1(y)
        y = F.tanh(y)
        y = self.lin2(y) * F.exp(self.scale_factor*2)
        return y 
Example #7
Source File: coupling.py    From graph-nvp with MIT License
def _s_t_functions(self, adj):
        adj = F.reshape(adj, (adj.shape[0], -1))
        x = adj
        if self.apply_batch_norm:
            x = self.batch_norm(x)
        y = self.mlp(x)
        y = F.tanh(y)
        y = self.lin(y) * F.exp(self.scale_factor * 2)

        y = F.reshape(y, [y.shape[0], self.num_relations, self.num_nodes, 1])
        return y 
Example #8
Source File: coupling.py    From graph-nvp with MIT License
def _s_t_functions(self, adj):
        x = F.reshape(adj, (adj.shape[0], -1))
        if self.apply_batch_norm:
            x = self.batch_norm(x)
        y = self.mlp(x)
        y = F.tanh(y)
        y = self.lin(y) * F.exp(self.scale_factor * 2)
        s = y[:, :self.out_size]
        t = y[:, self.out_size:]
        s = F.reshape(s, [y.shape[0], self.num_relations, self.num_nodes, 1])
        t = F.reshape(t, [y.shape[0], self.num_relations, self.num_nodes, 1])
        return s, t 
Example #9
Source File: megnet_softplus.py    From chainer-chemistry with MIT License
def megnet_softplus(x):
    """Modified softplus function used by MEGNet

    The original implementation is below.
    https://github.com/materialsvirtuallab/megnet/blob/f91773f0f3fa8402b494638af9ef2ed2807fcba7/megnet/activations.py#L6

    Args:
        x (Variable): Input variable
    Returns:
        output (Variable): Output variable whose shape is the same as `x`
    """
    return functions.relu(x) + \
        functions.log(0.5 * functions.exp(-functions.absolute(x)) + 0.5) 
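For intuition: this expression is an overflow-safe rewriting of the shifted softplus softplus(x) - log(2), which passes through zero at x = 0. A small numpy sketch (illustrative, not from the repository) verifying the identity on modest inputs:

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
stable = np.maximum(x, 0) + np.log(0.5 * np.exp(-np.abs(x)) + 0.5)
naive = np.log1p(np.exp(x)) - np.log(2.0)   # softplus(x) - log(2); overflows for large x
assert np.allclose(stable, naive)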
Example #10
Source File: schnet_update.py    From chainer-chemistry with MIT License
def __call__(self, h, dist):
        """main calculation

        Args:
            h (numpy.ndarray): axis 0 represents minibatch index,
                axis 1 represents atom_index and axis 2 represents
                feature dimension.
            dist (numpy.ndarray): axis 0 represents minibatch index,
                axis 1 and 2 represent distance between atoms.
        """
        mb, atom, ch = h.shape
        if ch != self.hidden_dim:
            raise ValueError('h.shape[2] {} and hidden_dim {} must be same!'
                             .format(ch, self.hidden_dim))
        embedlist = self.xp.arange(
            self.num_rbf).astype('f') * self.radius_resolution
        dist = functions.reshape(dist, (mb, atom, atom, 1))
        dist = functions.broadcast_to(dist, (mb, atom, atom, self.num_rbf))
        dist = functions.exp(- self.gamma * (dist - embedlist) ** 2)
        dist = functions.reshape(dist, (-1, self.num_rbf))
        dist = self.dense1(dist)
        dist = shifted_softplus(dist)
        dist = self.dense2(dist)
        dist = shifted_softplus(dist)
        dist = functions.reshape(dist, (mb, atom, atom, self.hidden_dim))
        h = functions.reshape(h, (mb, atom, 1, self.hidden_dim))
        h = functions.broadcast_to(h, (mb, atom, atom, self.hidden_dim))
        h = functions.sum(h * dist, axis=1)
        return h 
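The functions.exp call above performs a Gaussian radial basis expansion: each scalar distance d becomes the feature vector exp(-gamma * (d - mu_k)**2) over centers mu_k spaced radius_resolution apart, which the two dense layers then turn into continuous-filter weights. A minimal numpy sketch of that expansion (parameter values are illustrative, not taken from the constructor):

import numpy as np

num_rbf, radius_resolution, gamma = 300, 0.1, 10.0
centers = np.arange(num_rbf, dtype=np.float32) * radius_resolution   # RBF centers mu_k
d = np.float32(1.5)                                                  # one pairwise distance
features = np.exp(-gamma * (d - centers) ** 2)                       # shape (num_rbf,)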
Example #11
Source File: convolution_rbm.py    From SeRanet with MIT License
def sigmoid(x):
        xp = cuda.get_array_module(x.data)
        return 1. / (1 + xp.exp(-x)) 
Example #12
Source File: convolution_rbm.py    From SeRanet with MIT License
def free_energy(self, v):
        """
        :param v: input data (training data); Variable of shape (batch_size, in_channels, image_height, image_width)
        :return: scalar
        """
        batch_size = v.data.shape[0]
        in_channels = self.in_channels
        real = self.real
        if real == 0:
            '''
            visible layer is 0, 1 (bit)
            vbias_term = 1 * SUM(a(i) * v(i))
            '''
            v_sum = F.sum(v, axis=(2, 3))  # sum over image_height & image_width
            # Originally this should return a per-batch sum, but it returns a scalar
            # (the sum over all batches), since everything is summed at the end anyway.
            vbias_term = F.sum(F.matmul(v_sum, self.conv.a))
            wx_b = self.conv(v)

        else:
            '''
            visible layer takes real value
            vbias_term = 0.5 * SUM((v(i)-a(i)) * (v(i) - a(i)))
            '''
            #TODO: check
            #m = Variable(xp.ones((batch_size, 1), dtype=xp.float32))
            n = F.reshape(self.conv.a, (1, in_channels, 1, 1))
            xp = cuda.get_array_module(n.data)
            std_ch = xp.reshape(self.std, (1, in_channels, 1, 1))

            #v_ = v - F.matmul(m, n)
            v_ = (v - F.broadcast_to(n, v.data.shape)) / std_ch
            vbias_term = F.sum(0.5 * v_ * v_)
            wx_b = self.conv(v / std_ch)


        hidden_term = F.sum(F.log(1 + F.exp(wx_b)))
        # print('vbias = ', vbias_term.data, ', hidden = ', hidden_term.data, 'F.exp(wx_b) = ', F.exp(wx_b).data)
        return - vbias_term - hidden_term 
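One caveat: F.log(1 + F.exp(wx_b)) overflows once wx_b grows large. Chainer ships functions.softplus, which computes the same quantity stably, so an equivalent hidden term (a sketch, not the original author's code) would be:

hidden_term = F.sum(F.softplus(wx_b))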
Example #13
Source File: mdn.py    From models with MIT License
def negative_log_likelihood(self, x, y):
        pi, mu, log_var = self.get_gaussian_params(x)

        # Likelihood over different Gaussians
        y = F.tile(y[:, None, :], (1, self.gaussian_mixtures, 1))
        pi = F.tile(F.expand_dims(pi, 2), (1, 1, self.input_dim))
        
        squared_sigma = F.exp(log_var)
        sigma = F.sqrt(squared_sigma)
        prob = F.sum(pi * distributions.Normal(mu, sigma).prob(y), axis=1)
        
        negative_log_likelihood = -F.log(prob)
        return F.mean(negative_log_likelihood) 
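Because F.log(F.sum(...)) underflows when every mixture component assigns near-zero density, a numerically safer variant (a sketch under the same tiled shapes, using Chainer's logsumexp and the distribution's log_prob) is:

log_prob = F.logsumexp(F.log(pi) + distributions.Normal(mu, sigma).log_prob(y), axis=1)
negative_log_likelihood = -F.mean(log_prob)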
Example #14
Source File: test_exponential.py    From chainer with MIT License
def forward_expected(self, inputs):
        x, = inputs
        expected = numpy.exp(x)
        expected = utils.force_array(expected)
        return expected, 
Example #15
Source File: test_exponential.py    From chainer with MIT License
def forward(self, inputs, device):
        x, = inputs
        return functions.exp(x), 
Example #16
Source File: test_cupy_memory_profile.py    From chainer with MIT License
def setUp(self):
        cuda.memory_pool.free_all_blocks()
        self.h = function_hooks.CupyMemoryProfileHook()
        f1 = functions.exp
        f2 = functions.relu
        self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
        x = cuda.to_gpu(self.x)
        with self.h:
            f1(chainer.Variable(x))
            f1(chainer.Variable(x))
            f2(chainer.Variable(x))
            f2(chainer.Variable(x)) 
Example #17
Source File: agents.py    From EPG with MIT License
def _logp(self, params, acts):
        mean, logstd = params
        return (
                - 0.5 * np.log(2.0 * np.pi) * acts.shape[1]
                - 0.5 * F.sum(F.square((acts - mean) / (F.exp(logstd) + 1e-8)), axis=1)
                - F.sum(logstd, axis=1)
        ) 
Example #18
Source File: agents.py    From EPG with MIT License
def _compute_ppo_loss(self, obs, acts, at, vt, old_params):
        params = self._pi_f(obs)
        cv = F.flatten(self._vf_f(obs))
        ratio = F.exp(self._logp(params, acts) - self._logp(old_params, acts))
        surr1 = ratio * at
        surr2 = F.clip(ratio, 1 - self._ppo_clipparam, 1 + self._ppo_clipparam) * at
        ppo_surr_loss = (
                -sym_mean(F.minimum(surr1, surr2))
                + self._ppo_klcoeff * sym_mean(self.kl(old_params, params))
                + sym_mean(F.square(cv - vt))
        )
        return ppo_surr_loss 
Example #19
Source File: utils.py    From EPG with MIT License
def categorical_kl(params0, params1):
    params0 = params0[0]
    params1 = params1[0]
    assert params0.shape == params1.shape
    a0 = params0 - F.tile(F.max(params0, axis=1, keepdims=True), (1, 4))
    a1 = params1 - F.tile(F.max(params1, axis=1, keepdims=True), (1, 4))
    ea0 = F.exp(a0)
    ea1 = F.exp(a1)
    z0 = F.tile(F.sum(ea0, axis=1, keepdims=True), (1, 4))
    z1 = F.tile(F.sum(ea1, axis=1, keepdims=True), (1, 4))
    p0 = ea0 / z0
    return F.sum(p0 * (a0 - F.log(z0) - a1 + F.log(z1)), axis=1) 
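With the row-wise max subtracted for numerical stability, this computes the categorical KL divergence

$$D_{\mathrm{KL}}(p_0 \,\|\, p_1) = \sum_i p_0(i)\,\bigl(\log p_0(i) - \log p_1(i)\bigr), \qquad \log p(i) = a_i - \log \textstyle\sum_j e^{a_j},$$

where a are the shifted logits; the hard-coded F.tile(..., (1, 4)) calls assume a four-way discrete action space.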
Example #20
Source File: utils.py    From EPG with MIT License
def gaussian_kl(params0, params1):
    (mean0, logstd0), (mean1, logstd1) = params0, params1
    assert mean0.shape == logstd0.shape == mean1.shape == logstd1.shape
    return F.sum(
        logstd1 - logstd0 + (F.square(F.exp(logstd0)) + F.square(mean0 - mean1)) / (
                2.0 * F.square(F.exp(logstd1))) - 0.5,
        axis=1
    ) 
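This is the closed-form KL divergence between diagonal Gaussians, summed over action dimensions:

$$D_{\mathrm{KL}}(p_0 \,\|\, p_1) = \sum_i \Bigl[\log\frac{\sigma_{1,i}}{\sigma_{0,i}} + \frac{\sigma_{0,i}^2 + (\mu_{0,i} - \mu_{1,i})^2}{2\,\sigma_{1,i}^2} - \frac{1}{2}\Bigr], \qquad \sigma = e^{\mathrm{logstd}},$$

which is exactly what the F.square(F.exp(logstd)) terms above evaluate.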
Example #21
Source File: cn_agents.py    From cryptotrader with MIT License
def prob(self, x):
        return F.exp(self.log_prob(x)) 
Example #22
Source File: MathMisc.py    From chainer-compiler with MIT License
def main():
    np.random.seed(314)

    x = np.random.rand(6, 4).astype(np.float32)
    s_int = np.array(-10)
    s_float = np.array(10.0)

    testtools.generate_testcase(Sin(), [x], subname='sin')
    testtools.generate_testcase(Sinh(), [x], subname='sinh')
    testtools.generate_testcase(Sign(), [x], subname='sign')
    testtools.generate_testcase(Cos(), [x], subname='cos')
    testtools.generate_testcase(Cosh(), [x], subname='cosh')
    testtools.generate_testcase(Tan(), [x], subname='tan')
    testtools.generate_testcase(Tanh(), [x], subname='tanh')
    testtools.generate_testcase(ArcSin(), [x], subname='arcsin')
    testtools.generate_testcase(ArcCos(), [x], subname='arccos')
    testtools.generate_testcase(ArcTan(), [x], subname='arctan')
    testtools.generate_testcase(Exp(), [x], subname='exp')
    testtools.generate_testcase(Log(), [x], subname='log')
    testtools.generate_testcase(Clip(), [x], subname='clip')
    testtools.generate_testcase(ClipNp(), [x], subname='clip_np')
    testtools.generate_testcase(Abs(), [x], subname='abs')
    testtools.generate_testcase(AbsNp(), [x], subname='abs_np')
    testtools.generate_testcase(Sqrt(), [x], subname='sqrt')
    testtools.generate_testcase(Round(), [x], subname='round')
    testtools.generate_testcase(AbsBuiltin(), [x], subname='abs_builtin')
    testtools.generate_testcase(AbsBuiltin(), [s_float], subname='abs_builtin_scalar_float')
    testtools.generate_testcase(AbsBuiltin(), [s_int], subname='abs_builtin_scalar_int') 
Example #23
Source File: MathMisc.py    From chainer-compiler with MIT License
def forward(self, x):
        y1 = F.exp(x)
        return y1 
Example #24
Source File: trpo.py    From chainerrl with MIT License
def _compute_gain(self, log_prob, log_prob_old, entropy, advs):
        """Compute a gain to maximize."""
        prob_ratio = F.exp(log_prob - log_prob_old)
        mean_entropy = F.mean(entropy)
        surrogate_gain = F.mean(prob_ratio * advs)
        return surrogate_gain + self.entropy_coef * mean_entropy 
Example #25
Source File: soft_actor_critic.py    From chainerrl with MIT License
def __call__(self):
        """Return a temperature as a chainer.Variable."""
        return F.exp(self.log_temperature) 
Example #26
Source File: ppo.py    From chainerrl with MIT License
def _lossfun(self,
                 entropy, vs_pred, log_probs,
                 vs_pred_old, log_probs_old,
                 advs, vs_teacher):

        prob_ratio = F.exp(log_probs - log_probs_old)

        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
            ))
        loss_entropy = -F.mean(entropy)

        self.value_loss_record.append(float(loss_value_func.array))
        self.policy_loss_record.append(float(loss_policy.array))

        loss = (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
        )

        return loss 
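loss_policy above is the standard PPO clipped surrogate objective (negated because the optimizer minimizes):

$$L^{\mathrm{CLIP}} = \mathbb{E}_t\Bigl[\min\bigl(r_t A_t,\ \mathrm{clip}(r_t,\,1-\epsilon,\,1+\epsilon)\,A_t\bigr)\Bigr], \qquad r_t = \exp\bigl(\log\pi_\theta - \log\pi_{\theta_{\mathrm{old}}}\bigr),$$

with ε = self.clip_eps and A_t the advantages advs; F.exp recovers the probability ratio from the difference of log-probabilities.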
Example #27
Source File: distribution.py    From chainerrl with MIT License
def prob(self, x):
        return F.exp(self.log_prob(x)) 
Example #28
Source File: test_pretrained_models.py    From chainerrl with MIT License
def test_load_trpo(self):
        winit = chainerrl.initializers.Orthogonal(1.)
        winit_last = chainerrl.initializers.Orthogonal(1e-2)
        action_size = 3
        policy = chainer.Sequential(
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )

        vf = chainer.Sequential(
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 1, initialW=winit),
        )
        vf_opt = chainer.optimizers.Adam()
        vf_opt.setup(vf)

        agent = agents.TRPO(
            policy=policy,
            vf=vf,
            vf_optimizer=vf_opt,
            update_interval=5000,
            max_kl=0.01,
            conjugate_gradient_max_iter=20,
            conjugate_gradient_damping=1e-1,
            gamma=0.995,
            lambd=0.97,
            vf_epochs=5,
            entropy_coef=0)

        model, exists = download_model("TRPO", "Hopper-v2",
                                       model_type=self.pretrained_type)
        agent.load(model)
        if os.environ.get('CHAINERRL_ASSERT_DOWNLOADED_MODEL_IS_CACHED'):
            assert exists 
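A word on var_func=lambda x: F.exp(2 * x): the learned parameter x is the log standard deviation, and since var = std**2 = exp(log std)**2 = exp(2 * log std), exponentiating twice the parameter yields the variance directly (so var_param_init=0 gives unit variance).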
Example #29
Source File: test_pretrained_models.py    From chainerrl with MIT License
def _test_load_ppo(self, gpu):
        winit = chainerrl.initializers.Orthogonal(1.)
        winit_last = chainerrl.initializers.Orthogonal(1e-2)
        action_size = 3
        policy = chainer.Sequential(
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )

        vf = chainer.Sequential(
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 64, initialW=winit),
            F.tanh,
            L.Linear(None, 1, initialW=winit))

        model = links.Branched(policy, vf)

        opt = chainer.optimizers.Adam(3e-4, eps=1e-5)
        opt.setup(model)

        agent = agents.PPO(
            model,
            opt,
            obs_normalizer=None,
            gpu=gpu,
            update_interval=2048,
            minibatch_size=64,
            epochs=10,
            clip_eps_vf=None,
            entropy_coef=0,
            standardize_advantages=True,
            gamma=0.995,
            lambd=0.97)

        model, exists = download_model("PPO", "Hopper-v2",
                                       model_type=self.pretrained_type)
        agent.load(model)
        if os.environ.get('CHAINERRL_ASSERT_DOWNLOADED_MODEL_IS_CACHED'):
            assert exists