Python chainer.functions.select_item() Examples

The following are 16 code examples of chainer.functions.select_item(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out all other available functions and classes of the chainer.functions module.
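For reference, select_item(x, t) takes a 2-D array x of shape (N, C) and an integer index array t of shape (N,), and returns a 1-D variable whose i-th element is x[i, t[i]] (Example #13 below checks exactly this against NumPy fancy indexing). A minimal sketch, with made-up toy values:

import numpy as np
import chainer.functions as F

x = np.array([[0.1, 0.9, 0.0],
              [0.8, 0.1, 0.1]], dtype=np.float32)
t = np.array([1, 0], dtype=np.int32)

# Picks x[i, t[i]] for each row i, like x[np.arange(len(t)), t] in NumPy.
y = F.select_item(x, t)
print(y.array)  # [0.9 0.8]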
Example #1
Source File: nin.py    From chainer-compiler with MIT License
def softmax_cross_entropy(self, y, t):
    import numpy as np

    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
    #                                    name='batch_size')
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    batch_size = chainer.Variable(np.array(t.shape[0], np.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    loss = -log_prob / batch_size
    loss.name = 'loss'
    return loss
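Note that in Examples #1 through #4, t is a one-hot target array (Example #2 recovers integer labels with xp.argmax(t, axis=1)), so summing log_softmax * t over the class axis yields the same values as the commented-out F.select_item call would. A small sketch of the equivalence, using made-up toy values:

import numpy as np
import chainer.functions as F

y = np.array([[1.0, 2.0, 3.0],
              [3.0, 1.0, 0.0]], dtype=np.float32)
t_onehot = np.array([[0, 0, 1],
                     [1, 0, 0]], dtype=np.float32)

log_softmax = F.log_softmax(y)
# Masking with the one-hot targets and summing over the class axis...
masked = F.sum(log_softmax * t_onehot, axis=1)
# ...matches select_item with the corresponding integer labels.
picked = F.select_item(log_softmax, t_onehot.argmax(axis=1).astype(np.int32))
assert np.allclose(masked.array, picked.array)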
Example #2
Source File: gen_mnist_mlp.py    From chainer-compiler with MIT License
def forward(self, x, t):
    xp = cuda.get_array_module(x)
    y = self.predictor(x)
    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    batch_size = chainer.Variable(xp.array(t.size, xp.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    loss = -log_prob / batch_size
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
        reporter.report({'accuracy': acc}, self)
    loss.name = 'loss'
    return loss
Example #3
Source File: resnet50.py    From chainer-compiler with MIT License
def softmax_cross_entropy(self, y, t):
    import numpy as np

    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
    #                                    name='batch_size')
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    batch_size = chainer.Variable(self.xp.array(t.shape[0], np.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    loss = -log_prob / batch_size
    loss.name = 'loss'
    return loss
Example #4
Source File: alex.py    From chainer-compiler with MIT License
def softmax_cross_entropy(self, y, t):
    import numpy as np

    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
    #                                    name='batch_size')
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    batch_size = chainer.Variable(np.array(t.shape[0], np.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    loss = -log_prob / batch_size
    loss.name = 'loss'
    return loss
Example #5
Source File: dqn_cartpole.py    From chainer with MIT License
def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):
    """Update a Q-function with given samples and a target Q-function."""
    dtype = chainer.get_dtype()
    xp = Q.xp
    obs = xp.asarray([sample[0] for sample in samples], dtype=dtype)
    action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)
    reward = xp.asarray([sample[2] for sample in samples], dtype=dtype)
    done = xp.asarray([sample[3] for sample in samples], dtype=dtype)
    obs_next = xp.asarray([sample[4] for sample in samples], dtype=dtype)
    # Predicted values: Q(s,a)
    y = F.select_item(Q(obs), action)
    # Target values: r + gamma * max_b Q(s',b)
    with chainer.no_backprop_mode():
        if target_type == 'dqn':
            next_q = F.max(target_Q(obs_next), axis=1)
        elif target_type == 'double_dqn':
            next_q = F.select_item(target_Q(obs_next),
                                   F.argmax(Q(obs_next), axis=1))
        else:
            raise ValueError('Unsupported target_type: {}'.format(target_type))
        target = reward + gamma * (1 - done) * next_q
    loss = mean_clipped_loss(y, target)
    Q.cleargrads()
    loss.backward()
    opt.update() 
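mean_clipped_loss is defined elsewhere in dqn_cartpole.py. A plausible definition consistent with the name, using Chainer's Huber loss (an assumption for illustration, not necessarily the file's exact code):

import chainer.functions as F

def mean_clipped_loss(y, t):
    # Huber loss clips the gradient of large errors to +/- delta.
    return F.mean(F.huber_loss(y, t, delta=1.0, reduce='no'))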
Example #6
Source File: distribution.py    From chainerrl with MIT License
def prob(self, x):
    return F.select_item(self.all_prob, x)
Example #7
Source File: distribution.py    From chainerrl with MIT License
def log_prob(self, x):
    return F.select_item(self.all_log_prob, x)
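These two methods come from chainerrl's categorical distributions, where all_prob and all_log_prob hold per-action (log-)probabilities of shape (batch, n_actions), and select_item picks out the entry for each chosen action. A hedged usage sketch (the toy logits are made up):

import numpy as np
from chainerrl.distribution import SoftmaxDistribution

dist = SoftmaxDistribution(np.array([[1.0, 2.0, 0.5]], dtype=np.float32))
actions = np.array([1], dtype=np.int32)
print(dist.log_prob(actions).array)  # log-probability of action 1, per batch row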
Example #8
Source File: action_value.py    From chainerrl with MIT License
def max(self):
    with chainer.force_backprop_mode():
        return F.select_item(self.q_values, self.greedy_actions)
Example #9
Source File: action_value.py    From chainerrl with MIT License
def evaluate_actions(self, actions):
    return F.select_item(self.q_values, actions)
Example #10
Source File: action_value.py    From chainerrl with MIT License
def max(self):
    with chainer.force_backprop_mode():
        return F.select_item(self.q_values, self.greedy_actions)
Example #11
Source File: policy_output.py    From async-rl with MIT License
def sampled_actions_log_probs(self):
    return F.select_item(
        self.log_probs,
        chainer.Variable(np.asarray(self.action_indices, dtype=np.int32)))
Example #12
Source File: gen_extra_test.py    From chainer-compiler with MIT License
def gen_select_item_test(test_name):
    input = V(aranges(4, 3))
    indices = V([1, 2, 0, 1])
    output = F.select_item(input, indices)

    node = onnx.helper.make_node(
        'ChainerSelectItem',
        inputs=['input', 'indices'],
        outputs=['output'])
    expect(node, inputs=[input, indices], outputs=[output], name=test_name) 
Example #13
Source File: test_select_item.py    From chainer with MIT License
def check_forward(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    y = functions.select_item(x, t)
    y_exp = cuda.to_cpu(x_data)[range(t_data.size), cuda.to_cpu(t_data)]

    self.assertEqual(y.data.dtype, self.dtype)
    numpy.testing.assert_equal(cuda.to_cpu(y.data), y_exp)
Example #14
Source File: test_select_item.py    From chainer with MIT License
def check_backward(self, x_data, t_data, gy_data):
    gradient_check.check_backward(
        functions.select_item,
        (x_data, t_data), gy_data, eps=0.01, dtype='d',
        **self.check_backward_options)
Example #15
Source File: test_select_item.py    From chainer with MIT License
def check_double_backward(self, x_data, t_data, gy_data, ggx_data):
    def f(x):
        return functions.select_item(x, t_data)

    gradient_check.check_double_backward(
        f, x_data, gy_data, ggx_data, eps=0.01, dtype='d',
        **self.check_backward_options)
Example #16
Source File: test_select_item.py    From chainer with MIT License
def check_value_check(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)

    if self.valid:
        # Check that it raises nothing
        functions.select_item(x, t)
    else:
        with self.assertRaises(ValueError):
            functions.select_item(x, t)