Python chainer.functions.broadcast() Examples

The following are 15 code examples of chainer.functions.broadcast(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions/classes of the module chainer.functions.
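Before the project examples, here is a minimal, self-contained sketch (shapes chosen arbitrarily) of what the call does: F.broadcast() accepts any number of arrays or Variables and returns one Variable per input, each broadcast to their common shape.

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.zeros((3, 1), dtype=np.float32))
y = chainer.Variable(np.zeros((1, 4), dtype=np.float32))
bx, by = F.broadcast(x, y)  # each output is broadcast to the common shape
print(bx.shape, by.shape)   # (3, 4) (3, 4)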
Example #1
Source File: dueling_dqn.py    From chainerrl with MIT License
def __call__(self, x):
        h = x
        for l in self.conv_layers:
            h = self.activation(l(h))

        # Advantage
        batch_size = x.shape[0]
        ya = self.a_stream(h)
        mean = F.reshape(
            F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
        ya, mean = F.broadcast(ya, mean)
        ya -= mean

        # State value
        ys = self.v_stream(h)

        ya, ys = F.broadcast(ya, ys)
        q = ya + ys
        return action_value.DiscreteActionValue(q) 
Example #2
Source File: dueling_dqn.py    From chainerrl with MIT License
def __call__(self, x):
        h = x
        for l in self.conv_layers:
            h = self.activation(l(h))

        # Advantage
        batch_size = x.shape[0]

        h = self.activation(self.main_stream(h))
        h_a, h_v = F.split_axis(h, 2, axis=-1)
        ya = F.reshape(self.a_stream(h_a),
                       (batch_size, self.n_actions, self.n_atoms))

        mean = F.sum(ya, axis=1, keepdims=True) / self.n_actions

        ya, mean = F.broadcast(ya, mean)
        ya -= mean

        # State value
        ys = F.reshape(self.v_stream(h_v), (batch_size, 1, self.n_atoms))
        ya, ys = F.broadcast(ya, ys)
        q = F.softmax(ya + ys, axis=2)

        return action_value.DistributionalDiscreteActionValue(q, self.z_values) 
Example #3
Source File: embed_mixture.py    From lda2vec with MIT License
def proportions(self, doc_ids, softmax=False):
        """ Given an array of document indices, return a vector
        for each document of just the unnormalized topic weights.

        Returns:
            doc_weights : chainer.Variable
                Two dimensional topic weights of each document.
        """
        w = self.weights(doc_ids)
        if softmax:
            size = w.data.shape
            mask = self.xp.random.random_integers(0, 1, size=size)
            y = (F.softmax(w * self.temperature) *
                 Variable(mask.astype('float32')))
            norm, y = F.broadcast(F.expand_dims(F.sum(y, axis=1), 1), y)
            return y / (norm + 1e-7)
        else:
            return w 
Example #4
Source File: model.py    From graph-nvp with MIT License
def __call__(self, adj, x):
        h = F.broadcast(x)
        # add uniform noise to node feature matrices
        if chainer.config.train:
            h += self.xp.random.uniform(0, 0.9, x.shape)
        adj = F.broadcast(adj)
        sum_log_det_jacs_x = F.broadcast(self.xp.zeros([h.shape[0]], dtype=self.xp.float32))
        sum_log_det_jacs_adj = F.broadcast(self.xp.zeros([h.shape[0]], dtype=self.xp.float32))
        # forward step of channel-coupling layers
        for i in range(self.hyperparams.num_coupling['channel']):
            h, log_det_jacobians = self.clinks[i](h, adj)
            sum_log_det_jacs_x += log_det_jacobians
        # add uniform noise to adjacency tensors
        if chainer.config.train:
            adj += self.xp.random.uniform(0, 0.9, adj.shape)
        # forward step of adjacency-coupling
        for i in range(self.hyperparams.num_coupling['channel'], len(self.clinks)):
            adj, log_det_jacobians = self.clinks[i](adj)
            sum_log_det_jacs_adj += log_det_jacobians

        adj = F.reshape(adj, (adj.shape[0], -1))
        h = F.reshape(h, (h.shape[0], -1))
        out = [h, adj]
        return out, [sum_log_det_jacs_x, sum_log_det_jacs_adj] 
Example #5
Source File: q_functions.py    From baselines with MIT License
def __call__(self, x):
        h = x
        for l in self.conv_layers:
            h = self.activation(l(h))

        # Advantage
        batch_size = x.shape[0]
        ya = self.a_stream(h)
        mean = F.reshape(F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
        ya, mean = F.broadcast(ya, mean)
        ya -= mean

        # State value
        ys = self.v_stream(h)

        ya, ys = F.broadcast(ya, ys)
        q = ya + ys
        return chainerrl.action_value.DiscreteActionValue(q) 
Example #6
Source File: q_functions.py    From baselines with MIT License
def __call__(self, x):
        h = x
        for l in self.conv_layers:
            h = self.activation(l(h))

        # Advantage
        batch_size = x.shape[0]

        h = self.activation(self.main_stream(h))
        h_a, h_v = F.split_axis(h, 2, axis=-1)
        ya = F.reshape(self.a_stream(h_a), (batch_size, self.n_actions, self.n_atoms))

        mean = F.sum(ya, axis=1, keepdims=True) / self.n_actions

        ya, mean = F.broadcast(ya, mean)
        ya -= mean

        # State value
        ys = F.reshape(self.v_stream(h_v), (batch_size, 1, self.n_atoms))
        ya, ys = F.broadcast(ya, ys)
        q = F.softmax(ya + ys, axis=2)

        return chainerrl.action_value.DistributionalDiscreteActionValue(q, self.z_values) 
Example #7
Source File: iqn.py    From chainerrl with MIT License
def compute_eltwise_huber_quantile_loss(y, t, taus, huber_loss_threshold=1.0):
    """Compute elementwise Huber losses for quantile regression.

    This is based on Algorithm 1 of https://arxiv.org/abs/1806.06923.

    This function assumes that both kinds of quantile thresholds,
    taus (used to compute y) and taus_prime (used to compute t), are i.i.d.
    samples from U([0,1]).

    Args:
        y (chainer.Variable): Quantile prediction from taus as a
            (batch_size, N)-shaped array.
        t (chainer.Variable or ndarray): Target values for quantile regression
            as a (batch_size, N_prime)-shaped array.
        taus (ndarray): Quantile thresholds used to compute y as a
            (batch_size, N)-shaped array.
        huber_loss_threshold (float): Threshold of Huber loss. In the IQN
            paper, this is denoted by kappa.

    Returns:
        chainer.Variable: Loss of shape (batch_size, N, N_prime)
    """
    assert y.shape == taus.shape
    # (batch_size, N) -> (batch_size, N, 1)
    y = F.expand_dims(y, axis=2)
    # (batch_size, N_prime) -> (batch_size, 1, N_prime)
    t = F.expand_dims(t, axis=1)
    # (batch_size, N) -> (batch_size, N, 1)
    taus = F.expand_dims(taus, axis=2)
    # Broadcast to (batch_size, N, N_prime)
    y, t, taus = F.broadcast(y, t, taus)
    I_delta = ((t.array - y.array) > 0).astype('f')
    eltwise_huber_loss = F.huber_loss(
        y, t, delta=huber_loss_threshold, reduce='no')
    eltwise_loss = abs(taus - I_delta) * eltwise_huber_loss
    return eltwise_loss 
Example #8
Source File: test_broadcast.py    From chainer with MIT License
def check_forward(self, data):
        xs = [chainer.Variable(x) for x in data]
        bxs = functions.broadcast(*xs)

        # When len(xs) == 1, function returns a Variable object
        if isinstance(bxs, chainer.Variable):
            bxs = (bxs,)

        for bx in bxs:
            self.assertEqual(bx.data.shape, self.out_shape)
            self.assertEqual(bx.data.dtype, self.dtype) 
Example #9
Source File: test_broadcast.py    From chainer with MIT License
def check_backward(self, data, grads):
        def f(*xs):
            return functions.broadcast(*xs)
        gradient_check.check_backward(
            f, data, grads, dtype=numpy.float64, **self.check_backward_options) 
Example #10
Source File: test_broadcast.py    From chainer with MIT License
def check_double_backward(self, data, grads, gg):
        if len(data) == 1:
            return

        gradient_check.check_double_backward(
            functions.broadcast, data, grads, gg, dtype=numpy.float64,
            **self.check_double_backward_options) 
Example #11
Source File: test_broadcast.py    From chainer with MIT License
def test_invalid_shape(self):
        x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
        y_data = numpy.zeros((1, 3, 4), dtype=numpy.float32)
        x = chainer.Variable(x_data)
        y = chainer.Variable(y_data)

        with self.assertRaises(type_check.InvalidType):
            functions.broadcast(x, y) 
Example #12
Source File: test_broadcast.py    From chainer with MIT License
def test_invalid_shape_fill(self):
        x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
        y_data = numpy.zeros(4, dtype=numpy.float32)
        x = chainer.Variable(x_data)
        y = chainer.Variable(y_data)

        with self.assertRaises(type_check.InvalidType):
            functions.broadcast(x, y) 
Example #13
Source File: dern.py    From der-network with MIT License
def attention_history(self, dL, cue, train=True):
        # Stack the history vectors and broadcast the cue against them
        D = F.concat(dL, axis=0)
        D, Cue = F.broadcast(D, cue)
        # Score each history entry, then normalize the scores with softmax
        S = self.m(F.tanh(self.W_dm(D) + Cue))
        S = F.softmax(F.reshape(S, (1, len(dL))))
        # Return the attention-weighted sum of the history vectors
        pre_v = F.matmul(S, D)
        return pre_v 
Example #14
Source File: test_iqn.py    From chainerrl with MIT License
def test(self):
        batch_size = self.batch_size
        N = self.N
        N_prime = self.N_prime
        huber_loss_threshold = self.huber_loss_threshold

        # Overestimation is penalized proportionally to tau
        # Underestimation is penalized proportionally to (1-tau)
        y = np.random.normal(size=(batch_size, N)).astype('f')
        y_var = chainer.Variable(y)
        t = np.random.normal(size=(batch_size, N_prime)).astype('f')
        tau = np.random.uniform(size=(batch_size, N)).astype('f')

        loss = iqn.compute_eltwise_huber_quantile_loss(
            y_var, t, tau, huber_loss_threshold=huber_loss_threshold)
        y_var_b, t_b = F.broadcast(
            F.reshape(y_var, (batch_size, N, 1)),
            F.reshape(t, (batch_size, 1, N_prime)),
        )
        self.assertEqual(loss.shape, (batch_size, N, N_prime))
        huber_loss = F.huber_loss(
            y_var_b, t_b, delta=huber_loss_threshold, reduce='no')
        self.assertEqual(huber_loss.shape, (batch_size, N, N_prime))

        for i in range(batch_size):
            for j in range(N):
                for k in range(N_prime):
                    # loss is always positive
                    scalar_loss = loss[i, j, k]
                    scalar_grad = chainer.grad(
                        [scalar_loss], [y_var])[0][i, j]
                    self.assertGreater(scalar_loss.array, 0)
                    if y[i, j] > t[i, k]:
                        # y over-estimates t
                        # loss equals huber loss scaled by tau
                        correct_scalar_loss = tau[i, j] * huber_loss[i, j, k]
                    else:
                        # y under-estimates t
                        # loss equals huber loss scaled by (1-tau)
                        correct_scalar_loss = (
                            (1 - tau[i, j]) * huber_loss[i, j, k])
                    correct_scalar_grad = chainer.grad(
                        [correct_scalar_loss], [y_var])[0][i, j]
                    self.assertAlmostEqual(
                        scalar_loss.array,
                        correct_scalar_loss.array,
                        places=5,
                    )
                    self.assertAlmostEqual(
                        scalar_grad.array,
                        correct_scalar_grad.array,
                        places=5,
                    ) 
Example #15
Source File: lighting.py    From neural_renderer with MIT License
def lighting(
        faces, textures, intensity_ambient=0.5, intensity_directional=0.5, color_ambient=(1, 1, 1),
        color_directional=(1, 1, 1), direction=(0, 1, 0)):
    xp = chainer.cuda.get_array_module(faces)
    bs, nf = faces.shape[:2]

    # arguments
    if isinstance(color_ambient, tuple) or isinstance(color_ambient, list):
        color_ambient = xp.array(color_ambient, 'float32')
    if isinstance(color_directional, tuple) or isinstance(color_directional, list):
        color_directional = xp.array(color_directional, 'float32')
    if isinstance(direction, tuple) or isinstance(direction, list):
        direction = xp.array(direction, 'float32')
    if color_ambient.ndim == 1:
        color_ambient = cf.broadcast_to(color_ambient[None, :], (bs, 3))
    if color_directional.ndim == 1:
        color_directional = cf.broadcast_to(color_directional[None, :], (bs, 3))
    if direction.ndim == 1:
        direction = cf.broadcast_to(direction[None, :], (bs, 3))

    # create light
    light = xp.zeros((bs, nf, 3), 'float32')

    # ambient light
    if intensity_ambient != 0:
        light = light + intensity_ambient * cf.broadcast_to(color_ambient[:, None, :], light.shape)

    # directional light
    if intensity_directional != 0:
        faces = faces.reshape((bs * nf, 3, 3))
        v10 = faces[:, 0] - faces[:, 1]
        v12 = faces[:, 2] - faces[:, 1]
        normals = cf.normalize(neural_renderer.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        if direction.ndim == 2:
            direction = cf.broadcast_to(direction[:, None, :], normals.shape)
        cos = cf.relu(cf.sum(normals * direction, axis=2))
        light = (
            light + intensity_directional * cfmath.mul(*cf.broadcast(color_directional[:, None, :], cos[:, :, None])))

    # apply
    light = cf.broadcast_to(light[:, :, None, None, None, :], textures.shape)
    textures = textures * light
    return textures