Python chainer.functions.minimum() Examples

The following are 21 code examples of chainer.functions.minimum(), each taken from an open-source project; the project and source file are noted above each example. You may also want to check out the other available functions and classes of the chainer.functions module.
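As a quick orientation before the examples, here is a minimal sketch of the function itself: chainer.functions.minimum(x1, x2) takes two variables (or arrays) of identical shape and dtype and returns their element-wise minimum as a differentiable Variable. The values below are illustrative.

import numpy as np
import chainer
import chainer.functions as F

# two inputs with the same shape and dtype
x1 = chainer.Variable(np.array([1.0, 4.0, -2.0], dtype=np.float32))
x2 = chainer.Variable(np.array([3.0, 0.5, -1.0], dtype=np.float32))

y = F.minimum(x1, x2)  # element-wise minimum, differentiable w.r.t. both inputs
print(y.array)         # [ 1.   0.5 -2. ]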
Example #1
Source File: utils.py    From YOLOv2 with MIT License
def reshape_to_yolo_size(img):
    input_height, input_width, _ = img.shape
    min_pixel = 320
    #max_pixel = 608
    max_pixel = 448

    min_edge = np.minimum(input_width, input_height)
    if min_edge < min_pixel:
        input_width *= min_pixel / min_edge
        input_height *= min_pixel / min_edge
    max_edge = np.maximum(input_width, input_height)
    if max_edge > max_pixel:
        input_width *= max_pixel / max_edge
        input_height *= max_pixel / max_edge

    # round both dimensions to the nearest multiple of 32 (YOLOv2 downsamples by a factor of 32)
    input_width = int(input_width / 32 + round(input_width % 32 / 32)) * 32
    input_height = int(input_height / 32 + round(input_height % 32 / 32)) * 32
    img = cv2.resize(img, (input_width, input_height))

    return img 
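A hypothetical call site (the file path is only illustrative) would load a frame with OpenCV and pass it through the function so that both sides end up as multiples of 32:

import cv2

img = cv2.imread("frame.jpg")        # H x W x 3 BGR image (hypothetical path)
resized = reshape_to_yolo_size(img)
print(resized.shape)                 # height and width are now multiples of 32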
Example #2
Source File: utils.py    From kiss with GNU General Public License v3.0
def calc_loss(self, grids, image_size, **kwargs):
        normalize = kwargs.get('normalize', True)
        corner_coordinates = self.get_corners(grids, image_size, scale_to_image_size=False)
        # determine whether a point is out of the image, image range is [-1, 1]
        # everything outside of this increases the loss!
        bbox = F.concat(corner_coordinates, axis=0)
        top_loss = bbox + 1.5
        bottom_loss = bbox - 1.5

        # do not penalize anything inside the image
        top_loss = F.absolute(F.minimum(top_loss, self.xp.zeros_like(top_loss.array)))
        top_loss = F.reshape(top_loss, (len(corner_coordinates), -1))
        bottom_loss = F.maximum(bottom_loss, self.xp.zeros_like(bottom_loss.array))
        bottom_loss = F.reshape(bottom_loss, (len(corner_coordinates), -1))

        loss = F.sum(F.concat([top_loss, bottom_loss], axis=0), axis=0)
        if normalize:
            loss = F.sum(loss)
        return loss 
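Note that the thresholds of ±1.5 leave a margin of 0.5 around the normalized image range of [-1, 1] mentioned in the comment, so only corner coordinates that lie well outside the image contribute to the loss.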
Example #3
Source File: utils.py    From kiss with GNU General Public License v3.0
def calc_bboxes(self, predicted_bboxes, image_size, out_size):
        predicted_bboxes = (predicted_bboxes + 1) / 2
        x_points = predicted_bboxes[:, 0, ...] * image_size.width
        y_points = predicted_bboxes[:, 1, ...] * image_size.height
        top_left_x = F.get_item(x_points, [..., 0, 0])
        top_left_y = F.get_item(y_points, [..., 0, 0])
        bottom_right_x = F.get_item(x_points, [..., out_size.height - 1, out_size.width - 1])
        bottom_right_y = F.get_item(y_points, [..., out_size.height - 1, out_size.width - 1])

        bboxes = F.stack(
            [
                F.minimum(top_left_x, bottom_right_x),
                F.minimum(top_left_y, bottom_right_y),
                F.maximum(top_left_x, bottom_right_x),
                F.maximum(top_left_y, bottom_right_y),
            ],
            axis=1
        )
        return bboxes 
Example #4
Source File: match_bbox.py    From kiss with GNU General Public License v3.0
def get_aabb_corners(grids, image_size):
    _, _, height, width = grids.shape
    grids = (grids + 1) / 2
    x_points = grids[:, 0, ...] * image_size.width
    y_points = grids[:, 1, ...] * image_size.height
    x_points = F.clip(x_points, 0., float(image_size.width))
    y_points = F.clip(y_points, 0., float(image_size.height))
    top_left_x = F.get_item(x_points, [..., 0, 0])
    top_left_y = F.get_item(y_points, [..., 0, 0])
    top_right_x = F.get_item(x_points, [..., 0, width - 1])
    top_right_y = F.get_item(y_points, [..., 0, width - 1])
    bottom_right_x = F.get_item(x_points, [..., height - 1, width - 1])
    bottom_right_y = F.get_item(y_points, [..., height - 1, width - 1])
    bottom_left_x = F.get_item(x_points, [..., height - 1, 0])
    bottom_left_y = F.get_item(y_points, [..., height - 1, 0])

    top_left_x_aabb = F.minimum(top_left_x, bottom_left_x)
    top_left_y_aabb = F.minimum(top_left_y, top_right_y)
    bottom_right_x_aabb = F.maximum(top_right_x, bottom_right_x)
    bottom_right_y_aabb = F.maximum(bottom_left_y, bottom_right_y)

    return top_left_y_aabb, top_left_x_aabb, bottom_right_y_aabb, bottom_right_x_aabb 
Example #5
Source File: test_minimum.py    From chainer with MIT License
def forward_expected(self, inputs):
        x1, x2 = inputs
        expected = numpy.minimum(x1, x2)
        expected = numpy.asarray(expected)
        return expected.astype(self.dtype), 
Example #6
Source File: utils.py    From YOLOv2 with MIT License
def multi_overlap(x1, len1, x2, len2):
    len1_half = len1/2
    len2_half = len2/2

    left = F.maximum(x1 - len1_half, x2 - len2_half)
    right = F.minimum(x1 + len1_half, x2 + len2_half)

    return right - left

# Takes two boxes and returns the overlapping area (intersection of 2 boxes)
Example #7
Source File: utils.py    From kiss with GNU General Public License v3.0
def overlap(self, x1, w1, x2, w2):
        return F.maximum(self.xp.zeros_like(x1), F.minimum(x1 + w1, x2 + w2) - F.maximum(x1, x2)) 
Example #8
Source File: utils.py    From kiss with GNU General Public License v3.0
def overlap(self, x1, w1, x2, w2):
        return self.xp.maximum(self.xp.zeros_like(x1), self.xp.minimum(x1 + w1, x2 + w2) - self.xp.maximum(x1, x2)) 
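Examples #7 and #8 compute the same 1-D overlap, once on Chainer variables (differentiable) and once on raw numpy/cupy arrays via self.xp. Below is a minimal sketch of how such a helper is typically composed into a box IoU, assuming boxes given as (left, top, width, height) tuples and a free-function version of overlap with the same semantics; both assumptions are mine, not taken from the kiss code.

def iou(box_a, box_b):
    # box = (left, top, width, height); overlap() is the 1-D helper above
    w = overlap(box_a[0], box_a[2], box_b[0], box_b[2])
    h = overlap(box_a[1], box_a[3], box_b[1], box_b[3])
    intersection = w * h
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - intersection
    return intersection / union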
Example #9
Source File: loss_metrics.py    From see with GNU General Public License v3.0
def calc_height_loss(self, height):
        # penalize bboxes that are not high enough to contain text (10 pixels)
        shifted_height = height - 10
        thresholded_height = F.minimum(shifted_height, self.xp.zeros_like(shifted_height))
        thresholded_height *= -1

        return F.average(thresholded_height) 
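Since shifted_height is negative exactly when height < 10, the value being averaged equals max(0, 10 - height): a hinge penalty that grows linearly as a predicted box shrinks below the 10-pixel minimum and is zero otherwise.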
Example #10
Source File: test_minimum.py    From chainer with MIT License
def test_minimum_inconsistent_shapes(self):
        x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
        x2_data = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        with self.assertRaises(type_check.InvalidType):
            functions.minimum(x1, x2) 
Example #11
Source File: test_minimum.py    From chainer with MIT License
def test_minimum_inconsistent_types(self):
        if self.dtype1 == self.dtype2:
            return
        x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype1)
        x2_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype2)
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        with self.assertRaises(type_check.InvalidType):
            functions.minimum(x1, x2) 
Example #12
Source File: action_value.py    From chainerrl with MIT License
def greedy_actions(self):
        with chainer.force_backprop_mode():
            a = self.mu
            if self.min_action is not None:
                a = F.maximum(
                    self.xp.broadcast_to(self.min_action, a.array.shape), a)
            if self.max_action is not None:
                a = F.minimum(
                    self.xp.broadcast_to(self.max_action, a.array.shape), a)
            return a 
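The maximum/minimum pair simply clips the greedy action mu element-wise into the box [min_action, max_action]; chainer.functions.clip cannot be used here because its bounds must be constants, whereas min_action and max_action are arrays broadcast to the action shape.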
Example #13
Source File: test_minimum.py    From chainer with MIT License
def forward(self, inputs, device):
        x1, x2 = inputs
        return functions.minimum(x1, x2), 
Example #14
Source File: agents.py    From EPG with MIT License
def _compute_ppo_loss(self, obs, acts, at, vt, old_params):
        params = self._pi_f(obs)
        cv = F.flatten(self._vf_f(obs))
        ratio = F.exp(self._logp(params, acts) - self._logp(old_params, acts))
        surr1 = ratio * at
        surr2 = F.clip(ratio, 1 - self._ppo_clipparam, 1 + self._ppo_clipparam) * at
        ppo_surr_loss = (
                -sym_mean(F.minimum(surr1, surr2))
                + self._ppo_klcoeff * sym_mean(self.kl(old_params, params))
                + sym_mean(F.square(cv - vt))
        )
        return ppo_surr_loss 
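The F.minimum call implements the PPO clipped surrogate objective, L_clip = E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)], with surr1 as the unclipped term and surr2 as the clipped term.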
Example #15
Source File: Minimum.py    From chainer-compiler with MIT License
def forward(self, v1, v2):
        return np.minimum(v1, v2)

Example #16
Source File: Minimum.py    From chainer-compiler with MIT License
def forward(self, v1, v2):
        return F.minimum(v1, v2) 
Example #17
Source File: custom_functions.py    From chainer-compiler with MIT License
def chainer_clipped_relu(x, z=20.0):
    return F.minimum(F.maximum(0.0, x), z) 
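Chainer also ships a built-in with the same definition, chainer.functions.clipped_relu(x, z), which computes min(max(0, x), z); the hand-rolled minimum/maximum composition is presumably kept here so the compiler only has to handle the elementary ops. A quick illustrative check with the built-in:

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([-5.0, 3.0, 42.0], dtype=np.float32))
y = F.clipped_relu(x, z=20.0)
print(y.array)  # [ 0.  3. 20.]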
Example #18
Source File: soft_actor_critic.py    From chainerrl with MIT License
def update_policy_and_temperature(self, batch):
        """Compute loss for actor."""

        batch_state = batch['state']

        action_distrib = self.policy(batch_state)
        actions, log_prob = action_distrib.sample_with_log_prob()
        q1 = self.q_func1(batch_state, actions)
        q2 = self.q_func2(batch_state, actions)
        q = F.minimum(q1, q2)

        entropy_term = self.temperature * log_prob[..., None]
        assert q.shape == entropy_term.shape
        loss = F.mean(entropy_term - q)

        self.policy_optimizer.update(lambda: loss)

        if self.entropy_target is not None:
            self.update_temperature(log_prob.array)

        # Record entropy
        with chainer.no_backprop_mode():
            try:
                self.entropy_record.extend(
                    cuda.to_cpu(action_distrib.entropy.array))
            except NotImplementedError:
                # Record - log p(x) instead
                self.entropy_record.extend(
                    cuda.to_cpu(-log_prob.array)) 
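Taking the element-wise minimum of the two critic outputs q1 and q2 is the clipped double-Q technique used by TD3 and SAC: acting and bootstrapping on the smaller of two independent estimates counteracts the overestimation bias of Q-learning with function approximation. The same pattern appears in the target computation of Example #19 below.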
Example #19
Source File: soft_actor_critic.py    From chainerrl with MIT License
def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_action_distrib = self.policy(batch_next_state)
            next_actions, next_log_prob =\
                next_action_distrib.sample_with_log_prob()
            next_q1 = self.target_q_func1(batch_next_state, next_actions)
            next_q2 = self.target_q_func2(batch_next_state, next_actions)
            next_q = F.minimum(next_q1, next_q2)
            entropy_term = self.temperature * next_log_prob[..., None]
            assert next_q.shape == entropy_term.shape

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q - entropy_term)

        predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
        predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = 0.5 * F.mean_squared_error(target_q, predict_q1)
        loss2 = 0.5 * F.mean_squared_error(target_q, predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2) 
Example #20
Source File: ppo.py    From chainerrl with MIT License
def _lossfun(self,
                 entropy, vs_pred, log_probs,
                 vs_pred_old, log_probs_old,
                 advs, vs_teacher):

        prob_ratio = F.exp(log_probs - log_probs_old)

        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
            ))
        loss_entropy = -F.mean(entropy)

        self.value_loss_record.append(float(loss_value_func.array))
        self.policy_loss_record.append(float(loss_policy.array))

        loss = (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
        )

        return loss 
Example #21
Source File: ppo.py    From chainerrl with MIT License
def _elementwise_clip(x, x_min, x_max):
    """Elementwise clipping

    Note: chainer.functions.clip only supports clipping to constant intervals,
    so clipping against variable (per-element) bounds needs this helper.
    """
    return F.minimum(F.maximum(x, x_min), x_max)
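A minimal usage sketch (the values are illustrative) showing what F.clip cannot do directly, namely clipping against per-element bounds such as the vs_pred_old ± clip_eps_vf interval from Example #20:

import numpy as np
import chainer
import chainer.functions as F

vs_pred = chainer.Variable(np.array([0.2, 1.4, -0.9], dtype=np.float32))
vs_pred_old = np.array([0.0, 1.0, -1.0], dtype=np.float32)
clip_eps_vf = 0.2

clipped = _elementwise_clip(vs_pred,
                            vs_pred_old - clip_eps_vf,
                            vs_pred_old + clip_eps_vf)
print(clipped.array)  # approximately [ 0.2  1.2 -0.9]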