Python numpy.take_along_axis() Examples

The following are 30 code examples of numpy.take_along_axis(), collected from open-source projects. Each example notes its original project, source file, and license.
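numpy.take_along_axis(arr, indices, axis) matches each 1-d slice of indices against the corresponding 1-d slice of arr along the given axis, which makes it the natural companion to argsort, argmax, and argpartition. A minimal sketch of the pattern most of the examples below rely on (plain NumPy, independent of any of the projects):

import numpy as np

a = np.array([[10, 30, 20],
              [60, 40, 50]])

# Sorting each row via its argsort indices reproduces np.sort.
order = np.argsort(a, axis=1)
np.testing.assert_array_equal(np.take_along_axis(a, order, axis=1),
                              np.sort(a, axis=1))

# Picking each row's maximum; indices must keep the same ndim as `a`.
idx = np.argmax(a, axis=1)[:, np.newaxis]
print(np.take_along_axis(a, idx, axis=1))  # [[30] [60]]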
Example #1
Source File: test_base_execute.py    From mars with Apache License 2.0
def testArgsort(self):
        # only 1 chunk when axis = -1
        raw = np.random.rand(100, 10)
        x = tensor(raw, chunk_size=10)

        xa = argsort(x)

        r = self.executor.execute_tensor(xa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw), np.take_along_axis(raw, r, axis=-1))

        x = tensor(raw, chunk_size=(22, 4))

        xa = argsort(x)

        r = self.executor.execute_tensor(xa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw), np.take_along_axis(raw, r, axis=-1))

        raw = np.random.rand(100)

        x = tensor(raw, chunk_size=23)

        xa = argsort(x, axis=0)

        r = self.executor.execute_tensor(xa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw, axis=0), raw[r]) 
Example #2
Source File: emd_module.py    From MSN-Point-Cloud-Completion with Apache License 2.0
def test_emd():
    x1 = torch.rand(20, 8192, 3).cuda()
    x2 = torch.rand(20, 8192, 3).cuda()
    emd = emdModule()
    start_time = time.perf_counter()
    dis, assignment = emd(x1, x2, 0.05, 3000)
    print("Input_size: ", x1.shape)
    print("Runtime: %lfs" % (time.perf_counter() - start_time))
    print("EMD: %lf" % np.sqrt(dis.cpu()).mean())
    print("|set(assignment)|: %d" % assignment.unique().numel())
    # Move everything to NumPy before indexing; np.take_along_axis
    # does not accept CUDA tensors.
    assignment = assignment.cpu().numpy().astype(int)
    assignment = np.expand_dims(assignment, -1)
    x1 = x1.cpu().numpy()
    x2 = x2.cpu().numpy()
    # Reorder x2 so each point faces its assigned point in x1.
    x2 = np.take_along_axis(x2, assignment, axis=1)
    d = (x1 - x2) * (x1 - x2)
    print("Verified EMD: %lf" % np.sqrt(d.sum(-1)).mean())

#test_emd() 
Example #3
Source File: layers_test.py    From lingvo with Apache License 2.0
def testRelativePositionalEmbeddingLayer(self):
    with self.session(use_gpu=False):
      radius = 3
      p = layers.RelativePositionalEmbeddingLayer.Params().Set(
          name='rel_position_emb', radius=radius, dim=4)
      layer = p.Instantiate()
      indices = np.array([-5, -2, 0, 1, 4], dtype=np.int32)
      pos_emb = layer.FPropDefaultTheta(tf.convert_to_tensor(indices))

      self.evaluate(tf.global_variables_initializer())
      actual_pos_emb, full_emb = self.evaluate([pos_emb, layer.vars.w])

      clipped_indices = np.vectorize(lambda x: max(-radius, min(radius, x)))(
          indices) + radius
      expected_output = np.take_along_axis(full_emb,
                                           np.expand_dims(clipped_indices, -1),
                                           0)
      print('expected_position_embs:', expected_output)
      print('actual_position_embs:', actual_pos_emb)
      self.assertAllClose(actual_pos_emb, expected_output) 
Example #4
Source File: test_base_execute.py    From mars with Apache License 2.0
def testSortIndicesExecution(self):
        # only 1 chunk when axis = -1
        raw = np.random.rand(100, 10)
        x = tensor(raw, chunk_size=10)

        r = sort(x, return_index=True)

        sr, si = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(sr, np.take_along_axis(raw, si, axis=-1))

        x = tensor(raw, chunk_size=(22, 4))

        r = sort(x, return_index=True)

        sr, si = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(sr, np.take_along_axis(raw, si, axis=-1))

        raw = np.random.rand(100)

        x = tensor(raw, chunk_size=23)

        r = sort(x, axis=0, return_index=True)

        sr, si = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(sr, raw[si]) 
Example #5
Source File: lax_numpy_test.py    From trax with Apache License 2.0
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
    rng = rng_factory()
    i_shape = onp.array(i_shape)
    if axis is None:
      i_shape = [onp.prod(i_shape, dtype=onp.int64)]
    else:
      # Test the case where the size of the axis doesn't necessarily broadcast.
      i_shape[axis] *= 3
      i_shape = list(i_shape)
    def args_maker():
      x = rng(x_shape, dtype)
      n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
      i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
      return x, i

    lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)

    if hasattr(onp, "take_along_axis"):
      onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
      self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True,
                          check_incomplete_shape=True) 
Example #6
Source File: np_util.py    From bayesmark with Apache License 2.0
def cummin(x_val, x_key):
    """Get the cumulative minimum of `x_val` when ranked according to `x_key`.

    Parameters
    ----------
    x_val : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array to get the cumulative minimum of along axis 0.
    x_key : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array for ranking elements as to what is the minimum.

    Returns
    -------
    c_min : :class:`numpy:numpy.ndarray` of shape (n, d)
        The cumulative minimum array.
    """
    assert x_val.shape == x_key.shape
    assert x_val.ndim == 2
    assert not np.any(np.isnan(x_key)), "cummin not defined for nan key"

    n, _ = x_val.shape

    xm = np.minimum.accumulate(x_key, axis=0)
    idx = np.maximum.accumulate((x_key <= xm) * np.arange(n)[:, None])
    c_min = np.take_along_axis(x_val, idx, axis=0)
    return c_min 
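A small usage sketch for cummin (the numbers are invented for illustration): the key column reaches a new minimum at row 1, so rows 1 and 2 both carry row 1's value.

import numpy as np

x_key = np.array([[3.0], [1.0], [2.0]])  # ranking key
x_val = np.array([[30.], [10.], [20.]])  # values to carry forward
print(cummin(x_val, x_key))  # [[30.] [10.] [10.]]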
Example #7
Source File: numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def _get_best_estimate_single_method(derivative, errors):
    """Select best derivative estimates element wise.

    Given a single method, e.g. central differences with 2 num_terms (see above), we get
    multiple Richardson approximations including estimated errors. Here we select the
    approximations which result in the lowest error element wise.

    Args:
        derivative (np.ndarray): Derivative estimates from Richardson approximation.
            First axis (axis 0) denotes the potentially multiple estimates. Following
            dimensions represent the dimension of the derivative, i.e. for a classical
            gradient ``derivative`` has 2 dimensions, while for a classical jacobian
            ``derivative`` has 3 dimensions.
        errors (np.ndarray): Error estimates of ``derivative`` estimates. Has the same
            shape as ``derivative``.

    Returns:
        derivative_minimal (np.ndarray): Best derivative estimates chosen with respect
            to minimizing ``errors``. Note that the best values are selected
            element-wise. Has shape ``(derivative.shape[1], derivative.shape[2])``.

        error_minimal (np.ndarray): Minimal errors selected element-wise along axis
            0 of ``errors``.

    """
    if derivative.shape[0] == 1:
        derivative_minimal = np.squeeze(derivative, axis=0)
        error_minimal = np.squeeze(errors, axis=0)
    else:

        minimizer = np.nanargmin(errors, axis=0)

        derivative_minimal = np.take_along_axis(
            derivative, minimizer[np.newaxis, :], axis=0
        )
        derivative_minimal = np.squeeze(derivative_minimal, axis=0)
        error_minimal = np.nanmin(errors, axis=0)

    return derivative_minimal, error_minimal 
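The selection pattern above — np.nanargmin along axis 0 followed by np.take_along_axis with a reinserted leading axis — in isolation, with made-up numbers:

import numpy as np

derivative = np.array([[[1., 2.], [3., 4.]],
                       [[5., 6.], [7., 8.]]])  # two candidate estimates
errors = np.array([[[.1, .9], [.2, .8]],
                   [[.9, .1], [.8, .2]]])
minimizer = np.nanargmin(errors, axis=0)  # shape (2, 2)
best = np.take_along_axis(derivative, minimizer[np.newaxis, :], axis=0)
print(np.squeeze(best, axis=0))  # [[1. 6.] [3. 8.]]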
Example #8
Source File: layer.py    From tinynn with MIT License
def forward(self, inputs):
        s_h, s_w = self.stride
        k_h, k_w = self.kernel_shape
        batch_sz, in_h, in_w, in_c = inputs.shape

        # zero-padding
        if self.padding is None:
            self.padding = get_padding_2d(
                (in_h, in_w), (k_h, k_w), self.padding_mode)
        X = np.pad(inputs, pad_width=self.padding, mode="constant")
        padded_h, padded_w = X.shape[1:3]

        out_h = (padded_h - k_h) // s_h + 1
        out_w = (padded_w - k_w) // s_w + 1

        # construct output matrix and argmax matrix
        max_pool = np.empty(shape=(batch_sz, out_h, out_w, in_c))
        argmax = np.empty(shape=(batch_sz, out_h, out_w, in_c), dtype=int)
        for r in range(out_h):
            r_start = r * s_h
            for c in range(out_w):
                c_start = c * s_w
                pool = X[:, r_start: r_start+k_h, c_start: c_start+k_w, :]
                pool = pool.reshape((batch_sz, -1, in_c))

                _argmax = np.argmax(pool, axis=1)[:, np.newaxis, :]
                argmax[:, r, c, :] = _argmax.squeeze()

                # get max elements
                _max_pool = np.take_along_axis(pool, _argmax, axis=1).squeeze()
                max_pool[:, r, c, :] = _max_pool

        self.X_shape = X.shape
        self.out_shape = (out_h, out_w)
        self.argmax = argmax
        return max_pool 
Example #9
Source File: numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def _get_best_estimate_along_methods(derivatives, errors):
    """Extract best derivative estimate over different methods.

    Given that for each method, where one method can be for example central differences
    with two num_terms (see above), we have selected a single best derivative estimate,
    we select the best derivative estimates element-wise over different methods, where
    again best is defined as minimizing the approximation error.

    Args:
        derivatives (OrderedDict): Dictionary containing derivative estimates for
            different methods.
        errors (OrderedDict): Dictionary containing error estimates for derivatives stored
            in ``derivatives``.

    Returns:
        jac_minimal (np.ndarray): The optimal derivative estimate over different
            methods.

    """
    errors = np.stack(list(errors.values()))
    derivatives = np.stack(list(derivatives.values()))

    if derivatives.shape[0] == 1:
        jac_minimal = np.squeeze(derivatives, axis=0)
    else:
        minimizer = np.nanargmin(errors, axis=0)

        jac_minimal = np.take_along_axis(derivatives, minimizer[np.newaxis, :], axis=0)
        jac_minimal = np.squeeze(jac_minimal, axis=0)

    return jac_minimal 
Example #10
Source File: snippets.py    From bert4keras with Apache License 2.0
def beam_search(self, inputs, topk, states=None, min_ends=1):
        """Beam search decoding.
        Note: topk here is the beam size.
        Returns: the best decoded sequence.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores, states = self.predict(
                inputs, output_ids, states, 'logits'
            )  # scores for the current step
            if step == 0:  # after the first step, repeat the inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulated scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep only the topk
            indices_1 = indices // scores.shape[1]  # row indices
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # column indices
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # update outputs
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # update scores
            end_counts = (output_ids == self.end_id).sum(1)  # count end tokens
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                best_one = output_scores.argmax()  # the highest-scoring sequence
                if end_counts[best_one] == min_ends:  # if it has terminated
                    return output_ids[best_one]  # return it directly
                else:  # otherwise keep only the unfinished sequences
                    flag = (end_counts < min_ends)  # mark unfinished sequences
                    if not flag.all():  # if any sequence has finished
                        inputs = [i[flag] for i in inputs]  # drop finished inputs
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished scores
                        end_counts = end_counts[flag]  # drop finished end counts
                        topk = flag.sum()  # shrink topk accordingly
        # max length reached; return the best sequence
        return output_ids[output_scores.argmax()]
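Note the axis=None calls above: np.take_along_axis(scores, indices, axis=None) treats scores as flattened, so the flat indices produced by argpartition(..., axis=None) can be reused directly. In isolation:

import numpy as np

scores = np.array([[0.1, 0.7],
                   [0.9, 0.3]])
topk = 2
flat_idx = scores.argpartition(-topk, axis=None)[-topk:]
print(np.take_along_axis(scores, flat_idx, axis=None))  # top-2 values, e.g. [0.7 0.9]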
Example #11
Source File: indexing.py    From cupy with MIT License
def take_along_axis(a, indices, axis):
    """Take values from the input array by matching 1d index and data slices.

    Args:
        a (cupy.ndarray): Array to extract elements.
        indices (cupy.ndarray): Indices to take along each 1d slice of ``a``.
        axis (int): The axis to take 1d slices along.

    Returns:
        cupy.ndarray: The indexed result.

    .. seealso:: :func:`numpy.take_along_axis`
    """

    if indices.dtype.kind not in ('i', 'u'):
        raise IndexError('`indices` must be an integer array')

    if axis is None:
        a = a.ravel()
        axis = 0

    ndim = a.ndim

    if not (-ndim <= axis < ndim):
        raise numpy.AxisError('Axis overrun')

    axis %= a.ndim

    if ndim != indices.ndim:
        raise ValueError(
            '`indices` and `a` must have the same number of dimensions')

    fancy_index = []
    for i, n in enumerate(a.shape):
        if i == axis:
            fancy_index.append(indices)
        else:
            ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)
            fancy_index.append(cupy.arange(n).reshape(ind_shape))

    return a[tuple(fancy_index)]
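The fancy-index construction mirrors how take_along_axis works in general: the target axis contributes the indices themselves, and every other axis contributes a broadcastable arange. A plain-NumPy sketch of the same idea (a reference illustration, not the cupy implementation):

import numpy as np

def take_along_axis_ref(a, indices, axis):
    axis %= a.ndim
    fancy_index = []
    for i, n in enumerate(a.shape):
        if i == axis:
            fancy_index.append(indices)
        else:
            shape = (1,) * i + (-1,) + (1,) * (a.ndim - i - 1)
            fancy_index.append(np.arange(n).reshape(shape))
    return a[tuple(fancy_index)]

a = np.random.rand(3, 4)
idx = np.argsort(a, axis=1)
assert np.array_equal(take_along_axis_ref(a, idx, 1), np.sort(a, axis=1))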
Example #12
Source File: experiment_analysis.py    From bayesmark with Apache License 2.0
def get_perf_array(evals, evals_visible):
    """Get the actual (e.g., generalization loss) over iterations.

    Parameters
    ----------
    evals : :class:`numpy:numpy.ndarray` of shape (n_iter, n_batch, n_trials)
        The actual loss (e.g., generalization) for a given experiment.
    evals_visible : :class:`numpy:numpy.ndarray` of shape (n_iter, n_batch, n_trials)
        The observable loss (e.g., validation) for a given experiment.

    Returns
    -------
    perf_array : :class:`numpy:numpy.ndarray` of shape (n_iter, n_trials)
        The best performance so far at iteration i from `evals`, where the best has been
        selected according to `evals_visible`.
    """
    n_iter, _, n_trials = evals.shape
    assert evals.size > 0, "perf array not supported for empty arrays"
    assert evals_visible.shape == evals.shape
    assert not np.any(np.isnan(evals))
    assert not np.any(np.isnan(evals_visible))

    idx = np.argmin(evals_visible, axis=1)
    perf_array = np.take_along_axis(evals, idx[:, None, :], axis=1).squeeze(axis=1)
    assert perf_array.shape == (n_iter, n_trials)

    visible_perf_array = np.min(evals_visible, axis=1)
    assert visible_perf_array.shape == (n_iter, n_trials)

    # Get the minimum from the visible loss
    perf_array = cummin(perf_array, visible_perf_array)
    return perf_array 
Example #13
Source File: local_scaling.py    From scikit-hubness with BSD 3-Clause "New" or "Revised" License
def fit(self, neigh_dist, neigh_ind, X=None, assume_sorted: bool = True, *args, **kwargs) -> LocalScaling:
        """ Fit the model using neigh_dist and neigh_ind as training data.

        Parameters
        ----------
        neigh_dist: np.ndarray, shape (n_samples, n_neighbors)
            Distance matrix of training objects (rows) against their
            individual k nearest neighbors (columns).

        neigh_ind: np.ndarray, shape (n_samples, n_neighbors)
            Neighbor indices corresponding to the values in neigh_dist.

        X: ignored

        assume_sorted: bool, default = True
            Assume input matrices are sorted according to neigh_dist.
            If False, these are sorted here.
        """
        # Check equal number of rows and columns
        check_consistent_length(neigh_ind, neigh_dist)
        check_consistent_length(neigh_ind.T, neigh_dist.T)

        # increment to include the k-th element in slicing
        k = self.k + 1

        # Find distances to the k-th neighbor (standard LS) or the k neighbors (NICDM)
        if assume_sorted:
            self.r_dist_train_ = neigh_dist[:, :k]
            self.r_ind_train_ = neigh_ind[:, :k]
        else:
            kth = np.arange(self.k)
            mask = np.argpartition(neigh_dist, kth=kth)[:, :k]
            self.r_dist_train_ = np.take_along_axis(neigh_dist, mask, axis=1)
            self.r_ind_train_ = np.take_along_axis(neigh_ind, mask, axis=1)

        return self 
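The assume_sorted=False branch sorts both matrices row-wise by distance in a single pass; a standalone sketch with invented values (here k = 2 plays the role of self.k, so the first k positions are guaranteed sorted and the slice keeps k + 1 columns):

import numpy as np

neigh_dist = np.array([[0.9, 0.1, 0.5],
                       [0.3, 0.8, 0.2]])
neigh_ind = np.array([[7, 2, 5],
                      [1, 4, 9]])
k = 2
mask = np.argpartition(neigh_dist, kth=np.arange(k))[:, :k + 1]
print(np.take_along_axis(neigh_dist, mask, axis=1))  # [[0.1 0.5 0.9] [0.2 0.3 0.8]]
print(np.take_along_axis(neigh_ind, mask, axis=1))   # [[2 5 7] [9 1 4]]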
Example #14
Source File: diagnostics.py    From numpyro with Apache License 2.0
def hpdi(x, prob=0.90, axis=0):
    """
    Computes "highest posterior density interval" (HPDI) which is the narrowest
    interval with probability mass ``prob``.

    :param numpy.ndarray x: the input array.
    :param float prob: the probability mass of samples within the interval.
    :param int axis: the dimension along which to compute the HPDI.
    :return: quantiles of ``x`` at ``(1 - prob) / 2`` and
        ``(1 + prob) / 2``.
    :rtype: numpy.ndarray
    """
    x = np.swapaxes(x, axis, 0)
    sorted_x = np.sort(x, axis=0)
    mass = x.shape[0]
    index_length = int(prob * mass)
    intervals_left = sorted_x[:(mass - index_length)]
    intervals_right = sorted_x[index_length:]
    intervals_length = intervals_right - intervals_left
    index_start = intervals_length.argmin(axis=0)
    index_end = index_start + index_length
    hpd_left = np.take_along_axis(sorted_x, index_start[None, ...], axis=0)
    hpd_left = np.swapaxes(hpd_left, axis, 0)
    hpd_right = np.take_along_axis(sorted_x, index_end[None, ...], axis=0)
    hpd_right = np.swapaxes(hpd_right, axis, 0)
    return np.concatenate([hpd_left, hpd_right], axis=axis) 
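A quick sanity check of hpdi on synthetic draws (assuming a standard normal, whose central 90% interval is roughly (-1.64, 1.64); for a symmetric unimodal distribution the HPDI approaches that central interval):

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=10000)
lo, hi = hpdi(samples, prob=0.90)
print(lo, hi)  # approximately -1.64 1.64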
Example #15
Source File: topk.py    From NNEF-Tools with Apache License 2.0
def topk(data, axis, k):
    indices = np.flip(np.argsort(data, axis=axis), axis=axis).take(indices=range(k), axis=axis)
    values = np.take_along_axis(data, indices, axis=axis)
    return values, indices 
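For example (values invented):

import numpy as np

data = np.array([[3, 1, 4],
                 [1, 5, 9]])
values, indices = topk(data, axis=1, k=2)
print(values)   # [[4 3] [9 5]]
print(indices)  # [[2 0] [1 2]]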
Example #16
Source File: fiber_utils.py    From TractSeg with Apache License 2.0
def get_best_original_peaks(peaks_pred, peaks_orig, peak_len_thr=0.1):
    """
    Find the peak from peaks_orig which is closest to the peak in peaks_pred.

    Args:
        peaks_pred: file containing 1 peak [x,y,z,3]
        peaks_orig: file containing 4 peaks [x,y,z,9]
        peak_len_thr: all peaks shorter than this threshold will be removed

    Returns:
        Image containing 1 peak [x,y,z,3]
    """

    def _get_most_aligned_peak(pred, orig):
        orig = np.array(orig)
        angle1 = abs(peak_utils.angle_last_dim(pred, orig[0]))
        angle2 = abs(peak_utils.angle_last_dim(pred, orig[1]))
        angle3 = abs(peak_utils.angle_last_dim(pred, orig[2]))
        argmax = np.argmax(np.stack([angle1, angle2, angle3], axis=-1), axis=-1)

        x, y, z = (orig.shape[1], orig.shape[2], orig.shape[3])
        return orig[tuple([argmax] + np.ogrid[:x, :y, :z])]
        # Other ways that would also work
        # return orig[argmax, np.arange(x)[:, None, None], np.arange(y)[:, None], np.arange(z)]
        # return np.take_along_axis(orig, argmax[None, ..., None], axis=0)[0]   # only supported in newest numpy version

    peaks_pred = np.nan_to_num(peaks_pred)
    peaks_orig = np.nan_to_num(peaks_orig)

    # Remove all peaks where the predicted peak is too short
    peaks_orig[np.linalg.norm(peaks_pred, axis=-1) < peak_len_thr] = 0

    best_orig = _get_most_aligned_peak(peaks_pred,
                                      [peaks_orig[:, :, :, 0:3],
                                       peaks_orig[:, :, :, 3:6],
                                       peaks_orig[:, :, :, 6:9]])
    return best_orig 
Example #17
Source File: test_quantity_non_ufuncs.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def test_take_along_axis(self):
        indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
        out = np.take_along_axis(self.q, indices, axis=0)
        expected = np.take_along_axis(self.q.value, indices,
                                      axis=0) * self.q.unit
        assert np.all(out == expected) 
Example #18
Source File: circular_vec_buffer_backend.py    From vel with MIT License
def take_along_axis(large_array, indexes):
    """ Take along axis """
    # Reshape indexes into the right shape
    if len(large_array.shape) > len(indexes.shape):
        indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape))))

    return np.take_along_axis(large_array, indexes, axis=0) 
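The reshape pads indexes with trailing singleton axes so it broadcasts against the buffer's remaining dimensions; for example (shapes invented):

import numpy as np

buffer = np.arange(24).reshape(4, 2, 3)  # (time, env, feature)
indexes = np.array([[0, 3],
                    [2, 1]])             # (batch, env) time indices
out = take_along_axis(buffer, indexes)
print(out.shape)  # (2, 2, 3); out[b, e] == buffer[indexes[b, e], e]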
Example #19
Source File: circular_vec_buffer_backend.py    From vel with MIT License
def get_transitions(self, indexes):
        """ Get dictionary of transition data """
        assert indexes.shape[1] == self.state_buffer.shape[1], \
            "Must have the same number of indexes as there are environments"

        frame_batch_shape = (
                [indexes.shape[0], indexes.shape[1]]
                + list(self.state_buffer.shape[2:-1])
                + [self.state_buffer.shape[-1] * self.frame_history]
        )

        past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
        future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)

        for buffer_idx, frame_row in enumerate(indexes):
            for env_idx, frame_idx in enumerate(frame_row):
                past_frame_buffer[buffer_idx, env_idx], future_frame_buffer[buffer_idx, env_idx] = (
                    self.get_frame_with_future(frame_idx, env_idx)
                )

        actions = take_along_axis(self.action_buffer, indexes)
        rewards = take_along_axis(self.reward_buffer, indexes)
        dones = take_along_axis(self.dones_buffer, indexes)

        transition_tensors = {
            'observations': past_frame_buffer,
            'actions': actions,
            'rewards': rewards,
            'observations_next': future_frame_buffer,
            'dones': dones.astype(np.float32),
        }

        for name in self.extra_data:
            transition_tensors[name] = take_along_axis(self.extra_data[name], indexes)

        return transition_tensors 
Example #20
Source File: ntsnet_cub.py    From imgclsmob with MIT License
def __call__(self, x):
        raw_pre_features = self.backbone(x)

        rpn_score = self.navigator_unit(raw_pre_features)
        rpn_score.to_cpu()
        all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1)
                    for y in rpn_score.array]
        top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds]
        top_n_cdds = np.array(top_n_cdds)
        top_n_index = top_n_cdds[:, :, -1].astype(np.int64)
        top_n_prob = np.take_along_axis(rpn_score.array, top_n_index, axis=1)

        batch = x.shape[0]
        x_pad = F.pad(x, pad_width=self.pad_width, mode="constant", constant_values=0)
        part_imgs = []
        for i in range(batch):
            for j in range(self.top_n):
                y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64))
                x_res = F.resize_images(
                    x_pad[i:i + 1, :, y0:y1, x0:x1],
                    output_shape=(224, 224))
                part_imgs.append(x_res)
        part_imgs = F.concat(tuple(part_imgs), axis=0)
        part_features = self.backbone_tail(self.backbone(part_imgs))

        part_feature = part_features.reshape((batch, self.top_n, -1))
        part_feature = part_feature[:, :self.num_cat, :]
        part_feature = part_feature.reshape((batch, -1))

        raw_features = self.backbone_tail(raw_pre_features)

        concat_out = F.concat((part_feature, raw_features), axis=1)
        concat_logits = self.concat_net(concat_out)

        if self.aux:
            raw_logits = self.backbone_classifier(raw_features)
            part_logits = self.partcls_net(part_features).reshape((batch, self.top_n, -1))
            return concat_logits, raw_logits, part_logits, top_n_prob
        else:
            return concat_logits 
Example #21
Source File: centernet.py    From imgclsmob with MIT License
def call(self, x, training=None):
        import numpy as np

        x_ = x.numpy()
        if not is_channels_first(self.data_format):
            x_ = x_.transpose((0, 3, 1, 2))

        heatmap = x_[:, :-4]
        wh = x_[:, -4:-2]
        reg = x_[:, -2:]
        batch, _, out_h, out_w = heatmap.shape

        heatmap_flat = heatmap.reshape((batch, -1))
        indices = np.argsort(heatmap_flat)[:, -self.topk:]
        scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1)
        topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32)
        topk_indices = indices % (out_h * out_w)
        topk_ys = (topk_indices // out_w).astype(dtype=np.float32)
        topk_xs = (topk_indices % out_w).astype(dtype=np.float32)
        center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1)
        ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1)
        h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        bboxes = tf.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1)

        bboxes = bboxes * self.scale
        topk_classes = tf.expand_dims(topk_classes, axis=-1)
        scores = tf.expand_dims(scores, axis=-1)
        result = tf.concat((bboxes, topk_classes, scores), axis=-1)
        return result 
Example #22
Source File: convert2onnx.py    From centerpose with MIT License
def _gather_feat(feat, ind, mask=None):
    dim  = feat.shape[2]
    ind = np.repeat(ind[:, :, np.newaxis], dim, axis=2)
    feat = np.take_along_axis(feat, ind, 1) 
    if mask is not None:
        mask = np.expand_dims(mask, 2).reshape(feat.shape)
        feat = feat[mask]
        feat = feat.reshape(-1, dim)
    return feat 
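_gather_feat repeats ind across the feature dimension so every channel is gathered at once; a small sketch (shapes invented):

import numpy as np

feat = np.arange(12, dtype=float).reshape(1, 4, 3)  # (batch, positions, channels)
ind = np.array([[2, 0]])                            # (batch, k) positions to keep
print(_gather_feat(feat, ind))
# [[[6. 7. 8.]
#   [0. 1. 2.]]]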
Example #23
Source File: tensor.py    From dgl with Apache License 2.0
def topk(input, k, dim, descending=True):
    topk_indices = argtopk(input, k, dim, descending)
    return np.take_along_axis(input, topk_indices, axis=dim) 
Example #24
Source File: centernet.py    From imgclsmob with MIT License
def __call__(self, x):
        import numpy as np

        heatmap = x[:, :-4].array
        wh = x[:, -4:-2].array
        reg = x[:, -2:].array
        batch, _, out_h, out_w = heatmap.shape

        heatmap_flat = heatmap.reshape((batch, -1))
        indices = np.argsort(heatmap_flat)[:, -self.topk:]
        scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1)
        topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32)
        topk_indices = indices % (out_h * out_w)
        topk_ys = (topk_indices // out_w).astype(dtype=np.float32)
        topk_xs = (topk_indices % out_w).astype(dtype=np.float32)
        center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1)
        ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1)
        h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        bboxes = F.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1)

        bboxes = bboxes * self.scale
        topk_classes = F.expand_dims(topk_classes, axis=-1)
        scores = F.expand_dims(scores, axis=-1)
        result = F.concat((bboxes, topk_classes, scores), axis=-1)
        return result 
Example #25
Source File: test_base_execute.py    From mars with Apache License 2.0
def testTopkExecution(self):
        raw1, order1 = np.random.rand(5, 6, 7), None
        raw2 = np.empty((5, 6, 7), dtype=[('a', np.int32), ('b', np.float64)])
        raw2['a'] = np.random.randint(1000, size=(5, 6, 7), dtype=np.int32)
        raw2['b'] = np.random.rand(5, 6, 7)
        order2 = ['b', 'a']

        for raw, order in [(raw1, order1), (raw2, order2)]:
            for chunk_size in [7, 4]:
                a = tensor(raw, chunk_size=chunk_size)
                for axis in [0, 1, 2, None]:
                    size = raw.shape[axis] if axis is not None else raw.size
                    for largest in [True, False]:
                        for to_sort in [True, False]:
                            for parallel_kind in ['tree', 'psrs']:
                                for k in [2, size - 2, size, size + 2]:
                                    r = topk(a, k, axis=axis, largest=largest, sorted=to_sort,
                                             order=order, parallel_kind=parallel_kind)

                                    result = self.executor.execute_tensor(r, concat=True)[0]

                                    if not to_sort:
                                        result = self._handle_result(result, axis, largest, order)
                                    expected = self._topk_slow(raw, k, axis, largest, order)
                                    np.testing.assert_array_equal(result, expected)

                                    r = topk(a, k, axis=axis, largest=largest,
                                             sorted=to_sort, order=order,
                                             parallel_kind=parallel_kind,
                                             return_index=True)

                                    ta, ti = self.executor.execute_tensors(r)
                                    raw2 = raw
                                    if axis is None:
                                        raw2 = raw.flatten()
                                    np.testing.assert_array_equal(ta, np.take_along_axis(raw2, ti, axis))
                                    if not to_sort:
                                        ta = self._handle_result(ta, axis, largest, order)
                                    np.testing.assert_array_equal(ta, expected) 
Example #26
Source File: test_base_execute.py    From mars with Apache License 2.0
def testArgpartitionExecution(self):
        # only 1 chunk when axis = -1
        raw = np.random.rand(100, 10)
        x = tensor(raw, chunk_size=10)

        kth = [6, 3, 8]
        pa = argpartition(x, kth)

        r = self.executor.execute_tensor(pa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw)[:, kth], np.take_along_axis(raw, r, axis=-1)[:, kth])

        x = tensor(raw, chunk_size=(22, 4))

        pa = argpartition(x, kth)

        r = self.executor.execute_tensor(pa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw)[:, kth], np.take_along_axis(raw, r, axis=-1)[:, kth])

        raw = np.random.rand(100)

        x = tensor(raw, chunk_size=23)

        pa = argpartition(x, kth, axis=0)

        r = self.executor.execute_tensor(pa, concat=True)[0]
        np.testing.assert_array_equal(np.sort(raw, axis=0)[kth], raw[r][kth]) 
Example #27
Source File: lax_numpy_test.py    From trax with Apache License 2.0
def testTakeAlongAxisIssue1521(self):
    # https://github.com/google/jax/issues/1521
    idx = lnp.repeat(lnp.arange(3), 10).reshape((30, 1))

    def f(x):
      y = x * lnp.arange(3.).reshape((1, 3))
      return lnp.take_along_axis(y, idx, -1).sum()

    check_grads(f, (1.,), order=1) 
Example #28
Source File: test_base_execute.py    From mars with Apache License 2.0
def testPartitionIndicesExecution(self):
        # only 1 chunk when axis = -1
        raw = np.random.rand(100, 10)
        x = tensor(raw, chunk_size=10)

        kth = [2, 5, 9]
        r = partition(x, kth, return_index=True)

        pr, pi = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(pr, np.take_along_axis(raw, pi, axis=-1))
        np.testing.assert_array_equal(np.sort(raw)[:, kth], pr[:, kth])

        x = tensor(raw, chunk_size=(22, 4))

        r = partition(x, kth, return_index=True)

        pr, pi = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(pr, np.take_along_axis(raw, pi, axis=-1))
        np.testing.assert_array_equal(np.sort(raw)[:, kth], pr[:, kth])

        raw = np.random.rand(100)

        x = tensor(raw, chunk_size=23)

        r = partition(x, kth, axis=0, return_index=True)

        pr, pi = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(pr, np.take_along_axis(raw, pi, axis=-1))
        np.testing.assert_array_equal(np.sort(raw)[kth], pr[kth]) 
Example #29
Source File: complex_bingham.py    From pb_bss with MIT License
def _remove_duplicate_eigenvalues(cls, covariance_eigenvalues, eps=1e-8):
        """
        >>> import pytest; pytest.skip('Bingham is to slow')
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.5, 0.5]))[-1]
        array([0.5       , 0.50000001])

        Demonstrate the suboptimal behaviour for duplicate eigenvalues.
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.2, 0.4, 0.4]), eps=0.02)[-1]
        array([0.2 , 0.4 , 0.42])

        This function sorts the eigenvalues
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.1]))
        (array([1, 0]), array([0.1, 0.9]))
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.06, 0.04]))
        (array([2, 1, 0]), array([0.04, 0.06, 0.9 ]))
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.04, 0.06]))
        (array([2, 0, 1]), array([0.04, 0.06, 0.9 ]))

        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([1, 0.0, 0.0]))
        (array([2, 0, 1]), array([0.00000000e+00, 1.00000000e-08, 1.00000001e+00]))
        """
        permutation = np.argsort(covariance_eigenvalues, axis=-1, )
        covariance_eigenvalues = np.take_along_axis(covariance_eigenvalues, permutation, axis=-1)
        diff = np.diff(covariance_eigenvalues, axis=-1)
        # eps = covariance_eigenvalues[..., -1] * eps
        # diff = np.maximum(diff, eps[..., None])
        diff = np.maximum(diff, eps)

        # This reconstruction is not exact, but the introduced error is at most on the order of eps (1e-8)
        covariance_eigenvalues[..., 1:] = (
                covariance_eigenvalues[..., 0][..., None]
                + np.cumsum(diff, axis=-1)
        )

        # https://stackoverflow.com/a/55737198/5766934
        inverse_permutation = np.arange(permutation.shape[-1])[np.argsort(permutation, axis=-1)]
        return inverse_permutation, covariance_eigenvalues 
Example #30
Source File: xc_metrics.py    From pyxclib with MIT License
def _eval_flags(indices, true_labels, inv_psp=None):
    if sp.issparse(true_labels):
        eval_flags = np.take_along_axis(true_labels.tocsc(),
                                        indices, axis=-1).todense()
    elif isinstance(true_labels, np.ndarray):
        eval_flags = np.take_along_axis(true_labels,
                                        indices, axis=-1)
    else:
        raise TypeError("true_labels must be a scipy sparse matrix or an ndarray")
    if inv_psp is not None:
        eval_flags = np.multiply(inv_psp[indices], eval_flags)
    return eval_flags