Python mxnet.gluon.data.Sampler() Examples

The following are 6 code examples of mxnet.gluon.data.Sampler(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.data, or try the search function.
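
Before looking at the project code below, it helps to recall what the base class asks for: a Sampler subclass only needs to provide __iter__ (yielding sample indices) and __len__. Below is a minimal sketch with illustrative names that are not taken from any of the projects on this page.

import random
from mxnet.gluon.data import ArrayDataset, DataLoader, Sampler

class ShuffledSampler(Sampler):
    """Toy sampler: yields dataset indices in random order."""
    def __init__(self, length):
        self._length = length

    def __iter__(self):
        indices = list(range(self._length))
        random.shuffle(indices)
        return iter(indices)

    def __len__(self):
        return self._length

# Usage: pass the sampler to a DataLoader instead of shuffle=True.
dataset = ArrayDataset(list(range(10)))
loader = DataLoader(dataset, batch_size=4, sampler=ShuffledSampler(len(dataset)))
for batch in loader:
    print(batch)
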
Example #1
Source File: samplers.py    From d-SNE with Apache License 2.0
def __init__(self, batch_size, cls_idx_dict1, cls_idx_dict2, ratio=1):
        """
        Balanced two-stream sampler; uses cls_idx_dict1 as the main dictionary and list
        :param batch_size: batch size
        :param cls_idx_dict1: class index dictionary
        :param cls_idx_dict2: class index dictionary
        :param ratio: negative / positive ratio
        """
        self.batch_size = batch_size
        self.cls_idx_dict1 = cls_idx_dict1
        self.cls_idx_dict2 = cls_idx_dict2
        self.ratio = ratio

        assert set(cls_idx_dict1.keys()) == set(cls_idx_dict2.keys()), 'The class labels of the two dictionaries are not consistent'

        self.n_cls = len(cls_idx_dict1.keys())
        self.n_samples = self.batch_size // self.n_cls

        assert self.batch_size >= self.n_cls, "batch size should be equal to or larger than the number of classes"

        self.length = self.cal_len() 
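
The cls_idx_dict arguments used in Examples #1-#3 are plain mappings from class label to the list of dataset indices belonging to that class. How d-SNE builds them is not shown on this page; the following is a minimal sketch of one way to construct such a dictionary from a label array (the helper name is illustrative).

from collections import defaultdict

def build_cls_idx_dict(labels):
    """Map each class label to the indices of the samples carrying that label."""
    cls_idx_dict = defaultdict(list)
    for idx, label in enumerate(labels):
        cls_idx_dict[int(label)].append(idx)
    return dict(cls_idx_dict)

# build_cls_idx_dict([0, 1, 0, 2, 1]) -> {0: [0, 2], 1: [1, 4], 2: [3]}
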
Example #2
Source File: samplers.py    From d-SNE with Apache License 2.0
def __init__(self, batch_size, cls_idx_dict):
        """
        Balanced sampler that makes sure every class has a similar number of samples in the batch, without replacement
        :param batch_size: batch size
        :param cls_idx_dict: class idx dictionary
        """
        self.batch_size = batch_size
        self.cls_idx_dict = cls_idx_dict

        self.n_cls = len(cls_idx_dict.keys())
        self.n_samples = self.batch_size // self.n_cls

        assert self.batch_size >= self.n_cls, "batch size should be equal to or larger than the number of classes"

        self.length = self.cal_len() 
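
Neither __iter__ nor cal_len is shown in the snippet above. As a rough sketch of the idea (an assumption, not d-SNE's actual implementation), a balanced iterator could draw n_samples indices per class without replacement until the smallest class runs out.

import random

def balanced_iter(cls_idx_dict, n_samples):
    # Shuffle a private copy of each class's index pool.
    pools = {c: random.sample(idxs, len(idxs)) for c, idxs in cls_idx_dict.items()}
    # Emit n_samples indices per class per batch until some class is exhausted.
    while all(len(pool) >= n_samples for pool in pools.values()):
        batch = []
        for c in pools:
            batch.extend(pools[c][:n_samples])
            pools[c] = pools[c][n_samples:]
        random.shuffle(batch)
        yield from batch
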
Example #3
Source File: samplers.py    From d-SNE with Apache License 2.0
def __init__(self, batch_size, cls_idx_dict1, idx_lst2, ratio=1, preds_f=None):
        """
        Two-stream weighted balanced sampler
        :param batch_size: batch size (bs, )
        :param cls_idx_dict1: class index dictionary (C, )
        :param idx_lst2: index list, [l]
        :param ratio: ratio
        :param preds_f: prediction file, [l x C]
        """
        self.batch_size = batch_size
        self.cls_idx_dict1 = cls_idx_dict1
        self.idx_lst2 = idx_lst2
        self.ratio = ratio
        self.preds_f = preds_f

        self.cls = cls_idx_dict1.keys()
        self.n_cls = len(cls_idx_dict1.keys())
        self.n_samples = self.batch_size // self.n_cls

        assert self.batch_size >= self.n_cls, "batch size should be equal to or larger than the number of classes"

        preds_cls_idx = load_json(self.preds_f)
        keys = list(preds_cls_idx.keys())
        for k in keys:
            preds_cls_idx[int(k)] = preds_cls_idx.pop(k)

        self.preds_cls_idx = preds_cls_idx

        self.length = cal_len(self.cls_idx_dict1) 
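
Both load_json and cal_len are d-SNE helpers that are not shown on this page. load_json presumably just parses a JSON file; JSON object keys always come back as strings, which is why the loop above converts them with int(k). A minimal equivalent under that assumption:

import json

def load_json(path):
    # JSON object keys are always strings, hence the int(k) conversion above.
    with open(path) as f:
        return json.load(f)
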
Example #4
Source File: samplers.py    From d-SNE with Apache License 2.0
def __init__(self, idx1, idx2):
        """
        Two-stream random sampler
        :param idx1: first index list
        :param idx2: second index list
        """
        self.lst1 = idx1
        self.lst2 = idx2

        self.length = len(idx1) 
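
Here too __iter__ is omitted. One plausible reading (an assumption, not the d-SNE code) is that the sampler walks the first index list in random order and pairs each entry with a randomly drawn partner from the second list.

import random

def two_stream_iter(lst1, lst2):
    # Yield (idx1, idx2) pairs: every index from stream 1 gets a random partner from stream 2.
    for idx1 in random.sample(lst1, len(lst1)):
        yield idx1, random.choice(lst2)
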
Example #5
Source File: sampler.py    From gluon-cv with Apache License 2.0
def forward(self, matches, ious):
        """Quota Sampler

        Parameters
        ----------
        matches : NDArray or Symbol
            Matching results, positive number for positive matching, -1 for not matched.
        ious : NDArray or Symbol
            IOU overlaps with shape (N, M), batching is supported.

        Returns
        -------
        NDArray or Symbol
            Sampling results with same shape as ``matches``.
            1 for positive, -1 for negative, 0 for ignore.

        """
        F = mx.nd
        max_pos = int(round(self._pos_ratio * self._num_sample))
        max_neg = int(self._neg_ratio * self._num_sample)
        results = []
        for i in range(matches.shape[0]):
            # init with 0s, which are ignored
            result = F.zeros_like(matches[0])
            # positive samples
            ious_max = ious.max(axis=-1)[i]
            result = F.where(matches[i] >= 0, F.ones_like(result), result)
            result = F.where(ious_max >= self._pos_thresh, F.ones_like(result), result)
            # negative samples with label -1
            neg_mask = ious_max < self._neg_thresh_high
            neg_mask = neg_mask * (ious_max >= self._neg_thresh_low)
            result = F.where(neg_mask, F.ones_like(result) * -1, result)

            # re-balance if number of positive or negative exceed limits
            result = result.asnumpy()
            num_pos = int((result > 0).sum())
            if num_pos > max_pos:
                disable_indices = np.random.choice(
                    np.where(result > 0)[0], size=(num_pos - max_pos), replace=False)
                result[disable_indices] = 0  # use 0 to ignore
            num_neg = int((result < 0).sum())
            if self._fill_negative:
                # if there are fewer positive samples than the quota, let extra negatives fill the gap
                max_neg = max(self._num_sample - min(num_pos, max_pos), max_neg)
            if num_neg > max_neg:
                disable_indices = np.random.choice(
                    np.where(result < 0)[0], size=(num_neg - max_neg), replace=False)
                result[disable_indices] = 0
            results.append(mx.nd.array(result))

        return mx.nd.stack(*results, axis=0) 
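
The re-balancing step at the end is plain NumPy and easy to try in isolation. Below is a small sketch with made-up quotas; in the sampler itself they come from _pos_ratio, _neg_ratio and _num_sample.

import numpy as np

result = np.array([1, 1, 1, -1, -1, 0, 1, -1], dtype=np.float32)
max_pos, max_neg = 2, 2  # made-up quotas

num_pos = int((result > 0).sum())
if num_pos > max_pos:
    drop = np.random.choice(np.where(result > 0)[0], size=num_pos - max_pos, replace=False)
    result[drop] = 0  # demote surplus positives to "ignore"

num_neg = int((result < 0).sum())
if num_neg > max_neg:
    drop = np.random.choice(np.where(result < 0)[0], size=num_neg - max_neg, replace=False)
    result[drop] = 0  # demote surplus negatives to "ignore"

print(result)  # at most 2 positives and 2 negatives survive, the rest are 0
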
Example #6
Source File: sampler.py    From gluon-cv with Apache License 2.0
def forward(self, is_train, req, in_data, out_data, aux):
        """Quota Sampler

        Parameters
        ----------
        in_data: array-like of Symbol
            [matches, ious], see below.
        matches : NDArray or Symbol
            Matching results, positive number for positive matching, -1 for not matched.
        ious : NDArray or Symbol
            IOU overlaps with shape (N, M), batching is supported.

        Returns
        -------
        NDArray or Symbol
            Sampling results with the same shape as ``matches``, written to ``out_data[0]``.
            1 for positive, -1 for negative, 0 for ignore.

        """
        matches = in_data[0]
        ious = in_data[1]
        F = mx.nd
        max_pos = int(round(self._pos_ratio * self._num_sample))
        max_neg = int(self._neg_ratio * self._num_sample)
        for i in range(matches.shape[0]):
            # init with 0s, which are ignored
            result = F.zeros_like(matches[i])
            # negative samples with label -1
            ious_max = ious.max(axis=-1)[i]
            neg_mask = ious_max < self._neg_thresh_high
            neg_mask = neg_mask * (ious_max >= self._neg_thresh_low)
            result = F.where(neg_mask, F.ones_like(result) * -1, result)
            # positive samples
            result = F.where(matches[i] >= 0, F.ones_like(result), result)
            result = F.where(ious_max >= self._pos_thresh, F.ones_like(result), result)

            # re-balance if number of positive or negative exceed limits
            result = result.asnumpy()
            num_pos = int((result > 0).sum())
            if num_pos > max_pos:
                disable_indices = np.random.choice(
                    np.where(result > 0)[0], size=(num_pos - max_pos), replace=False)
                result[disable_indices] = 0  # use 0 to ignore
            num_neg = int((result < 0).sum())
            if self._fill_negative:
                # if there are fewer positive samples than the quota, let extra negatives fill the gap
                max_neg = max(self._num_sample - min(num_pos, max_pos), max_neg)
            if num_neg > max_neg:
                disable_indices = np.random.choice(
                    np.where(result < 0)[0], size=(num_neg - max_neg), replace=False)
                result[disable_indices] = 0  # use 0 to ignore

            self.assign(out_data[0][i], req[0], mx.nd.array(result))
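
Example #6 is the same quota sampling written as an mx.operator.CustomOp, so it plugs into the symbolic graph instead of being called directly. The registration side is not shown in the snippet; the following is a minimal sketch of what it typically looks like (the class names and the zero-gradient backward are assumptions here, and the real gluon-cv code may differ).

import mxnet as mx

class QuotaSamplerOp(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        ...  # the method shown in Example #6

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Sampling labels carry no gradient, so propagate zeros.
        for i, grad in enumerate(in_grad):
            self.assign(grad, req[i], mx.nd.zeros_like(grad))

@mx.operator.register('quota_sampler')
class QuotaSamplerProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(QuotaSamplerProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['matches', 'ious']

    def list_outputs(self):
        return ['samples']

    def infer_shape(self, in_shape):
        # The output labels share the shape of `matches`.
        return in_shape, [in_shape[0]], []

    def create_operator(self, ctx, in_shapes, in_dtypes):
        return QuotaSamplerOp()

# Symbolic usage: samples = mx.sym.Custom(matches, ious, op_type='quota_sampler')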