Python mxnet.nd.repeat() Examples

The following are 11 code examples of mxnet.nd.repeat(), drawn from open-source projects. Each example notes its source file, originating project, and license. You may also want to check out the other available functions and classes of the mxnet.nd module.
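
As a quick reference before the examples: nd.repeat tiles the elements of an NDArray a given number of times, either over the flattened array (when no axis is given) or along a chosen axis. A minimal sketch of the behavior, not taken from any of the projects below:

from mxnet import nd

x = nd.array([[1, 2], [3, 4]])
nd.repeat(x, repeats=2)          # no axis: flatten, then repeat each element -> [1. 1. 2. 2. 3. 3. 4. 4.]
nd.repeat(x, repeats=2, axis=0)  # repeat each row    -> shape (4, 2)
nd.repeat(x, repeats=2, axis=1)  # repeat each column -> shape (2, 4)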
Example #1
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def Route(self, x):
        # b_mat = nd.repeat(self.b_mat.data(), repeats=x.shape[0], axis=0)  # nd.stop_gradient(nd.repeat(self.b_mat.data(), repeats=x.shape[0], axis=0))
        # routing logits, one per (capsule, location) pair, reset for every batch
        b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
        x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
        # broadcast the shared transformation weights across the batch
        w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0), repeats=x.shape[0], axis=0)
        u_ = w_expand * x_expand
        # u_ = nd.abs(w_expand - x_expand)
        u = nd.sum(u_, axis=1)
        u_no_gradient = nd.stop_gradient(u)
        # dynamic routing: only the final iteration lets gradients flow through u
        for i in range(self.route_num):
            c_mat = nd.softmax(b_mat, axis=2)
            if i == self.route_num - 1:
                s = nd.sum(u * c_mat, axis=-1)
            else:
                s = nd.sum(u_no_gradient * c_mat, axis=-1)
            v = squash(s, 1)
            v1 = nd.expand_dims(v, axis=-1)
            if i != self.route_num - 1:
                update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
                b_mat = b_mat + update_term
        return v
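
In the routing code above, nd.repeat is used to broadcast the shared weight matrix self.w_ij across the batch dimension before the elementwise product with the expanded input. A stripped-down sketch of that pattern with hypothetical sizes (the real capsule dimensions come from the surrounding block definition):

from mxnet import nd

batch_size, dim, num_cap, num_locations = 4, 8, 10, 6   # hypothetical sizes
w_ij = nd.random.normal(shape=(dim, num_cap, num_locations))
# add a leading batch axis, then repeat the weights once per sample in the batch
w_expand = nd.repeat(nd.expand_dims(w_ij, axis=0), repeats=batch_size, axis=0)
print(w_expand.shape)   # (4, 8, 10, 6)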
Example #2
Source File: coder.py    From cascade_rcnn_gluon with Apache License 2.0
def forward(self, samples, matches, anchors, refs):
        """Forward"""
        F = nd
        # TODO(zhreshold): batch_pick, take multiple elements?
        ref_boxes = nd.repeat(refs.reshape((0, 1, -1, 4)), axis=1, repeats=matches.shape[1])
        ref_boxes = nd.split(ref_boxes, axis=-1, num_outputs=4, squeeze_axis=True)
        ref_boxes = nd.concat(*[F.pick(ref_boxes[i], matches, axis=2).reshape((0, -1, 1)) \
            for i in range(4)], dim=2)
        g = self.corner_to_center(ref_boxes)
        a = self.corner_to_center(anchors)
        t0 = ((g[0] - a[0]) / a[2] - self._means[0]) / self._stds[0]
        t1 = ((g[1] - a[1]) / a[3] - self._means[1]) / self._stds[1]
        t2 = (F.log(g[2] / a[2]) - self._means[2]) / self._stds[2]
        t3 = (F.log(g[3] / a[3]) - self._means[3]) / self._stds[3]
        codecs = F.concat(t0, t1, t2, t3, dim=2)
        temp = F.tile(samples.reshape((0, -1, 1)), reps=(1, 1, 4)) > 0.5
        targets = F.where(temp, codecs, F.zeros_like(codecs))
        masks = F.where(temp, F.ones_like(temp), F.zeros_like(temp))
        return targets, masks 
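
The encoder above repeats the reference boxes once per anchor row and then gathers the matched box for every anchor with F.pick. A minimal, self-contained sketch of that repeat-then-pick pattern with toy inputs (not the project's real data):

from mxnet import nd

refs = nd.array([[[0, 0, 2, 2], [1, 1, 3, 3]]])      # (batch=1, num_refs=2, 4) corner boxes
matches = nd.array([[1, 0, 1]])                      # (batch=1, num_anchors=3) indices into refs
ref_boxes = nd.repeat(refs.reshape((0, 1, -1, 4)), axis=1, repeats=matches.shape[1])   # (1, 3, 2, 4)
ref_boxes = nd.split(ref_boxes, axis=-1, num_outputs=4, squeeze_axis=True)             # 4 arrays of (1, 3, 2)
matched = nd.concat(*[nd.pick(ref_boxes[i], matches, axis=2).reshape((0, -1, 1))
                      for i in range(4)], dim=2)     # (1, 3, 4): matched box per anchor
print(matched.asnumpy())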
Example #3
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def Route(self, x):
        b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
        x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
        w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0), repeats=x.shape[0], axis=0)
        u_ = w_expand * x_expand
        u = nd.sum(u_, axis=1)
        for i in range(self.route_num):
            c_mat = nd.softmax(b_mat, axis=2)
            s = nd.sum(u * c_mat, axis=-1)
            v = squash(s, 1)
            v1 = nd.expand_dims(v, axis=-1)
            update_term = nd.sum(u * v1, axis=1, keepdims=True)
            b_mat = b_mat + update_term
        return v
Example #4
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def Route(self, x):
        # print x.context
        b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
        x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
        w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0), repeats=x.shape[0], axis=0)
        u_ = w_expand * x_expand
        u = nd.sum(u_, axis=1)
        # u_ = nd.square(w_expand - x_expand)
        # u = -nd.sum(u_, axis=1)
        u_no_gradient = nd.stop_gradient(u)
        for i in range(self.route_num):
            # c_mat = nd.softmax(b_mat, axis=2)
            c_mat = nd.sigmoid(b_mat)
            if i == self.route_num - 1:
                s = nd.sum(u * c_mat, axis=-1)
            else:
                s = nd.sum(u_no_gradient * c_mat, axis=-1)
            v = squash(s, 1)
            if i != self.route_num - 1:
                v1 = nd.expand_dims(v, axis=-1)
                update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
                b_mat = b_mat + update_term
                # b_mat = update_term
            # else:
            #     v = s
        return v
Example #5
Source File: coder.py    From cascade_rcnn_gluon with Apache License 2.0
def forward(self, samples, matches, anchors, labels, refs):
        """Encode BBox One entry per category"""
        F = nd
        ref_boxes = F.repeat(refs.reshape((0, 1, -1, 4)), axis=1, repeats=matches.shape[1])
        ref_boxes = F.split(ref_boxes, axis=-1, num_outputs=4, squeeze_axis=True)
        ref_boxes = F.concat(*[F.pick(ref_boxes[i], matches, axis=2).reshape((0, -1, 1)) \
            for i in range(4)], dim=2)
        ref_labels = F.repeat(labels.reshape((0, 1, -1)), axis=1, repeats=matches.shape[1])
        ref_labels = F.pick(ref_labels, matches, axis=2).reshape((0, -1, 1))
        g = self.corner_to_center(ref_boxes)
        a = self.corner_to_center(anchors)
        t0 = ((g[0] - a[0]) / a[2] - self._means[0]) / self._stds[0]
        t1 = ((g[1] - a[1]) / a[3] - self._means[1]) / self._stds[1]
        t2 = (F.log(g[2] / a[2]) - self._means[2]) / self._stds[2]
        t3 = (F.log(g[3] / a[3]) - self._means[3]) / self._stds[3]
        codecs = F.concat(t0, t1, t2, t3, dim=2)
        temp = F.tile(samples.reshape((0, -1, 1)), reps=(1, 1, 4)) > 0.5
        targets = F.where(temp, codecs, F.zeros_like(codecs))
        masks = F.where(temp, F.ones_like(temp), F.zeros_like(temp))
        out_targets = []
        out_masks = []
        for cid in range(self._num_class):
            same_cid = ref_labels == cid
            # keep the original targets
            out_targets.append(targets)
            # but mask out the anchors that do not belong to this class
            out_masks.append(masks * same_cid.repeat(axis=-1, repeats=4))
        all_targets = F.stack(*out_targets, axis=0)
        all_masks = F.stack(*out_masks, axis=0)
        return all_targets, all_masks 
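
The per-class loop above relies on NDArray.repeat to stretch a single class-match flag over the four box coordinates before multiplying it into the masks. A minimal sketch of just that masking step, with made-up flags and masks:

from mxnet import nd

same_cid = nd.array([[[1], [0], [1]]])        # (batch=1, num_anchors=3, 1): anchor matched to this class?
masks = nd.ones((1, 3, 4))                    # one mask value per box coordinate
class_masks = masks * same_cid.repeat(axis=-1, repeats=4)   # flag broadcast over the 4 coordinates
print(class_masks.asnumpy())                  # rows of zeros for anchors of other classes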
Example #6
Source File: coder.py    From cascade_rcnn_gluon with Apache License 2.0
def forward(self, samples, matches, anchors, labels, refs):
        """Encode BBox One entry per category"""
        F = nd
        ref_boxes = F.repeat(refs.reshape((0, 1, -1, 4)), axis=1, repeats=matches.shape[1])
        ref_boxes = F.split(ref_boxes, axis=-1, num_outputs=4, squeeze_axis=True)
        ref_boxes = F.concat(*[F.pick(ref_boxes[i], matches, axis=2).reshape((0, -1, 1)) \
            for i in range(4)], dim=2)
        ref_labels = F.repeat(labels.reshape((0, 1, -1)), axis=1, repeats=matches.shape[1])
        ref_labels = F.pick(ref_labels, matches, axis=2).reshape((0, -1, 1))
        g = self.corner_to_center(ref_boxes)
        a = self.corner_to_center(anchors)
        t0 = ((g[0] - a[0]) / a[2] - self._means[0]) / self._stds[0]
        t1 = ((g[1] - a[1]) / a[3] - self._means[1]) / self._stds[1]
        t2 = (F.log(g[2] / a[2]) - self._means[2]) / self._stds[2]
        t3 = (F.log(g[3] / a[3]) - self._means[3]) / self._stds[3]
        codecs = F.concat(t0, t1, t2, t3, dim=2)
        temp = F.tile(samples.reshape((0, -1, 1)), reps=(1, 1, 4)) > 0.5
        targets = F.where(temp, codecs, F.zeros_like(codecs))
        masks = F.where(temp, F.ones_like(temp), F.zeros_like(temp))
        out_targets = []
        out_masks = []
        # for cid in range(self._num_class):
        out_targets.append(targets)
        # in this variant the masks are not restricted per class
        out_masks.append(masks)
        all_targets = F.stack(*out_targets, axis=0)
        all_masks = F.stack(*out_masks, axis=0)
        return all_targets, all_masks 
Example #7
Source File: coder.py    From cascade_rcnn_gluon with Apache License 2.0
def hybrid_forward(self, F, samples, matches, refs):
        refs = F.repeat(refs.reshape((0, 1, -1)), axis=1, repeats=matches.shape[1])
        target_ids = F.pick(refs, matches, axis=2) + 1
        targets = F.where(samples > 0.5, target_ids, nd.ones_like(target_ids) * self._ignore_label)
        targets = F.where(samples < -0.5, nd.zeros_like(targets), targets)
        return targets 
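
This encoder repeats the per-image class references once per anchor row, picks the matched class id with F.pick, and then uses the sampling result to separate positives, negatives, and ignored anchors. A toy run of that pattern with hypothetical inputs (self._ignore_label replaced by a literal -1):

from mxnet import nd

refs = nd.array([[3, 7]])        # (batch=1, num_refs=2) ground-truth class ids
matches = nd.array([[1, 0, 1]])  # (batch=1, num_anchors=3) index of the matched reference
samples = nd.array([[1, -1, 0]]) # 1 = positive, -1 = negative, 0 = ignored (as produced by a sampler)

refs_rep = nd.repeat(refs.reshape((0, 1, -1)), axis=1, repeats=matches.shape[1])   # (1, 3, 2)
target_ids = nd.pick(refs_rep, matches, axis=2) + 1            # shift by 1 so 0 can mean background
targets = nd.where(samples > 0.5, target_ids, nd.ones_like(target_ids) * -1)       # -1 stands in for ignore_label
targets = nd.where(samples < -0.5, nd.zeros_like(targets), targets)                # negatives become background
print(targets.asnumpy())         # [[ 8.  0. -1.]]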
Example #8
Source File: image.py    From gluon-cv with Apache License 2.0
def resize_contain(src, size, fill=0):
    """Resize the image to fit in the given area while keeping aspect ratio.

    If both the height and the width in `size` are larger than
    the height and the width of input image, the image is placed on
    the center with an appropriate padding to match `size`.
    Otherwise, the input image is scaled to fit in a canvas whose size
    is `size` while preserving aspect ratio.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        The original image in HWC format.
    size : tuple
        Tuple of length 2 as (width, height).
    fill : int or float or array-like
        The value(s) for the padded borders. If `fill` is a single number, all
        channels are padded with that value. Otherwise `fill` must have the same
        length as the number of image channels, and each channel is padded with
        its own value.

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    tuple
        Tuple of (offset_x, offset_y, scaled_x, scaled_y)

    """
    h, w, c = src.shape
    ow, oh = size
    scale_h = oh / h
    scale_w = ow / w
    scale = min(min(scale_h, scale_w), 1)
    scaled_x = int(w * scale)
    scaled_y = int(h * scale)
    if scale < 1:
        src = mx.image.imresize(src, scaled_x, scaled_y)

    off_y = (oh - scaled_y) // 2 if scaled_y < oh else 0
    off_x = (ow - scaled_x) // 2 if scaled_x < ow else 0

    # make canvas
    if isinstance(fill, numeric_types):
        dst = nd.full(shape=(oh, ow, c), val=fill, dtype=src.dtype)
    else:
        fill = nd.array(fill, ctx=src.context)
        if not c == fill.size:
            raise ValueError("Channel and fill size mismatch, {} vs {}".format(c, fill.size))
        dst = nd.repeat(fill, repeats=oh * ow).reshape((oh, ow, c))

    dst[off_y:off_y+scaled_y, off_x:off_x+scaled_x, :] = src
    return dst, (off_x, off_y, scaled_x, scaled_y) 
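
A short usage sketch for resize_contain as defined above, assuming the module-level names it relies on (mx, nd, numeric_types) are in scope as in the original file (imported explicitly here). The tuple fill goes through the nd.repeat canvas construction shown in the function:

import mxnet as mx
from mxnet import nd
from mxnet.base import numeric_types   # names used inside resize_contain above

img = nd.random.uniform(0, 255, shape=(300, 400, 3))   # hypothetical HWC image (float values)
out, (off_x, off_y, scaled_x, scaled_y) = resize_contain(img, (512, 512), fill=(104, 117, 123))
print(out.shape)                          # (512, 512, 3)
print(off_x, off_y, scaled_x, scaled_y)   # 56 106 400 300: image centered on the padded canvas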
Example #9
Source File: image.py    From MobileFace with MIT License
def resize_contain(src, size, fill=0):
    """Resize the image to fit in the given area while keeping aspect ratio.

    If both the height and the width in `size` are larger than
    the height and the width of input image, the image is placed on
    the center with an appropriate padding to match `size`.
    Otherwise, the input image is scaled to fit in a canvas whose size
    is `size` while preserving aspect ratio.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        The original image in HWC format.
    size : tuple
        Tuple of length 2 as (width, height).
    fill : int or float or array-like
        The value(s) for the padded borders. If `fill` is a single number, all
        channels are padded with that value. Otherwise `fill` must have the same
        length as the number of image channels, and each channel is padded with
        its own value.

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    tuple
        Tuple of (offset_x, offset_y, scaled_x, scaled_y)

    """
    h, w, c = src.shape
    ow, oh = size
    scale_h = oh / h
    scale_w = ow / w
    scale = min(min(scale_h, scale_w), 1)
    scaled_x = int(w * scale)
    scaled_y = int(h * scale)
    if scale < 1:
        src = mx.image.imresize(src, scaled_x, scaled_y)

    off_y = (oh - scaled_y) // 2 if scaled_y < oh else 0
    off_x = (ow - scaled_x) // 2 if scaled_x < ow else 0

    # make canvas
    if isinstance(fill, numeric_types):
        dst = nd.full(shape=(oh, ow, c), val=fill, dtype=src.dtype)
    else:
        fill = nd.array(fill, ctx=src.context)
        if not c == fill.size:
            raise ValueError("Channel and fill size mismatch, {} vs {}".format(c, fill.size))
        dst = nd.repeat(fill, repeats=oh * ow).reshape((oh, ow, c))

    dst[off_y:off_y+scaled_y, off_x:off_x+scaled_x, :] = src
    return dst, (off_x, off_y, scaled_x, scaled_y) 
Example #10
Source File: image.py    From panoptic-fpn-gluon with Apache License 2.0
def resize_contain(src, size, fill=0):
    """Resize the image to fit in the given area while keeping aspect ratio.

    If both the height and the width in `size` are larger than
    the height and the width of input image, the image is placed on
    the center with an appropriate padding to match `size`.
    Otherwise, the input image is scaled to fit in a canvas whose size
    is `size` while preserving aspect ratio.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        The original image in HWC format.
    size : tuple
        Tuple of length 2 as (width, height).
    fill : int or float or array-like
        The value(s) for the padded borders. If `fill` is a single number, all
        channels are padded with that value. Otherwise `fill` must have the same
        length as the number of image channels, and each channel is padded with
        its own value.

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    tuple
        Tuple of (offset_x, offset_y, scaled_x, scaled_y)

    """
    h, w, c = src.shape
    ow, oh = size
    scale_h = oh / h
    scale_w = ow / w
    scale = min(min(scale_h, scale_w), 1)
    scaled_x = int(w * scale)
    scaled_y = int(h * scale)
    if scale < 1:
        src = mx.image.imresize(src, scaled_x, scaled_y)

    off_y = (oh - scaled_y) // 2 if scaled_y < oh else 0
    off_x = (ow - scaled_x) // 2 if scaled_x < ow else 0

    # make canvas
    if isinstance(fill, numeric_types):
        dst = nd.full(shape=(oh, ow, c), val=fill, dtype=src.dtype)
    else:
        fill = nd.array(fill, ctx=src.context)
        if not c == fill.size:
            raise ValueError("Channel and fill size mismatch, {} vs {}".format(c, fill.size))
        dst = nd.repeat(fill, repeats=oh * ow).reshape((oh, ow, c))

    dst[off_y:off_y+scaled_y, off_x:off_x+scaled_x, :] = src
    return dst, (off_x, off_y, scaled_x, scaled_y) 
Example #11
Source File: image.py    From cascade_rcnn_gluon with Apache License 2.0
def resize_contain(src, size, fill=0):
    """Resize the image to fit in the given area while keeping aspect ratio.

    If both the height and the width in `size` are larger than
    the height and the width of input image, the image is placed on
    the center with an appropriate padding to match `size`.
    Otherwise, the input image is scaled to fit in a canvas whose size
    is `size` while preserving aspect ratio.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        The original image in HWC format.
    size : tuple
        Tuple of length 2 as (width, height).
    fill : int or float or array-like
        The value(s) for the padded borders. If `fill` is a single number, all
        channels are padded with that value. Otherwise `fill` must have the same
        length as the number of image channels, and each channel is padded with
        its own value.

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    tuple
        Tuple of (offset_x, offset_y, scaled_x, scaled_y)

    """
    h, w, c = src.shape
    ow, oh = size
    scale_h = oh / h
    scale_w = ow / w
    scale = min(min(scale_h, scale_w), 1)
    scaled_x = int(w * scale)
    scaled_y = int(h * scale)
    if scale < 1:
        src = mx.image.imresize(src, scaled_x, scaled_y)

    off_y = (oh - scaled_y) // 2 if scaled_y < oh else 0
    off_x = (ow - scaled_x) // 2 if scaled_x < ow else 0

    # make canvas
    if isinstance(fill, numeric_types):
        dst = nd.full(shape=(oh, ow, c), val=fill, dtype=src.dtype)
    else:
        fill = nd.array(fill, ctx=src.context)
        if not c == fill.size:
            raise ValueError("Channel and fill size mismatch, {} vs {}".format(c, fill.size))
        dst = nd.repeat(fill, repeats=oh * ow).reshape((oh, ow, c))

    dst[off_y:off_y+scaled_y, off_x:off_x+scaled_x, :] = src
    return dst, (off_x, off_y, scaled_x, scaled_y)