Python mxnet.ndarray.concatenate() Examples

The following are seven code examples of mxnet.ndarray.concatenate(), drawn from open-source projects. Each example lists its source file, the project it comes from, and that project's license. You may also want to check out the other available functions and classes of the mxnet.ndarray module.
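Before the project code, a minimal self-contained sketch of what nd.concatenate does: it joins a list of NDArrays along an existing axis (axis 0 by default), and its always_copy flag controls whether a single-element input is copied. The values below are made up for illustration.

import mxnet.ndarray as nd

a = nd.array([[1, 2], [3, 4]])
b = nd.array([[5, 6]])

# Join along axis 0: the row counts add up, the other dimensions must match.
c = nd.concatenate([a, b], axis=0)
print(c.shape)  # (3, 2)

# With always_copy=False, a single-element list may be returned as-is,
# avoiding a copy; several examples below pass this flag.
d = nd.concatenate([a], always_copy=False)
print(d is a)  # True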
Example #1
Source File: anchor_generator.py    From ya_mxdet with MIT License
import mxnet.ndarray as nd  # assumed import; these snippets use the nd alias throughout

def ssd_generate_anchors(scale, ratios=nd.array([0.5, 1, 2]), append_scale=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, scale, scale) window.
    
    append_scale is used to generate an extra anchor whose scale is
    sqrt{scale * append_scale}. Set append_scale=None to disable this
    extra anchor.
    """
    base_anchor = nd.array([1, 1, scale, scale])
    anchors = _ratio_enum(base_anchor, ratios)  # _ratio_enum is a helper defined in the same file
    if append_scale is not None:
        ns = int(scale * append_scale)
        append_anchor = nd.round(nd.sqrt(nd.array([[1, 1, ns, ns]])))
        anchors = nd.concatenate([anchors, append_anchor], axis=0)
    return anchors 
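As a quick check of the append_scale branch, here is a hedged sketch with made-up numbers; the three anchor rows stand in for whatever _ratio_enum would return for scale=32:

import mxnet.ndarray as nd

# Made-up stand-in for _ratio_enum output: one (x1, y1, x2, y2) row per ratio.
anchors = nd.array([[1, 1, 32, 32],
                    [1, 1, 23, 45],
                    [1, 1, 45, 23]])

# The extra anchor built in the append_scale branch, with scale=32, append_scale=64.
ns = int(32 * 64)
append_anchor = nd.round(nd.sqrt(nd.array([[1, 1, ns, ns]])))

print(nd.concatenate([anchors, append_anchor], axis=0).shape)  # (4, 4)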
Example #2
Source File: anchor_generator.py    From ya_mxdet with MIT License
def generate_anchors(base_size=16, ratios=nd.array([0.5, 1, 2]), scales=2**nd.arange(3,6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    This implementation matches the original Faster-RCNN RPN generate_anchors(),
    but all calculations are done on mxnet.ndarray.NDArray.

    Refer to 
    https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/rpn/generate_anchors.py
    """

    base_anchor = nd.array([1, 1, base_size, base_size])
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    # _scale_enum is a helper defined in the same file
    anchors = nd.concatenate([_scale_enum(ratio_anchors[i, :], scales)
                              for i in range(ratio_anchors.shape[0])])
    return anchors 
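The list-comprehension-plus-concatenate pattern above is the part relevant to this page: each ratio anchor expands into several scaled anchors, and nd.concatenate stitches the per-row results back into a single (N, 4) matrix. A minimal sketch, with a hypothetical scale_rows standing in for _scale_enum:

import mxnet.ndarray as nd

def scale_rows(row, scales):
    # Hypothetical stand-in for _scale_enum: one scaled copy of the row per scale.
    return nd.concatenate([row.reshape((1, 4)) * s for s in scales], axis=0)

ratio_anchors = nd.array([[1, 1, 16, 16], [1, 1, 11, 23]])
scales = [8, 16, 32]  # corresponds to 2 ** arange(3, 6)

anchors = nd.concatenate([scale_rows(ratio_anchors[i, :], scales)
                          for i in range(ratio_anchors.shape[0])])
print(anchors.shape)  # (6, 4)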
Example #3
Source File: DataParallelExecutorGroup.py    From Accel with MIT License. The same helper appears verbatim in several of the other projects indexed here, among them Deformable-ConvNets, RoITransformer_DOTA, Relation-Networks-for-Object-Detection, Decoupled-Classification-Refinement, mx-DeepIM, Faster_RCNN_for_DOTA, Sequence-Level-Semantics-Aggregation, Deep-Feature-Flow-Segmentation, and kaggle-rsna18.
def _merge_multi_context(outputs, major_axis):
    """Merge outputs that lives on multiple context into one, so that they look
    like living on one context.
    """
    rets = []
    for tensors, axis in zip(outputs, major_axis):
        if axis >= 0:
            rets.append(nd.concatenate(tensors, axis=axis, always_copy=False))
        else:
            # a negative axis means there is no batch_size axis, and the
            # results should be identical on every device. We simply take the
            # first one, without checking that they actually match
            rets.append(tensors[0])
    return rets 
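With the function above in scope, a small usage sketch with made-up tensors: outputs is a list over output names, each entry holding the per-device NDArrays, and major_axis gives each output's batch axis (negative if it has none).

import mxnet.ndarray as nd

# One output name, computed on two devices; its batch axis is 0.
outputs = [[nd.ones((2, 4)), nd.zeros((2, 4))]]
major_axis = [0]

merged = _merge_multi_context(outputs, major_axis)
print(merged[0].shape)  # (4, 4): the two per-device batches are concatenated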
Example #4
Source File: train_srgan.py    From gluon-cv with Apache License 2.0 (the same function also appears in panoptic-fpn-gluon)
def plot_loss(losses_log, global_step, epoch, i):
    message = '(epoch: %d, iters: %d) ' % (epoch, i)
    for key, value in losses_log.losses.items():
        if 'err' in key:
            loss = nd.concatenate(value, axis=0).mean().asscalar()
            sw.add_scalar('err', {key: loss}, global_step)  # sw: a module-level SummaryWriter created elsewhere in the script
            message += '%s: %.6f ' % (key, loss)
    print(message)
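The nd.concatenate(value, axis=0).mean().asscalar() idiom collapses a list of logged loss arrays into one Python float. A minimal sketch with made-up values:

import mxnet.ndarray as nd

value = [nd.array([0.9, 1.1]), nd.array([1.0, 1.2])]  # hypothetical logged losses
loss = nd.concatenate(value, axis=0).mean().asscalar()
print('%.6f' % loss)  # 1.050000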
Example #5
Source File: train_srgan.py    From gluon-cv with Apache License 2.0 (the same function also appears in panoptic-fpn-gluon)
def plot_img(losses_log):
    sw.add_image(tag='lr_img', image=nd.clip(nd.concatenate(losses_log['lr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img', image=nd.clip(nd.concatenate(losses_log['hr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img_fake', image=nd.clip(nd.concatenate(losses_log['hr_img_fake'])[0:4], 0, 1)) 
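Here nd.concatenate merges the logged NCHW image batches along the batch axis before the first four images are clipped to [0, 1] for display. A self-contained sketch with random stand-in images:

import mxnet.ndarray as nd

batches = [nd.random.uniform(shape=(2, 3, 8, 8)) for _ in range(3)]  # made-up logged batches
grid = nd.clip(nd.concatenate(batches)[0:4], 0, 1)
print(grid.shape)  # (4, 3, 8, 8)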
Example #6
Source File: train_cgan.py    From gluon-cv with Apache License 2.0 (the same function also appears in panoptic-fpn-gluon)
def plot_loss(losses_log, global_step, epoch, i):
    message = '(epoch: %d, iters: %d) ' % (epoch, i)
    for key, value in losses_log.losses.items():
        if 'loss_' in key:
            loss = nd.concatenate(value, axis=0).mean().asscalar()
            sw.add_scalar('loss', {key: loss}, global_step)
            message += '%s: %.3f ' % (key, loss)
    print(message)
Example #7
Source File: train_cgan.py    From gluon-cv with Apache License 2.0 (the same function also appears in panoptic-fpn-gluon)
def plot_img(losses_log):
    sw.add_image(tag='A', image=nd.clip(nd.concatenate([losses_log['real_A'][0][0:1],
                                                        losses_log['fake_B'][0][0:1],
                                                        losses_log['rec_A'][0][0:1],
                                                        losses_log['idt_A'][0][0:1]]) * 0.5 + 0.5, 0, 1))
    sw.add_image(tag='B', image=nd.clip(nd.concatenate([losses_log['real_B'][0][0:1],
                                                        losses_log['fake_A'][0][0:1],
                                                        losses_log['rec_B'][0][0:1],
                                                        losses_log['idt_B'][0][0:1]]) * 0.5 + 0.5, 0, 1)) 