Python mxnet.ndarray.clip() Examples

The following are 19 code examples of mxnet.ndarray.clip(), drawn from open-source projects. The source file and originating project are noted above each example.
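As a quick refresher before the examples: nd.clip(data, a_min, a_max) returns a new NDArray with every element limited to the closed interval [a_min, a_max]; it does not modify its input. A minimal sketch:

import mxnet.ndarray as nd

x = nd.array([-2.0, 0.5, 3.0])
y = nd.clip(x, a_min=0.0, a_max=1.0)  # returns a new NDArray; x is unchanged
print(y.asnumpy())                    # [0.  0.5 1. ]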
Example #1
Source File: utils.py    From training_results_v0.6 with Apache License 2.0
def tensor_save_rgbimage(img, filename, cuda=False):
    img = F.clip(img, 0, 255).asnumpy()
    img = img.transpose(1, 2, 0).astype('uint8')
    img = Image.fromarray(img)
    img.save(filename) 
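In this snippet, F is presumably an alias for mxnet.ndarray and Image comes from PIL, as in the source file; the clip keeps pixel values inside the valid 0-255 range so the later uint8 cast cannot wrap around. A self-contained sketch under those assumptions:

import mxnet.ndarray as F
from PIL import Image

def save_rgb(img, filename):
    arr = F.clip(img, 0, 255).asnumpy()           # clamp before the cast to avoid wrap-around
    arr = arr.transpose(1, 2, 0).astype('uint8')  # CHW -> HWC for PIL
    Image.fromarray(arr).save(filename)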
Example #2
Source File: kaggle_k_fold_cross_validation.py    From SNIPER-mxnet with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train) 
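Clipping the predictions to [1, inf) keeps nd.log well defined even if the network outputs non-positive values. The factor of 2 suggests square_loss is Gluon's L2Loss, which computes 0.5 * (pred - label)^2; under that assumption the whole expression reduces to a plain RMSE of the logarithms:

import numpy as np
import mxnet.ndarray as nd

def rmse_log(preds, labels):
    preds = nd.clip(preds, 1, float('inf'))  # keep the logarithm defined
    diff = nd.log(preds) - nd.log(labels)
    return np.sqrt(nd.mean(diff * diff).asscalar())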
Example #3
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def imagenet_clamp_batch(batch, low, high):
    """ Not necessary in practice """
    F.clip(batch[:,0,:,:],low-123.680, high-123.680)
    F.clip(batch[:,1,:,:],low-116.779, high-116.779)
    F.clip(batch[:,2,:,:],low-103.939, high-103.939) 
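Note that F.clip returns a new array rather than clipping in place, so as written these three calls discard their results, which is consistent with the "Not necessary in practice" docstring. A hypothetical in-place variant (assuming the same mxnet.ndarray alias for F) would assign the clipped slices back; the offsets are the per-channel ImageNet means:

import mxnet.ndarray as F

def imagenet_clamp_batch_inplace(batch, low, high):
    # hypothetical variant: write the clipped values back into the batch
    for c, mean in enumerate((123.680, 116.779, 103.939)):
        batch[:, c, :, :] = F.clip(batch[:, c, :, :], low - mean, high - mean)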
Example #4
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def tensor_save_rgbimage(img, filename, cuda=False):
    img = F.clip(img, 0, 255).asnumpy()
    img = img.transpose(1, 2, 0).astype('uint8')
    img = Image.fromarray(img)
    img.save(filename) 
Example #5
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def imagenet_clamp_batch(batch, low, high):
    """ Not necessary in practice """
    F.clip(batch[:,0,:,:],low-123.680, high-123.680)
    F.clip(batch[:,1,:,:],low-116.779, high-116.779)
    F.clip(batch[:,2,:,:],low-103.939, high-103.939) 
Example #6
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def tensor_save_rgbimage(img, filename, cuda=False):
    img = F.clip(img, 0, 255).asnumpy()
    img = img.transpose(1, 2, 0).astype('uint8')
    img = Image.fromarray(img)
    img.save(filename) 
Example #7
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def plot_img(losses_log):
    sw.add_image(tag='A', image=nd.clip(nd.concatenate([losses_log['real_A'][0][0:1],
                                                        losses_log['fake_B'][0][0:1],
                                                        losses_log['rec_A'][0][0:1],
                                                        losses_log['idt_A'][0][0:1]]) * 0.5 + 0.5, 0, 1))
    sw.add_image(tag='B', image=nd.clip(nd.concatenate([losses_log['real_B'][0][0:1],
                                                        losses_log['fake_A'][0][0:1],
                                                        losses_log['rec_B'][0][0:1],
                                                        losses_log['idt_B'][0][0:1]]) * 0.5 + 0.5, 0, 1)) 
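The generator outputs here are presumably tanh-activated and live in [-1, 1]; the * 0.5 + 0.5 maps them to [0, 1], and the clip clamps any numeric spill, since image summary writers such as mxboard's SummaryWriter (assumed to be what sw is) expect values in that range. A minimal sketch of the rescaling step:

import mxnet.ndarray as nd

fake = nd.random.uniform(-1, 1, shape=(1, 3, 64, 64))  # stand-in for a generator output
img = nd.clip(fake * 0.5 + 0.5, 0, 1)                  # [-1, 1] -> [0, 1], clamped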
Example #8
Source File: train_srgan.py    From panoptic-fpn-gluon with Apache License 2.0
def plot_img(losses_log):
    sw.add_image(tag='lr_img', image=nd.clip(nd.concatenate(losses_log['lr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img', image=nd.clip(nd.concatenate(losses_log['hr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img_fake', image=nd.clip(nd.concatenate(losses_log['hr_img_fake'])[0:4], 0, 1)) 
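Here the clip serves the same display purpose, while [0:4] keeps only the first four images of the concatenated batch. A hedged sketch of the slice-then-clamp step:

import mxnet.ndarray as nd

batch = nd.random.uniform(-0.1, 1.1, shape=(8, 3, 32, 32))  # stand-in for logged images
display = nd.clip(batch[0:4], 0, 1)                         # first four, clamped to [0, 1]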
Example #9
Source File: kaggle_k_fold_cross_validation.py    From training_results_v0.6 with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train) 
Example #10
Source File: utils.py    From training_results_v0.6 with Apache License 2.0
def imagenet_clamp_batch(batch, low, high):
    """ Not necessary in practice """
    F.clip(batch[:,0,:,:],low-123.680, high-123.680)
    F.clip(batch[:,1,:,:],low-116.779, high-116.779)
    F.clip(batch[:,2,:,:],low-103.939, high-103.939) 
Example #11
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def tensor_save_rgbimage(img, filename, cuda=False):
    img = F.clip(img, 0, 255).asnumpy()
    img = img.transpose(1, 2, 0).astype('uint8')
    img = Image.fromarray(img)
    img.save(filename) 
Example #12
Source File: custom_layers.py    From d-SNE with Apache License 2.0
def hybrid_forward(self, F, x, weight):
        x_norm = F.L2Normalization(x, mode='instance', name='x_n')
        w_norm = F.L2Normalization(weight, mode='instance', name='w_n')
        cos_theta = F.FullyConnected(x_norm, w_norm, no_bias=True, num_hidden=self._units, name='cos_theta')
        cos_theta = F.clip(cos_theta, a_min=-1, a_max=1)
        return cos_theta 
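With both the inputs and the weight rows L2-normalized, the fully connected layer computes cosine similarities, and the clip guards against floating-point results marginally outside [-1, 1], which would break a subsequent arccos. A standalone sketch using the same operators in imperative form:

import mxnet.ndarray as nd

x = nd.random.normal(shape=(4, 16))
w = nd.random.normal(shape=(10, 16))
x_n = nd.L2Normalization(x, mode='instance')
w_n = nd.L2Normalization(w, mode='instance')
cos_theta = nd.FullyConnected(x_n, w_n, no_bias=True, num_hidden=10)
cos_theta = nd.clip(cos_theta, a_min=-1, a_max=1)  # e.g. 1.0000001 -> 1.0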
Example #13
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def plot_img(losses_log):
    sw.add_image(tag='A', image=nd.clip(nd.concatenate([losses_log['real_A'][0][0:1],
                                                        losses_log['fake_B'][0][0:1],
                                                        losses_log['rec_A'][0][0:1],
                                                        losses_log['idt_A'][0][0:1]]) * 0.5 + 0.5, 0, 1))
    sw.add_image(tag='B', image=nd.clip(nd.concatenate([losses_log['real_B'][0][0:1],
                                                        losses_log['fake_A'][0][0:1],
                                                        losses_log['rec_B'][0][0:1],
                                                        losses_log['idt_B'][0][0:1]]) * 0.5 + 0.5, 0, 1)) 
Example #14
Source File: train_srgan.py    From gluon-cv with Apache License 2.0
def plot_img(losses_log):
    sw.add_image(tag='lr_img', image=nd.clip(nd.concatenate(losses_log['lr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img', image=nd.clip(nd.concatenate(losses_log['hr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img_fake', image=nd.clip(nd.concatenate(losses_log['hr_img_fake'])[0:4], 0, 1)) 
Example #15
Source File: tensor.py    From dgl with Apache License 2.0
def forward(self, in_data):
        feat_shape = in_data.shape[1:]
        out_data = nd.empty((self.out_size,) + feat_shape,
                            ctx=in_data.context, dtype=in_data.dtype)
        in_data_nd = zerocopy_to_dgl_ndarray(in_data)
        out_data_nd = zerocopy_to_dgl_ndarray_for_write(out_data)
        K.copy_reduce(
            self.reducer if self.reducer != 'mean' else 'sum',
            self.graph, self.target, in_data_nd, out_data_nd,
            self.in_map[0], self.out_map[0])
        # normalize if mean reducer
        # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
        if self.reducer == 'mean':
            in_ones = nd.ones((in_data.shape[0],),
                              ctx=in_data.context, dtype=in_data.dtype)
            degs = nd.empty((out_data.shape[0],),
                            ctx=out_data.context, dtype=out_data.dtype)
            in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
            degs_nd = zerocopy_to_dgl_ndarray(degs)
            K.copy_reduce(
                'sum', self.graph, self.target, in_ones_nd, degs_nd, 
                self.in_map[0], self.out_map[0])
            # reshape
            degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.ndim - 1)).clip(1, float('inf')) 
            out_data = out_data / degs
        else:
            degs = None
        self.save_for_backward(in_data_nd, out_data_nd, degs)
        return out_data 
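The clip near the end is the key trick: a mean reducer is emulated by a sum followed by division by node degrees, and clipping the degrees to [1, inf) prevents division by zero for nodes that received no messages. In isolation:

import mxnet.ndarray as nd

summed = nd.array([[4.0, 2.0], [0.0, 0.0]])         # per-node message sums
degs = nd.array([2.0, 0.0])                         # node 1 received nothing
degs = degs.reshape((-1, 1)).clip(1, float('inf'))  # zero degrees divide by 1, not 0
mean = summed / degs                                # [[2., 1.], [0., 0.]]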
Example #16
Source File: tensor.py    From dgl with Apache License 2.0
def unsorted_1d_segment_mean(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment mean on first dimension'

    n_ones = nd.ones_like(seg_id).astype(input.dtype)
    w = unsorted_1d_segment_sum(n_ones, seg_id, n_segs, 0)
    w = nd.clip(w, a_min=1, a_max=np.inf)
    y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)
    y = y / w.reshape((-1,) + (1,) * (y.ndim - 1))
    return y 
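The same guard appears here: w counts how many elements fall into each segment, and clipping it to a minimum of 1 keeps empty segments from producing 0/0. A small illustration of the counts:

import numpy as np
import mxnet.ndarray as nd

counts = np.bincount(np.array([0, 0, 2]), minlength=3)       # segment 1 is empty: [2, 0, 1]
w = nd.array(counts, dtype='float32').clip(1, float('inf'))  # [2., 1., 1.] -> safe divisor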
Example #17
Source File: kaggle_k_fold_cross_validation.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train) 
Example #18
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def imagenet_clamp_batch(batch, low, high):
    """ Not necessary in practice """
    F.clip(batch[:,0,:,:],low-123.680, high-123.680)
    F.clip(batch[:,1,:,:],low-116.779, high-116.779)
    F.clip(batch[:,2,:,:],low-103.939, high-103.939) 
Example #19
Source File: tensor.py    From dgl with Apache License 2.0
def forward(self, lhs_data, rhs_data):
        lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
        rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
        feat_shape = K.infer_binary_feature_shape(self.binary_op, lhs_data_nd, rhs_data_nd)
        out_shape = feat_shape
        if self.binary_op == 'dot':
            out_shape = feat_shape[:-1]
        out_data = nd.empty((self.out_size,) + out_shape,
                            ctx=lhs_data.context, dtype=lhs_data.dtype)
        out_data_nd = zerocopy_to_dgl_ndarray_for_write(out_data)
        K.binary_op_reduce(
            self.reducer if self.reducer != 'mean' else 'sum',
            self.binary_op, self.graph, self.lhs, self.rhs,
            lhs_data_nd, rhs_data_nd, out_data_nd, self.lhs_map[0],
            self.rhs_map[0], self.out_map[0])
        # normalize if mean reducer
        # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
        if self.reducer == 'mean':
            degs = nd.empty((out_data.shape[0],),
                            ctx=out_data.context, dtype=out_data.dtype)
            degs_nd = zerocopy_to_dgl_ndarray(degs)
            if self.lhs != TargetCode.DST:
                target = self.lhs
                n = lhs_data.shape[0]
                in_map = self.lhs_map[0]
            else:
                target = self.rhs
                n = rhs_data.shape[0]
                in_map = self.rhs_map[0]
            in_ones = nd.ones((n,), ctx=lhs_data.context, dtype=lhs_data.dtype)
            in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
            K.copy_reduce(
                'sum', self.graph, target, in_ones_nd, degs_nd,
                in_map, self.out_map[0])
            # reshape
            degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.ndim - 1)).clip(1, float('inf'))
            out_data = out_data / degs
        else:
            degs = None
        self.save_for_backward(lhs_data_nd, rhs_data_nd, out_data_nd,
                               feat_shape, degs)
        return out_data
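As in Example #15, the mean reducer is implemented as a sum divided by clipped degrees. The reshape to (out_data.shape[0],) + (1,) * (out_data.ndim - 1) turns the 1-D degree vector into a shape that broadcasts over any trailing feature axes:

import mxnet.ndarray as nd

out = nd.ones((5, 4, 3))                                      # summed output features
degs = nd.array([1.0, 0.0, 2.0, 0.0, 3.0])
degs = degs.reshape((out.shape[0],) + (1,) * (out.ndim - 1))  # -> shape (5, 1, 1)
mean = out / degs.clip(1, float('inf'))                       # broadcast divide, no zeros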