Python mxnet.nd.zeros() Examples

The following are 30 code examples of mxnet.nd.zeros(). You can go to the original project or source file by following the link above each example, or check out all available functions and classes of the module mxnet.nd.
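Before the project examples, a minimal sketch of the call itself may help: shape is the only required argument, while dtype and ctx are optional.

import mxnet as mx
from mxnet import nd

a = nd.zeros((2, 3))                     # 2x3 float32 zeros on the default context (CPU)
b = nd.zeros(shape=(4,), dtype='int64')  # explicit dtype
c = nd.zeros((1,), ctx=mx.cpu(0))        # explicit device context
print(a.shape, b.dtype, c.context)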
Example #1
Source File: pose.py    From gluon-cv with Apache License 2.0
def cv_rotate(img, rot, resW, resH):
    cv2 = try_import_cv2()
    center = np.array((resW - 1, resH - 1)) / 2
    rot_rad = np.pi * rot / 180

    src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
    dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)

    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)

    src[0, :] = center
    src[1, :] = center + src_dir
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    dst_img = cv2.warpAffine(img, trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return dst_img 
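A note on the zero-initialized src/dst buffers above: cv2.getAffineTransform solves for the affine matrix from exactly three point correspondences, so each buffer holds three 2-D points; the third point is derived from the first two (via get_3rd_point) so that the three points are non-collinear and determine the transform unambiguously.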
Example #2
Source File: fps.py    From dgl with Apache License 2.0
def forward(self, pos):
        r"""Memory allocation and sampling

        Parameters
        ----------
        pos : tensor
            The positional tensor of shape (B, N, C)

        Returns
        -------
        tensor of shape (B, self.npoints)
            The sampled indices in each batch.
        """
        ctx = pos.context
        B, N, C = pos.shape
        pos = pos.reshape(-1, C)
        dist = nd.zeros((B * N), dtype=pos.dtype, ctx=ctx)
        start_idx = nd.random.randint(0, N - 1, (B,), dtype=np.int64, ctx=ctx)
        result = nd.zeros((self.npoints * B), dtype=np.int64, ctx=ctx)
        farthest_point_sampler(pos, B, self.npoints, dist, start_idx, result)
        return result.reshape(B, self.npoints) 
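A note on the two nd.zeros calls above: dist and result are pre-allocated output buffers that farthest_point_sampler fills in place, with dist tracking each point's distance to the already-selected set and result receiving the sampled indices, which is why both are created up front on the same context as pos.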
Example #3
Source File: utils.py    From EmotionClassifier with GNU General Public License v3.0
def predict_rnn(rnn, prefix, num_chars, params, hidden_dim, ctx, idx_to_char,
                char_to_idx, get_inputs, is_lstm=False):
    """Predict the next chars given the prefix."""
    prefix = prefix.lower()
    state_h = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    if is_lstm:
        state_c = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for i in range(num_chars + len(prefix)):
        X = nd.array([output[-1]], ctx=ctx)
        if is_lstm:
            Y, state_h, state_c = rnn(get_inputs(X), state_h, state_c, *params)
        else:
            Y, state_h = rnn(get_inputs(X), state_h, *params)
        if i < len(prefix)-1:
            next_input = char_to_idx[prefix[i+1]]
        else:
            next_input = int(Y[0].argmax(axis=1).asscalar())
        output.append(next_input)
    return ''.join([idx_to_char[i] for i in output]) 
Example #4
Source File: test_quantile_loss.py    From gluon-ts with Apache License 2.0
def test_compute_quantile_loss() -> None:
    y_true = nd.ones(shape=(10, 10, 10))
    y_pred = nd.zeros(shape=(10, 10, 10, 2))

    quantiles = [0.5, 0.9]

    loss = QuantileLoss(quantiles)

    correct_qt_loss = [1.0, 1.8]

    for idx, q in enumerate(quantiles):
        assert (
            nd.mean(
                loss.compute_quantile_loss(
                    nd.ndarray, y_true, y_pred[:, :, :, idx], q
                )
            )
            - correct_qt_loss[idx]
            < 1e-5
        ), f"computing quantile loss at quantile {q} fails!" 
Example #5
Source File: utils.py    From EmotionClassifier with GNU General Public License v3.0
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """download the fashion mnist dataest and then load into memory"""
    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # change data from batch x height x width x channel to batch x channel x height x width
        return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')

    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion. 
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data) 
Example #6
Source File: pose.py    From gluon-cv with Apache License 2.0
def transformBoxInvert(pt, ul, br, resH, resW):
    # type: (Tensor, Tensor, Tensor, float, float) -> Tensor

    center = mx.nd.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2

    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
    lenW = lenH * resW / resH

    _pt = (pt * lenH) / resH

    if bool(((lenW - 1) / 2 - center[0]) > 0):
        _pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0]).asscalar()
    if bool(((lenH - 1) / 2 - center[1]) > 0):
        _pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1]).asscalar()

    new_point = mx.nd.zeros(2)
    new_point[0] = _pt[0] + ul[0]
    new_point[1] = _pt[1] + ul[1]
    return new_point 
Example #7
Source File: utils_final.py    From InsightFace_TF with MIT License
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """download the fashion mnist dataest and then load into memory"""

    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # change data from batch x height x width x channel to batch x channel x height x width
        return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')

    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion.
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data) 
Example #8
Source File: utils_final.py    From InsightFace_TF with MIT License
def load_data_mnist(batch_size, resize=None, root="~/.mxnet/datasets/mnist"):
    """download the fashion mnist dataest and then load into memory"""

    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                new_data[i] = image.imresize(data[i], resize, resize)
            data = new_data
        # change data from batch x height x width x channel to batch x channel x height x width
        return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')

    mnist_train = gluon.data.vision.MNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.MNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion.
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data) 
Example #9
Source File: utils_final.py    From InsightFace_TF with MIT License
def predict_rnn(rnn, prefix, num_chars, params, hidden_dim, ctx, idx_to_char,
                char_to_idx, get_inputs, is_lstm=False):
    """Predict the next chars given the prefix."""
    prefix = prefix.lower()
    state_h = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    if is_lstm:
        state_c = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for i in range(num_chars + len(prefix)):
        X = nd.array([output[-1]], ctx=ctx)
        if is_lstm:
            Y, state_h, state_c = rnn(get_inputs(X), state_h, state_c, *params)
        else:
            Y, state_h = rnn(get_inputs(X), state_h, *params)
        if i < len(prefix) - 1:
            next_input = char_to_idx[prefix[i + 1]]
        else:
            next_input = int(Y[0].argmax(axis=1).asscalar())
        output.append(next_input)
    return ''.join([idx_to_char[i] for i in output]) 
Example #10
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def Route(self, x):
        # routing logits, re-initialized to zeros on every forward pass
        b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
        x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
        w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0), repeats=x.shape[0], axis=0)
        # prediction vectors: elementwise product of weights and inputs, summed below
        u_ = w_expand * x_expand
        u = nd.sum(u_, axis=1)
        # gradients flow only through the final routing iteration
        u_no_gradient = nd.stop_gradient(u)
        for i in range(self.route_num):
            c_mat = nd.softmax(b_mat, axis=2)  # coupling coefficients
            if i == self.route_num - 1:
                s = nd.sum(u * c_mat, axis=-1)
            else:
                s = nd.sum(u_no_gradient * c_mat, axis=-1)
            v = squash(s, 1)
            v1 = nd.expand_dims(v, axis=-1)
            if i != self.route_num - 1:
                # agreement between predictions and outputs updates the routing logits
                update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
                b_mat = b_mat + update_term
        return v
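This Route method is CapsNet-style dynamic routing-by-agreement: the softmax over b_mat yields coupling coefficients, squash normalizes the weighted sums into capsule outputs, and the dot product between predictions and outputs reinforces the logits on every iteration except the last.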
Example #11
Source File: utils.py    From comment_toxic_CapsuleNet with MIT License
def evaluate_accuracy_multi(data_iterator, net, ctx):
    data_iterator.reset()
    dummy_label = np.zeros((0, 6))
    dummy_pred = np.zeros((0, 6))
    t1 = time.time()
    for i, batch in enumerate(data_iterator):
        data, label = _get_batch_multi(batch, ctx, False)
        output = np.vstack([net(X).asnumpy() for X in data])
        labels = np.vstack([Y.asnumpy() for Y in label])
        dummy_label = np.vstack((dummy_label, labels))
        dummy_pred = np.vstack((dummy_pred, output))
    dummy_pred_label = dummy_pred > 0.5
    for i in range(dummy_label.shape[1]):
        print(i, confusion_matrix(dummy_label[:, i], dummy_pred_label[:, i]))

    return roc_auc_score(dummy_label, dummy_pred), accuracy(dummy_pred, dummy_label), time.time() - t1
Example #12
Source File: test_tensor_utils.py    From mxnet-centernet with MIT License
def test_get_nonzero():
    '''
    feat = nd.zeros(shape = (4, 2))
    feat[0, 0] = 1
    feat[1, 1] = 1
    feat[2, 1] = 1
    feat[3, 0] = 1
    feat = nd.array([[0.6, 0.0, 0.0, 0.0],
                     [0.0, 0.4, 0.0, 0.0],
                     [0.0, 0.0, 1.2, 0.0],
                     [0.0, 0.0, 0.0,-0.4]])
    feat = nd.array([[[1,1,1,0,1],[1,0,0,0,1]],
                     [[1,1,1,0,1],[1,0,0,0,1]]])
    '''
    feat = nd.zeros(shape=(4,))
    feat[2] = 1
    print(feat)

    feat_sparse = feat.tostype('csr')
    print(feat_sparse)

    indices = feat_sparse.indices
    print(indices)
    return 
Example #13
Source File: base_detector.py    From mxnet-centernet with MIT License
def __init__(self, options):
        if options.gpus[0] >= 0:
            try:
                self.ctx = mx.gpu()
                _ = nd.zeros((1,), ctx=self.ctx)
            except mx.base.MXNetError:
                print("No GPU available. Use CPU instead.")
                self.ctx = mx.cpu()
        else:
            self.ctx = mx.cpu()

        print("Creating model...")
        self.model = create_model(options.arch, options.heads, options.head_conv, ctx=self.ctx)
        if options.load_model_path != '':
            self.model = load_model(self.model, options.load_model_path, ctx=self.ctx)

        self.mean = np.array(options.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(options.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = options.num_classes
        self.scales = options.test_scales
        self.opt = options
        self.pause = True 
Example #14
Source File: simulate_quantization.py    From Quantization.MXNet with MIT License
def __iter__(self):
        sample_indices = []
        label_counter = np.zeros(self._classes)
        shuffle_indices = np.arange(len(self._labels))
        np.random.shuffle(shuffle_indices)
        for idx in shuffle_indices:
            label = self._labels[idx]
            if label_counter[label] < self._num_per_class:
                sample_indices.append(idx)
                label_counter[label] += 1
            if label_counter.sum() == self._classes * self._num_per_class:
                break
        for idx, cnt in enumerate(label_counter):
            if cnt < self._num_per_class:
                raise ValueError("Number of samples for class {} is {} < {}".format(idx, cnt, self._num_per_class))
        return iter(sample_indices) 
Example #15
Source File: test_merge_bn.py    From Quantization.MXNet with MIT License
def test_export():
    print("<<TEST: hybridize and export>>")
    bn_string_in_json_file = '"op": "BatchNorm"'
    export_file_name = "/tmp/test_merge_bn-" + time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))

    net = mobilenet1_0(pretrained=True)
    merge_bn(net)
    print("merge_bn ...[ok]")
    net.hybridize()
    print("hybridize ...[ok]")
    _ = net(nd.zeros(shape=(1, 3, 224, 224)))
    print("run hybrid graph forward ...[ok]")
    net.export(export_file_name, 0)
    print("export to", export_file_name, "...[ok]")
    with open(export_file_name+"-symbol.json", "r") as f:
        s = f.read()
        if bn_string_in_json_file not in s:
            print('[OK] op "BatchNorm" is not in exported file')
        else:
            print('[Error] op "BatchNorm" is in exported file')
    print() 
Example #16
Source File: test_freeze.py    From Quantization.MXNet with MIT License
def test_quantize_symbol():
    print("<<TEST: Quantize symbol for mobilenet_v1_1_0>>")
    export_file_name = "/tmp/test_freeze_utils-" + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    in_file_name = export_file_name + "-symbol.json"
    out_file_name = export_file_name + "-qsymbol.json"

    net = mobilenet1_0(pretrained=True)
    net.hybridize()
    _ = net(nd.zeros(shape=(1, 3, 224, 224)))
    net.export(export_file_name, 0)

    mobilenet_sym = sym.load(in_file_name)
    qsym = quantize_symbol(mobilenet_sym)
    qsym.save(out_file_name)

    print('Quantized symbol has been saved to ' + out_file_name)
    print()
    return out_file_name 
Example #17
Source File: get_data.py    From EmotionClassifier with GNU General Public License v3.0
def random_mask(ndimg, size, n_chanel=3, flag=0):
    w, h = ndimg.shape[:2]  # get the image size
    w_ = random.randint(0, w - size)  # valid range for the mask's top-left corner
    h_ = random.randint(0, h - size)
    if flag == 0:
        # the random mask is a square; cover it with black
        ndimg[w_:w_ + size, h_:h_ + size, :] = nd.zeros((size, size, n_chanel))
        return ndimg
    elif flag == 1:
        # the random mask is a rectangle; cover it with random noise
        w_size = random.randint(0, size - 1)
        h_size = random.randint(0, size - 1)
        ndimg[w_:w_ + w_size, h_:h_ + h_size, :] = mx.ndarray.random_uniform(low=0, high=255, shape=(w_size, h_size, n_chanel))
        return ndimg
Example #18
Source File: loss.py    From cascade_rcnn_gluon with Apache License 2.0
def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1:
            # no positive samples found, return dummy losses
            return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
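            # note: (pos - 1) zeroes positives and flips each negative's loss sign,
            # so ascending argsort lists the hardest negatives first; the second
            # argsort converts sort positions into per-element ranks, which the
            # threshold below uses to keep negatives at a fixed ratio to positives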
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < (pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses 
Example #19
Source File: simulate_quantization.py    From Quantization.MXNet with MIT License
def evaluate(net, num_class, dataloader, ctx, update_ema=False, tqdm_desc="Eval"):
    correct_counter = nd.zeros(num_class)
    label_counter = nd.zeros(num_class)
    test_num_correct = 0

    with tqdm(total=len(dataloader), desc=tqdm_desc) as pbar:
        for i, (X, y) in enumerate(dataloader):
            X = X.as_in_context(ctx)
            y = y.as_in_context(ctx)
            outputs = net(X)
            if update_ema:
                net.update_ema()
            # collect predictions
            pred = outputs.argmax(axis=1)
            test_num_correct += (pred == y.astype('float32')).sum().asscalar()
            pred = pred.as_in_context(cpu())
            y = y.astype('float32').as_in_context(cpu())
            for p, gt in zip(pred, y):
                label_counter[gt] += 1
                if p == gt:
                    correct_counter[gt] += 1
            # update tqdm
            pbar.update(1)
    # calculate acc and avg_acc
    eval_acc = test_num_correct / label_counter.sum().asscalar()
    eval_acc_avg = (correct_counter / (label_counter + 1e-10)).mean().asscalar()
    return eval_acc, eval_acc_avg 
Example #20
Source File: utils.py    From d2l-zh with Apache License 2.0
def corr2d(X, K):
    """Compute 2D cross-correlation."""
    h, w = K.shape
    Y = nd.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i: i + h, j: j + w] * K).sum()
    return Y 
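A quick hand-checkable sanity run of corr2d (input values are illustrative):

from mxnet import nd

X = nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
K = nd.array([[0, 1], [2, 3]])
print(corr2d(X, K))  # [[19. 25.] [37. 43.]]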
Example #21
Source File: utils.py    From CapsNet_Mxnet with Apache License 2.0
def try_gpu():
    """If GPU is available, return mx.gpu(0); else return mx.cpu()"""
    try:
        ctx = mx.gpu()
        _ = nd.zeros((1,), ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx 
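The nd.zeros((1,)) allocation is the actual GPU probe: creating even a single element on mx.gpu() raises an MXNetError when no usable GPU is present, which the handler turns into a CPU fallback. Typical usage:

ctx = try_gpu()
x = nd.zeros((2, 3), ctx=ctx)  # on gpu(0) if available, otherwise cpu(0)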
Example #22
Source File: utils.py    From comment_toxic_CapsuleNet with MIT License
def try_gpu():
    """If GPU is available, return mx.gpu(0); else return mx.cpu()"""
    try:
        ctx = mx.gpu()
        _ = nd.zeros((1,), ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx 
Example #23
Source File: test_periodic_kernel.py    From gluon-ts with Apache License 2.0
def test_periodic_kernel_compute(
    x1, x2, amplitude, length_scale, frequency
) -> None:
    tol = 1e-5
    batch_size = amplitude.shape[0]
    history_length_1 = x1.shape[0]
    history_length_2 = x2.shape[0]
    num_features = x1.shape[1]
    x1 = x1.reshape(batch_size, history_length_1, num_features)
    x2 = x2.reshape(batch_size, history_length_2, num_features)
    amplitude = amplitude.reshape(batch_size, 1, 1)
    length_scale = length_scale.reshape(batch_size, 1, 1)
    frequency = frequency.reshape(batch_size, 1, 1)
    periodic = PeriodicKernel(amplitude, length_scale, frequency)

    exact = nd.zeros((batch_size, history_length_1, history_length_2))
    for i in range(history_length_1):
        for j in range(history_length_2):
            val = (
                2
                * (
                    nd.sin(frequency * math.pi * (x1[:, i, :] - x2[:, j, :]))
                    / length_scale
                )
                ** 2
            )
            exact[:, i, j] = (amplitude * nd.exp(-val)).reshape(-1)
    res = periodic.kernel_matrix(x1, x2)
    assert nd.norm(res - exact) < tol 
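For reference, the nested loop materializes the periodic kernel entry by entry, k(x, x') = a · exp(−2 · sin²(π f (x − x')) / ℓ²) with amplitude a, length scale ℓ, and frequency f; the assertion then checks the vectorized kernel_matrix against this exact construction to within tol.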
Example #24
Source File: architecture.py    From coach with Apache License 2.0
def _dummy_model_inputs(self) -> Tuple[NDArray, ...]:
        """
        Creates a tuple of input arrays with correct shapes that can be used for shape inference
        of the model weights and for printing the summary
        :return: tuple of inputs for model forward pass
        """
        input_shapes = self._model_input_shapes()
        inputs = tuple(nd.zeros(tuple(shape), ctx=self._devices[0]) for shape in input_shapes)
        return inputs 
Example #25
Source File: conv_cap.py    From comment_toxic_CapsuleNet with MIT License
def route(self, u):
        b_mat = nd.zeros((u.shape[0], self.num_cap_in, self.num_cap, 1, u.shape[4], u.shape[5]), ctx=u.context)
        for i in range(self.route_num):
            c_mat = nd.softmax(b_mat, axis=2)
            s = nd.sum(u * c_mat, axis=1)
            v = squash(s, 2)
            if i != self.route_num - 1:
                v1 = nd.expand_dims(v, axis=1)
                update_term = nd.sum(u*v1, axis=3, keepdims=True)
                b_mat = b_mat + update_term
        return v 
Example #26
Source File: convert_conv2d.py    From Quantization.MXNet with MIT License
def _add_fake_bn_ema_hook(m):
    def _ema_hook(m, x):
        x = x[0]
        weight = m.weight.data()
        bias = nd.zeros(shape=weight.shape[0], ctx=weight.context) if m.bias is None else m.bias.data()
        y = nd.Convolution(x, weight, bias, **m._kwargs)
        num_samples = y.shape[0] * y.shape[2] * y.shape[3]
        m.current_mean = y.sum(axis=(0, 2, 3)) / num_samples
        diff_square = (y - m.current_mean.reshape(1, -1, 1, 1)) ** 2
        m.current_var = diff_square.sum(axis=(0, 2, 3)) / num_samples
    m.register_forward_pre_hook(_ema_hook) 
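The pre-hook above re-runs the convolution solely to collect statistics: current_mean and current_var are the per-channel batch mean and (biased) variance of the conv output, i.e. the quantities a following BatchNorm would normally track, so EMA updates can continue after the batch norm has been folded into the convolution.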
Example #27
Source File: test_utils.py    From coach with Apache License 2.0
def test_scoped_onxx_enable():
    class Counter(object):
        def __init__(self):
            self._count = 0

        def increment(self):
            self._count += 1

        @property
        def count(self):
            return self._count

    class TempBlock(gluon.HybridBlock, OnnxHandlerBlock):
        def __init__(self, counter: Counter):
            super(TempBlock, self).__init__()
            OnnxHandlerBlock.__init__(self)
            self._counter = counter

        def hybrid_forward(self, F, x, *args, **kwargs):
            if self._onnx:
                self._counter.increment()
            return x

    counter = Counter()
    net = gluon.nn.HybridSequential()
    for _ in range(10):
        net.add(TempBlock(counter))

    # ONNX disabled
    net(nd.zeros((1,)))
    assert counter.count == 0

    # ONNX enabled
    with ScopedOnnxEnable(net):
        net(nd.zeros((1,)))
    assert counter.count == 10 
Example #28
Source File: tabular_nn_model.py    From autogluon with Apache License 2.0
def _predict_tabular_data(self, new_data, process=True, predict_proba=True):  # TODO ensure API lines up with tabular.Model class.
        """ Specific TabularNN method to produce predictions on new (unprocessed) data.
            Returns 1D numpy array unless predict_proba=True and task is multi-class classification (not binary).
            Args:
                new_data (pd.Dataframe or TabularNNDataset): new data to make predictions on.
                If you want to make prediction for just a single row of new_data, pass in: new_data.iloc[[row_index]]
                process (bool): should new data be processed (if False, new_data must be TabularNNDataset)
                predict_proba (bool): should we output class-probabilities (not used for regression)
        """
        if process:
            new_data = self.process_test_data(new_data, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers_inference, labels=None)
        if not isinstance(new_data, TabularNNDataset):
            raise ValueError("new_data must of of type TabularNNDataset if process=False")
        if self.problem_type == REGRESSION or not predict_proba:
            preds = nd.zeros((new_data.num_examples,1))
        else:
            preds = nd.zeros((new_data.num_examples, self.num_net_outputs))
        i = 0
        for batch_idx, data_batch in enumerate(new_data.dataloader):
            data_batch = new_data.format_batch_data(data_batch, self.ctx)
            preds_batch = self.model(data_batch)
            batch_size = len(preds_batch)
            if self.problem_type != REGRESSION:
                if not predict_proba: # need to take argmax
                    preds_batch = nd.argmax(preds_batch, axis=1, keepdims=True)
                else: # need to take softmax
                    preds_batch = nd.softmax(preds_batch, axis=1)
            preds[i:(i+batch_size)] = preds_batch
            i = i+batch_size
        if self.problem_type == REGRESSION or not predict_proba:
            return preds.asnumpy().flatten()  # return 1D numpy array
        elif self.problem_type == BINARY and predict_proba:
            return preds[:,1].asnumpy()  # for binary problems, only return P(Y==+1)

        return preds.asnumpy()  # return 2D numpy array 
Example #29
Source File: tabular_nn_model.py    From autogluon with Apache License 2.0
def _create_preprocessor(self, impute_strategy, max_category_levels):
        """ Defines data encoders used to preprocess different data types and creates instance variable which is sklearn ColumnTransformer object """
        if self.processor is not None:
            Warning("Attempting to process training data for TabularNeuralNetModel, but previously already did this.")
        continuous_features = self.types_of_features['continuous']
        skewed_features = self.types_of_features['skewed']
        onehot_features = self.types_of_features['onehot']
        embed_features = self.types_of_features['embed']
        language_features = self.types_of_features['language']
        transformers = [] # order of various column transformers in this list is important!
        if len(continuous_features) > 0:
            continuous_transformer = Pipeline(steps=[
                ('imputer', SimpleImputer(strategy=impute_strategy)),
                ('scaler', StandardScaler())])
            transformers.append( ('continuous', continuous_transformer, continuous_features) )
        if len(skewed_features) > 0:
            power_transformer = Pipeline(steps=[
                ('imputer', SimpleImputer(strategy=impute_strategy)),
                ('quantile', QuantileTransformer(output_distribution='normal')) ]) # Or output_distribution = 'uniform'
                # TODO: remove old code: ('power', PowerTransformer(method=self.params['proc.power_transform_method'])) ])
            transformers.append( ('skewed', power_transformer, skewed_features) )
        if len(onehot_features) > 0:
            onehot_transformer = Pipeline(steps=[
                # TODO: Consider avoiding converting to string for improved memory efficiency
                ('to_str', FunctionTransformer(self.convert_df_dtype_to_str)),
                ('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
                ('onehot', OneHotMergeRaresHandleUnknownEncoder(max_levels=max_category_levels, sparse=False))]) # test-time unknown values will be encoded as all zeros vector
            transformers.append( ('onehot', onehot_transformer, onehot_features) )
        if len(embed_features) > 0: # Ordinal transformer applied to convert to-be-embedded categorical features to integer levels
            ordinal_transformer = Pipeline(steps=[
                ('to_str', FunctionTransformer(self.convert_df_dtype_to_str)),
                ('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
                ('ordinal', OrdinalMergeRaresHandleUnknownEncoder(max_levels=max_category_levels))]) # returns 0-n when max_category_levels = n-1. category n is reserved for unknown test-time categories.
            transformers.append( ('ordinal', ordinal_transformer, embed_features) )
        if len(language_features) > 0:
            raise NotImplementedError("language_features cannot be used at the moment")
        return ColumnTransformer(transformers=transformers) # numeric features are processed in the same order as in numeric_features vector, so feature-names remain the same. 
Example #30
Source File: cell.py    From ST-MetaNet with MIT License
def forward_single(self, feature, data, begin_state):
        """Unroll the cell for a single step.

        Parameters
        ----------
        feature: an NDArray with shape [n, d].
        data: an NDArray with shape [n, b, d].
        begin_state: an NDArray with shape [n, b, d].

        Returns
        -------
        output: output of the cell, an NDArray with shape [n, b, d].
        states: a list of hidden states (hidden units with shape [n, b, d]) of the RNN.
        """
        if begin_state is None:
            num_nodes, batch_size, _ = data.shape
            begin_state = [nd.zeros((num_nodes, batch_size, self.hidden_size), ctx=feature.context)]

        prev_state = begin_state[0]
        data_and_state = nd.concat(data, prev_state, dim=-1)
        z = nd.sigmoid(self.dense_z(feature, data_and_state))
        r = nd.sigmoid(self.dense_r(feature, data_and_state))

        state = z * prev_state + (1 - z) * nd.tanh(self.dense_i2h(feature, data) + self.dense_h2h(feature, r * prev_state))
        return state, [state]
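Setting aside the meta-learned weights (each dense_* layer is conditioned on feature, the ST-MetaNet part), this is the standard GRU update: z and r act as the update and reset gates, and the new state is h' = z ⊙ h + (1 − z) ⊙ tanh(W·x + U·(r ⊙ h)).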