Python mxnet.ndarray.ones() Examples

The following are 30 code examples of mxnet.ndarray.ones(), collected from open source projects. The project and source file each example was taken from are noted above it. You may also want to check out the other available functions and classes of the module mxnet.ndarray.
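
As a quick reference before the examples: mxnet.ndarray.ones() creates an NDArray filled with ones. A minimal sketch of its common arguments (shape is required; ctx and dtype are optional):

import mxnet as mx
from mxnet import nd

a = nd.ones((2, 3))                    # float32 ones on the default (CPU) context
b = nd.ones((2, 3), dtype='float16')   # ones with an explicit dtype
c = nd.ones((2, 3), ctx=mx.cpu(0))     # ones on an explicit device context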
Example #1
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_binary_func():
    def check_binary_func(x, y):
        f_add      = lambda x, y: x+y
        f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
        autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
        f_mul      = lambda x, y: x*y
        f_mul_grad = lambda x, y: [y, x]
        autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
        f_compose  = lambda x, y: x+x*y
        f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
        autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad)
    uniform_x = nd.uniform(shape=(4, 5))
    uniform_y = nd.uniform(shape=(4, 5))
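    # exercise every combination of dense ('default'), row_sparse and csr storage types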
    stypes = ['default', 'row_sparse', 'csr']
    for stype_x in stypes:
        for stype_y in stypes:
            x = uniform_x.tostype(stype_x)
            y = uniform_y.tostype(stype_y)
            check_binary_func(x, y) 
Example #2
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_sparse_dot_grad():
    def check_sparse_dot_grad(rhs):
        lhs = rand_ndarray((2, 8), 'csr')
        with mx.autograd.record():
            y = mx.nd.dot(lhs, rhs)
        y.backward()
        grad = rhs.grad
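        # for y = dot(lhs, rhs), dy/d(rhs) = lhs.T @ head_grad; backward() uses ones as the head gradient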
        grad_np = np.dot(lhs.asnumpy().T, np.ones((lhs.shape[0], rhs.shape[1])))
        assert grad.stype == 'row_sparse'
        assert_almost_equal(grad.asnumpy(), grad_np)

    # check grad with row_sparse weight
    shape = (8, 3)
    rsp = mx.nd.ones(shape).tostype('row_sparse')
    rsp.attach_grad()
    check_sparse_dot_grad(rsp)

    # check grad with dense weight
    dns = mx.nd.ones(shape)
    dns.attach_grad(stype='row_sparse')
    check_sparse_dot_grad(dns) 
Example #3
Source File: test_module.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_forward_types():
    #Test forward with other data batch API
    Batch = namedtuple('Batch', ['data'])
    data = mx.sym.Variable('data')
    out = data * 2
    mod = mx.mod.Module(symbol=out, label_names=None)
    mod.bind(data_shapes=[('data', (1, 10))])
    mod.init_params()
    data1 = [mx.nd.ones((1, 10))]
    mod.forward(Batch(data1))
    assert mod.get_outputs()[0].shape == (1, 10)
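    # a batch with a different shape triggers an automatic reshape of the module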
    data2 = [mx.nd.ones((3, 5))]
    mod.forward(Batch(data2))
    assert mod.get_outputs()[0].shape == (3, 5)

    #Test forward with other NDArray and np.ndarray inputs
    data = mx.sym.Variable('data')
    out = data * 2
    mod = mx.mod.Module(symbol=out, label_names=None)
    mod.bind(data_shapes=[('data', (1, 10))])
    mod.init_params()
    data1 = mx.nd.ones((1, 10))
    assert mod.predict(data1).shape == (1, 10)
    data2 = np.ones((1, 10))
    assert mod.predict(data2).shape == (1, 10)
Example #4
Source File: test_module.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_module_reshape():
    data = mx.sym.Variable('data')
    sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')

    dshape = (7, 20)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[('data', dshape)])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 1})

    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
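    # gradients are rescaled by 1/batch_size, so each bias gradient is 1 and one SGD step with lr=1 moves the zero-initialized bias to -1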
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()

    dshape = (14, 20)
    mod.reshape(data_shapes=[('data', dshape)])
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all() 
Example #5
Source File: test_module.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_module_layout():
    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    dshape = (3, 8, 7)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    assert mod.get_outputs()[0].shape == dshape

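    # with two contexts, the batch (N) axis of the TNC layout is split in half, so each device holds (3, 4, 7)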
    hdshape = (3, 4, 7)
    for x in mod.get_outputs(merge_multi_context=False)[0]:
        assert x.shape == hdshape 
Example #6
Source File: test_module.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_module_input_grads():
    a = mx.sym.Variable('a', __layout__='NC')
    b = mx.sym.Variable('b', __layout__='NC')
    c = mx.sym.Variable('c', __layout__='NC')

    c = a + 2 * b + 3 * c
    net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
                        context=[mx.cpu(0), mx.cpu(1)])
    net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
             label_shapes=None, inputs_need_grad=True)
    net.init_params()

    net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
                                                 nd.ones((5, 5)),
                                                 nd.ones((5, 5))]))
    net.backward(out_grads=[nd.ones((5, 5))])
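    # for c = a + 2*b + 3*c, the input gradients are 1, 2 and 3 respectively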
    input_grads = net.get_input_grads()
    b_grad = input_grads[0].asnumpy()
    c_grad = input_grads[1].asnumpy()
    a_grad = input_grads[2].asnumpy()
    assert np.all(a_grad == 1), a_grad
    assert np.all(b_grad == 2), b_grad
    assert np.all(c_grad == 3), c_grad 
Example #7
Source File: test_module.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_module_dtype():
    dtype = np.float16
    dshape = (3, 8, 7)

    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],
                                label=None))
    mod.backward([mx.nd.ones(dshape, dtype=dtype)])

    for x in mod.get_outputs():
        assert x.dtype == dtype
Example #8
Source File: test_module.py    From SNIPER-mxnet with Apache License 2.0
def test_module_dtype():
    dtype = np.float16
    dshape = (3, 8, 7)

    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],
                                label=None))
    mod.backward([mx.nd.ones(dshape, dtype=dtype)])

    for x in mod.get_outputs():
        assert x.dtype == dtype
Example #9
Source File: test_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_sparse_dot_grad():
    def check_sparse_dot_grad(rhs):
        lhs = rand_ndarray((2, 8), 'csr')
        with mx.autograd.record():
            y = mx.nd.dot(lhs, rhs)
        y.backward()
        grad = rhs.grad
        grad_np = np.dot(lhs.asnumpy().T, np.ones((lhs.shape[0], rhs.shape[1])))
        assert grad.stype == 'row_sparse'
        assert_almost_equal(grad.asnumpy(), grad_np)

    # check grad with row_sparse weight
    shape = (8, 3)
    rsp = mx.nd.ones(shape).tostype('row_sparse')
    rsp.attach_grad()
    check_sparse_dot_grad(rsp)

    # check grad with dense weight
    dns = mx.nd.ones(shape)
    dns.attach_grad(stype='row_sparse')
    check_sparse_dot_grad(dns) 
Example #10
Source File: test_module.py    From SNIPER-mxnet with Apache License 2.0
def test_module_input_grads():
    a = mx.sym.Variable('a', __layout__='NC')
    b = mx.sym.Variable('b', __layout__='NC')
    c = mx.sym.Variable('c', __layout__='NC')

    c = a + 2 * b + 3 * c
    net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
                        context=[mx.cpu(0), mx.cpu(1)])
    net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
             label_shapes=None, inputs_need_grad=True)
    net.init_params()

    net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
                                                 nd.ones((5, 5)),
                                                 nd.ones((5, 5))]))
    net.backward(out_grads=[nd.ones((5, 5))])
    input_grads = net.get_input_grads()
    b_grad = input_grads[0].asnumpy()
    c_grad = input_grads[1].asnumpy()
    a_grad = input_grads[2].asnumpy()
    assert np.all(a_grad == 1), a_grad
    assert np.all(b_grad == 2), b_grad
    assert np.all(c_grad == 3), c_grad 
Example #11
Source File: test_module.py    From SNIPER-mxnet with Apache License 2.0
def test_module_layout():
    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    dshape = (3, 8, 7)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    assert mod.get_outputs()[0].shape == dshape

    hdshape = (3, 4, 7)
    for x in mod.get_outputs(merge_multi_context=False)[0]:
        assert x.shape == hdshape 
Example #12
Source File: test_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1,2,3,4,5])
    dc = nd.array([5,4,3,2,1])

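    # a None head gradient defaults to ones, which produces the first row of the expected dx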
    with record():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])

    assert (dx.asnumpy() == np.array(
        [[1,1,1,1,1],
         [1,2,3,4,5],
         [5,4,3,2,1]])).all() 
Example #13
Source File: test_module.py    From SNIPER-mxnet with Apache License 2.0
def test_module_reshape():
    data = mx.sym.Variable('data')
    sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')

    dshape = (7, 20)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[('data', dshape)])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 1})

    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()

    dshape = (14, 20)
    mod.reshape(data_shapes=[('data', dshape)])
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all() 
Example #14
Source File: test_contrib_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1,2,3,4,5])
    dc = nd.array([5,4,3,2,1])

    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])

    assert (dx.asnumpy() == np.array(
        [[1,1,1,1,1],
         [1,2,3,4,5],
         [5,4,3,2,1]])).all() 
Example #15
Source File: tensor.py    From dgl with Apache License 2.0
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment sum on first dimension'

    # Use SPMV to simulate segment sum
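    # w is an (n_segs, n_inputs) selection matrix with w[seg_id[j], j] = 1, so dot(w, input) sums the rows of each segment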
    ctx = input.context
    n_inputs = input.shape[0]
    input_shape_suffix = input.shape[1:]
    input = input.reshape(n_inputs, -1)
    n_range = nd.arange(n_inputs, dtype='int64').as_in_context(ctx)
    w_nnz = nd.ones(n_inputs).as_in_context(ctx)
    w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))
    w = w.as_in_context(ctx)
    y = nd.dot(w, input)
    y = nd.reshape(y, (n_segs,) + input_shape_suffix)
    return y 
Example #16
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1,2,3,4,5])
    dc = nd.array([5,4,3,2,1])

    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])

    assert (dx.asnumpy() == np.array(
        [[1,1,1,1,1],
         [1,2,3,4,5],
         [5,4,3,2,1]])).all() 
Example #17
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_detach_updated_grad():
    x = nd.ones((2, 2))
    dx = nd.zeros_like(x)
    y = nd.ones_like(x)
    dy = nd.zeros_like(x)
    mark_variables([x, y], [dx, dy])
    assert x._fresh_grad == False
    assert y._fresh_grad == False

    with train_section():
        x2 = x + 2
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 1).all()
    assert x._fresh_grad == True
    assert y._fresh_grad == True

    dx[:] = 0
    x._fresh_grad = False
    y._fresh_grad = False
    assert x._fresh_grad == False
    assert y._fresh_grad == False
    with train_section():
        x2 = x + 2
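        # detach() cuts x2 out of the recorded graph, so backward() no longer reaches x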
        x2 = x2.detach()
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 0).all()
    assert y._fresh_grad == True
    assert x._fresh_grad == False 
Example #18
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_training():
    x = nd.ones((10, 10))
    with train_section():
        y = nd.Dropout(x, p=0.5)
        assert not (y.asnumpy() == x.asnumpy()).all()
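        # inside test_section(), Dropout runs in inference mode and is the identity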
        with test_section():
            y = nd.Dropout(x, p=0.5)
            assert (y.asnumpy() == x.asnumpy()).all() 
Example #19
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_detach_updated_grad():
    x = nd.ones((2, 2))
    dx = nd.zeros_like(x)
    y = nd.ones_like(x)
    dy = nd.zeros_like(x)
    mark_variables([x, y], [dx, dy])
    assert x._fresh_grad == False
    assert y._fresh_grad == False

    with record():
        x2 = x + 2
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 1).all()
    assert x._fresh_grad == True
    assert y._fresh_grad == True

    dx[:] = 0
    x._fresh_grad = False
    y._fresh_grad = False
    assert x._fresh_grad == False
    assert y._fresh_grad == False
    with record():
        x2 = x + 2
        x2 = x2.detach()
        y2 = x2 + y
        y2.backward()
    assert (dx.asnumpy() == 0).all()
    assert y._fresh_grad == True
    assert x._fresh_grad == False 
Example #20
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_argnum():
    def f_with_mode(a, b, mode):
        if mode:
            return a+b
        else:
            return a*b

    a = nd.uniform(shape=(3, 2))
    b = nd.uniform(shape=(3, 2))
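    # argnum selects which positional arguments to differentiate with respect to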
    f_add_grad = lambda x, y, mode: [nd.ones(x.shape), nd.ones(y.shape)]
    f_mul_grad = lambda x, y, mode: [y, x]
    autograd_assert(a, b, True,
        argnum=[0, 1], func=f_with_mode, grad_func=f_add_grad)
    autograd_assert(a, b, False,
        argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad) 
Example #21
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_get_symbol():
    x = mx.nd.ones((1,))
    x.attach_grad()
    with record():
        y = x*x + 2*x - 1
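    # y depends on the single input x, so the recorded graph has one argument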
    assert len(get_symbol(y).list_arguments()) == 1

    z = mx.nd.ones((1,))
    z.attach_grad()
    with record():
        y = x*x + 2*z - 1
    assert len(get_symbol(y).list_arguments()) == 2 
Example #22
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_retain_grad():
    x = mx.nd.ones((2, 2))
    dx = mx.nd.zeros((2, 2))
    mark_variables([x], [dx], grad_reqs='add')
    with record():
        y = x + 1
        y.backward(retain_graph=False)
    assert (dx.asnumpy() == 1).all()

    dx[:] = 0
    with record():
        y = x + 1
        y.backward(retain_graph=True)
        y.backward(retain_graph=False)
    assert (dx.asnumpy() == 2).all()

    # The following sequence should throw an exception. We discard the expected
    # stderr stack trace output for this operation to keep the test logs clean.
    with discard_stderr():
        try:
            with record():
                y = x + 1
                y.backward()
                y.backward()
        except Exception:
            return

    raise AssertionError(
        "differentiating the same graph twice without retain_graph should fail") 
Example #23
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def gan_loss(input, target_is_real):
    if target_is_real:
        target = nd.ones(input.shape, ctx=input.context)
    else:
        target = nd.zeros(input.shape, ctx=input.context)
    # MSE loss for LSGAN
    e = ((input - target) ** 2).mean(axis=0, exclude=True)
    return e
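
For context, a minimal sketch of how this loss might be called; pred is an illustrative stand-in for a discriminator output, not a name from the source:

pred = nd.uniform(shape=(4, 1))
loss_real = gan_loss(pred, target_is_real=True)    # real samples: pull D's output toward 1
loss_fake = gan_loss(pred, target_is_real=False)   # generated samples: pull D's output toward 0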
Example #24
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_binary_func():
    x = nd.uniform(shape=(4, 5))
    y = nd.uniform(shape=(4, 5))
    f_add      = lambda x, y: x+y
    f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
    autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
    f_mul      = lambda x, y: x*y
    f_mul_grad = lambda x, y: [y, x]
    autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
    f_compose  = lambda x, y: x+x*y
    f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
    autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad) 
Example #25
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_training():
    x = nd.ones((10, 10))
    with record():
        y = nd.Dropout(x, p=0.5)
        assert not (y.asnumpy() == x.asnumpy()).all()
        with pause():
            y = nd.Dropout(x, p=0.5)
            assert (y.asnumpy() == x.asnumpy()).all() 
Example #26
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_argnum():
    def f_with_mode(a, b, mode):
        if mode:
            return a+b
        else:
            return a*b

    a = nd.uniform(shape=(3, 2))
    b = nd.uniform(shape=(3, 2))
    f_add_grad = lambda x, y, mode: [nd.ones(x.shape), nd.ones(y.shape)]
    f_mul_grad = lambda x, y, mode: [y, x]
    autograd_assert(a, b, True,
        argnum=[0, 1], func=f_with_mode, grad_func=f_add_grad)
    autograd_assert(a, b, False,
        argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad) 
Example #27
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_unary_func():
    def check_unary_func(x):
        f_exp         = lambda x: nd.exp(x)
        f_exp_grad    = lambda x: [nd.exp(x)]
        autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
        f_half        = lambda x: x/2
        f_half_grad   = lambda x: [nd.ones(x.shape) * 0.5]
        autograd_assert(x, func=f_half, grad_func=f_half_grad)
        f_square      = lambda x: x**2
        f_square_grad = lambda x: [2*x]
        autograd_assert(x, func=f_square, grad_func=f_square_grad)
    uniform = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    for stype in stypes:
        check_unary_func(uniform.tostype(stype)) 
Example #28
Source File: MxnetA.py    From nn_tools with MIT License
def profiling_symbol(symbol, data_shape, data_name='data'):
    monitor = Monitor()
    model = mx.mod.Module(symbol)
    model.bind(data_shapes=[(data_name, tuple(data_shape))])
    model.install_monitor(monitor)
    model.init_params()
    monitor.tic()
    model.forward(mx.io.DataBatch(data=(nd.ones(data_shape),)))
    data_infos = monitor.toc()
    module_json = symbol.tojson()
    analyse(data_infos, module_json, data_name)
Example #29
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def gan_loss(input, target_is_real):
    if target_is_real:
        target = nd.ones(input.shape, ctx=input.context)
    else:
        target = nd.zeros(input.shape, ctx=input.context)
    # MSE loss for LSGAN
    e = ((input - target) ** 2).mean(axis=0, exclude=True)
    return e
Example #30
Source File: tensor.py    From dgl with Apache License 2.0
def forward(self, in_data):
    feat_shape = in_data.shape[1:]
    out_data = nd.empty((self.out_size,) + feat_shape,
                        ctx=in_data.context, dtype=in_data.dtype)
    in_data_nd = zerocopy_to_dgl_ndarray(in_data)
    out_data_nd = zerocopy_to_dgl_ndarray_for_write(out_data)
    K.copy_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.graph, self.target, in_data_nd, out_data_nd,
        self.in_map[0], self.out_map[0])
    # normalize if mean reducer
    # NOTE(zihao): this is a temporary hack and we should have a better solution in the future.
    if self.reducer == 'mean':
        in_ones = nd.ones((in_data.shape[0],),
                          ctx=in_data.context, dtype=in_data.dtype)
        degs = nd.empty((out_data.shape[0],),
                        ctx=out_data.context, dtype=out_data.dtype)
        in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
        degs_nd = zerocopy_to_dgl_ndarray(degs)
        K.copy_reduce(
            'sum', self.graph, self.target, in_ones_nd, degs_nd,
            self.in_map[0], self.out_map[0])
        # reshape
        degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.ndim - 1)).clip(1, float('inf'))
        out_data = out_data / degs
    else:
        degs = None
    self.save_for_backward(in_data_nd, out_data_nd, degs)
    return out_data