Python mxnet.nd.dot() Examples

The following are 13 code examples of mxnet.nd.dot(), drawn from open-source projects. You can jump to the original project or source file via the link above each example. You may also want to check out all available functions/classes of the module mxnet.nd, or try the search function.
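
As a quick orientation before the examples: nd.dot computes an inner product for 1-D inputs and a matrix product for 2-D inputs; for higher-dimensional inputs it contracts the last axis of the first argument with the first axis of the second. A minimal sketch (values are illustrative only):

from mxnet import nd

a = nd.array([1.0, 2.0, 3.0])
print(nd.dot(a, a))            # inner product: [14.]

X = nd.ones((2, 3))
W = nd.ones((3, 4))
print(nd.dot(X, W).shape)      # matrix product: (2, 4)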
Example #1
Source File: TextEXAM_multi-label.py    From AAAI_2019_EXAM with GNU General Public License v2.0
def forward(self, x):
        """
        Return shape: (batch_size, 2000)
        """
        batch_size = x.shape[0]  # infer the batch size from the input

        # Encode layer
        question = x[:, 0:30]
        question = self.Embed(question)
        question = self.gru(question)

        # Interaction layer: contract the GRU hidden axis against the topic embeddings
        interaction = nd.dot(question, self.topic_embedding.data())
        interaction = nd.transpose(interaction, axes=(0, 2, 1))
        interaction = interaction.reshape((batch_size * 2000, -1))
        # interaction = interaction.expand_dims(axis=1)

        # Aggregation layer
        # interaction = self.pooling(self.conv_2(self.conv_1(interaction)))
        res = self.mlp_2(self.mlp_1(interaction))
        res = res.reshape((batch_size, 2000))

        return res
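
A note on the interaction layer above: with a 3-D left operand and a 2-D right operand, nd.dot contracts the last axis of the left with the first axis of the right. A standalone shape check, with a hidden size of 64 and 2000 topics as illustrative values:

from mxnet import nd

question = nd.random.uniform(shape=(8, 30, 64))   # (batch, seq_len, hidden)
topics = nd.random.uniform(shape=(64, 2000))      # (hidden, num_topics)
interaction = nd.dot(question, topics)            # contracts the hidden axis
print(interaction.shape)                          # (8, 30, 2000)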

Example #2
Source File: relgraphconv.py    From dgl with Apache License 2.0
def basis_message_func(self, edges):
        """Message function for basis regularizer"""
        ctx = edges.src['h'].context
        if self.num_bases < self.num_rels:
            # generate all weights from bases
            weight = self.weight.data(ctx).reshape(
                self.num_bases, self.in_feat * self.out_feat)
            weight = nd.dot(self.w_comp.data(ctx), weight).reshape(
                self.num_rels, self.in_feat, self.out_feat)
        else:
            weight = self.weight.data(ctx)

        msg = utils.bmm_maybe_select(edges.src['h'], weight, edges.data['type'])
        if 'norm' in edges.data:
            msg = msg * edges.data['norm']
        return {'msg': msg} 
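
The basis regularizer above stores num_bases weight matrices and mixes them into one weight matrix per relation with a single nd.dot. A standalone sketch of that composition, with made-up sizes (4 relations, 2 bases, 3-in/3-out features):

from mxnet import nd

num_rels, num_bases, in_feat, out_feat = 4, 2, 3, 3
bases = nd.random.uniform(shape=(num_bases, in_feat * out_feat))   # flattened basis matrices
w_comp = nd.random.uniform(shape=(num_rels, num_bases))            # per-relation coefficients

weight = nd.dot(w_comp, bases).reshape((num_rels, in_feat, out_feat))
print(weight.shape)   # (4, 3, 3): one weight matrix per relation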
Example #3
Source File: utils.py    From coach with Apache License 2.0
def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate global norm on list or tuple of NDArrays using this formula:
        `global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`

    :param arrays: list or tuple of parameters to calculate global norm on
    :return: single-value NDArray
    """
    def _norm(array):
        if array.stype == 'default':
            x = array.reshape((-1,))
            return nd.dot(x, x)
        return array.norm().square()

    total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
    total_norm = nd.sqrt(total_norm)
    return total_norm 
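
A hand-check of the same computation on toy values: squaring, summing, and taking the square root by hand should match global_norm.

from mxnet import nd

params = [nd.array([3.0, 4.0]), nd.array([[1.0, 2.0], [2.0, 4.0]])]
# sum of squared L2 norms: 25 + 25 = 50
total = nd.sqrt(nd.add_n(*[(p.reshape((-1,)) ** 2).sum() for p in params]))
print(total)   # ~[7.0711]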
Example #4
Source File: train.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
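
The function relies on the expansion ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * (x_i . x_j). A quick sanity check, assuming the definition above is in scope (toy values chosen so the answer is exact):

from mxnet import nd

x = nd.array([[0.0, 0.0], [3.0, 4.0]])
print(get_distance_matrix(x).asnumpy())
# [[0. 5.]
#  [5. 0.]]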
Example #5
Source File: utils.py    From dgl with Apache License 2.0
def matmul_maybe_select(A, B):
    """Perform Matrix multiplication C = A * B but A could be an integer id vector.

    If A is an integer vector, we treat it as multiplying a one-hot encoded tensor.
    In this case, the expensive dense matrix multiply can be replaced by a much
    cheaper index lookup.

    For example,
    ::

        A = [2, 0, 1],
        B = [[0.1, 0.2],
             [0.3, 0.4],
             [0.5, 0.6]]

    then matmul_maybe_select(A, B) is equivalent to
    ::

        [[0, 0, 1],     [[0.1, 0.2],
         [1, 0, 0],  *   [0.3, 0.4],
         [0, 1, 0]]      [0.5, 0.6]]

    In all other cases, perform a normal matmul.

    Parameters
    ----------
    A : mxnet.NDArray
        lhs tensor
    B : mxnet.NDArray
        rhs tensor

    Returns
    -------
    C : mxnet.NDArray
        result tensor
    """
    if A.dtype in (np.int32, np.int64) and len(A.shape) == 1:
        return nd.take(B, A, axis=0)
    else:
        return nd.dot(A, B) 
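
A usage sketch, assuming the function above (and its numpy/mxnet imports) is in scope; it exercises both branches:

from mxnet import nd
import numpy as np

B = nd.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])

# integer id vector: cheap row lookup via nd.take
A_ids = nd.array([2, 0, 1], dtype=np.int64)
print(matmul_maybe_select(A_ids, B).asnumpy())    # rows 2, 0, 1 of B

# float matrix: ordinary dense matmul
A_dense = nd.eye(3)
print(matmul_maybe_select(A_dense, B).asnumpy())  # B unchanged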
Example #6
Source File: base_layers.py    From STGCN with GNU General Public License v3.0
def forward(self, x, cheb_polys):
        '''
        Parameters
        ----------
        x: nd.array, shape is (batch_size * time_step, num_of_vertices, c_in)

        cheb_polys: nd.array,
                shape is (num_of_vertices, order_of_cheb * num_of_vertices)

        Returns
        ----------
        shape is (batch_size * time_step, num_of_vertices, c_out)

        '''

        _, num_of_vertices, c_in = x.shape

        # (batch_size * c_in, num_of_vertices)
        x_tmp = x.transpose((0, 2, 1)).reshape((-1, num_of_vertices))

        # (batch_size, c_in, order_of_cheb, num_of_vertices)
        x_mul = nd.dot(x_tmp, cheb_polys).reshape((-1,
                                                   c_in,
                                                   self.order_of_cheb,
                                                   num_of_vertices))

        # (batch_size, num_of_vertices, c_in * order_of_cheb)
        x_ker = x_mul.transpose((0, 3, 1, 2)) \
                     .reshape((-1, num_of_vertices, c_in * self.order_of_cheb))

        return self.theta(x_ker) 
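
The reshapes above flatten the batch and channel axes so that a single nd.dot applies all Chebyshev polynomial terms at once. A standalone shape walk-through with illustrative sizes (4 batch steps, 10 vertices, 3 channels, order 2):

from mxnet import nd

B, V, c_in, K = 4, 10, 3, 2
x = nd.random.uniform(shape=(B, V, c_in))
cheb_polys = nd.random.uniform(shape=(V, K * V))

x_tmp = x.transpose((0, 2, 1)).reshape((-1, V))              # (B*c_in, V)
x_mul = nd.dot(x_tmp, cheb_polys).reshape((-1, c_in, K, V))  # (B, c_in, K, V)
x_ker = x_mul.transpose((0, 3, 1, 2)).reshape((-1, V, c_in * K))
print(x_ker.shape)   # (4, 10, 6), ready for the final dense layer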
Example #7
Source File: train.py    From training_results_v0.6 with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
Example #8
Source File: utils.py    From d2l-zh with Apache License 2.0
def linreg(X, w, b):
    """Linear regression."""
    return nd.dot(X, w) + b 
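
A usage sketch, assuming linreg as defined above (toy values chosen so the arithmetic is easy to follow):

from mxnet import nd

X = nd.array([[1.0, 2.0], [3.0, 4.0]])
w = nd.array([[0.5], [0.5]])
b = nd.array([1.0])
print(linreg(X, w, b).asnumpy())
# [[2.5]
#  [4.5]]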
Example #9
Source File: utils.py    From d2l-zh with Apache License 2.0
def linreg(X, w, b):
    """Linear regression."""
    return nd.dot(X, w) + b 
Example #10
Source File: train.py    From SNIPER-mxnet with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
Example #11
Source File: densesageconv.py    From dgl with Apache License 2.0
def forward(self, adj, feat):
        r"""Compute (Dense) Graph SAGE layer.

        Parameters
        ----------
        adj : mxnet.NDArray
            The adjacency matrix of the graph to apply SAGE Convolution on. When
            applied to a unidirectional bipartite graph, ``adj`` should be of shape
            :math:`(N_{out}, N_{in})`; when applied to a homogeneous graph, ``adj``
            should be of shape :math:`(N, N)`. In both cases, a row represents a
            destination node while a column represents a source node.
        feat : mxnet.NDArray or a pair of mxnet.NDArray
            If a mxnet.NDArray is given, the input feature of shape :math:`(N, D_{in})`
            where :math:`D_{in}` is the size of the input feature and :math:`N` is the
            number of nodes.
            If a pair of mxnet.NDArray is given, the pair must contain two tensors of
            shape :math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.

        Returns
        -------
        mxnet.NDArray
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is size of output feature.
        """
        check_eq_shape(feat)
        if isinstance(feat, tuple):
            feat_src = self.feat_drop(feat[0])
            feat_dst = self.feat_drop(feat[1])
        else:
            feat_src = feat_dst = self.feat_drop(feat)
        adj = adj.astype(feat_src.dtype).as_in_context(feat_src.context)
        in_degrees = adj.sum(axis=1, keepdims=True)
        h_neigh = (nd.dot(adj, feat_src) + feat_dst) / (in_degrees + 1)
        rst = self.fc(h_neigh)
        # activation
        if self.activation is not None:
            rst = self.activation(rst)
        # normalization
        if self._norm is not None:
            rst = self._norm(rst)

        return rst 
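
The heart of the layer is the mean aggregation (adj . feat_src + feat_dst) / (in_degrees + 1); the dropout, dense layer, and normalization are standard Gluon pieces. A sketch of just the aggregation on a toy 3-node graph (values illustrative):

from mxnet import nd

adj = nd.array([[0, 1, 1],
                [1, 0, 0],
                [1, 0, 0]])               # row = destination, column = source
feat = nd.array([[1.0], [2.0], [3.0]])

in_degrees = adj.sum(axis=1, keepdims=True)
h_neigh = (nd.dot(adj, feat) + feat) / (in_degrees + 1)
print(h_neigh.asnumpy())   # [[2.], [1.5], [2.]]: each node averages itself with its neighbors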
Example #12
Source File: densechebconv.py    From dgl with Apache License 2.0
def forward(self, adj, feat, lambda_max=None):
        r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer.

        Parameters
        ----------
        adj : mxnet.NDArray
            The adjacency matrix of the graph to apply Graph Convolution on,
            should be of shape :math:`(N, N)`, where a row represents the destination
            and a column represents the source.
        feat : mxnet.NDArray
            The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
            is size of input feature, :math:`N` is the number of nodes.
        lambda_max : float or None, optional
            A float value indicating the largest eigenvalue of the graph Laplacian.
            Default: None.

        Returns
        -------
        mxnet.NDArray
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is size of output feature.
        """
        A = adj.astype(feat.dtype).as_in_context(feat.context)
        num_nodes = A.shape[0]

        in_degree = 1. / nd.clip(A.sum(axis=1), 1, float('inf')).sqrt()
        D_invsqrt = nd.diag(in_degree)
        I = nd.eye(num_nodes, ctx=A.context)
        L = I - nd.dot(D_invsqrt, nd.dot(A, D_invsqrt))

        if lambda_max is None:
            # NOTE(zihao): this only works for undirected graphs, since nd.linalg.syevd assumes a symmetric matrix.
            lambda_max = (nd.linalg.syevd(L)[1]).max()

        L_hat = 2 * L / lambda_max - I
        Z = [nd.eye(num_nodes, ctx=A.context)]
        Zh = self.fc[0](feat)
        for i in range(1, self._k):
            if i == 1:
                Z.append(L_hat)
            else:
                Z.append(2 * nd.dot(L_hat, Z[-1]) - Z[-2])
            Zh = Zh + nd.dot(Z[i], self.fc[i](feat))

        if self.bias is not None:
            Zh = Zh + self.bias.data(feat.context)
        return Zh 
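
The loop above builds the Chebyshev recurrence T_0 = I, T_1 = L_hat, T_k = 2 * L_hat * T_{k-1} - T_{k-2} on the fly. A standalone sketch of just the recurrence, using a random symmetric matrix of illustrative size:

from mxnet import nd

V, k = 4, 3
M = nd.random.uniform(shape=(V, V))
L_hat = 0.5 * (M + M.T) - nd.eye(V)   # symmetric stand-in for the rescaled Laplacian

Z = [nd.eye(V), L_hat]
for i in range(2, k):
    Z.append(2 * nd.dot(L_hat, Z[-1]) - Z[-2])
print([z.shape for z in Z])   # k polynomial terms, each (V, V)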
Example #13
Source File: densegraphconv.py    From dgl with Apache License 2.0
def forward(self, adj, feat):
        r"""Compute (Dense) Graph Convolution layer.

        Parameters
        ----------
        adj : mxnet.NDArray
            The adjacency matrix of the graph to apply Graph Convolution on. When
            applied to a unidirectional bipartite graph, ``adj`` should be of shape
            :math:`(N_{out}, N_{in})`; when applied to a homogeneous graph, ``adj``
            should be of shape :math:`(N, N)`. In both cases, a row represents a
            destination node while a column represents a source node.
        feat : mxnet.NDArray
            The input feature.

        Returns
        -------
        mxnet.NDArray
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is size of output feature.
        """
        adj = adj.astype(feat.dtype).as_in_context(feat.context)
        src_degrees = nd.clip(adj.sum(axis=0), a_min=1, a_max=float('inf'))
        dst_degrees = nd.clip(adj.sum(axis=1), a_min=1, a_max=float('inf'))
        feat_src = feat

        if self._norm == 'both':
            norm_src = nd.power(src_degrees, -0.5)
            shp_src = norm_src.shape + (1,) * (feat.ndim - 1)
            norm_src = norm_src.reshape(shp_src).as_in_context(feat.context)
            feat_src = feat_src * norm_src

        if self._in_feats > self._out_feats:
            # mult W first to reduce the feature size for aggregation.
            feat_src = nd.dot(feat_src, self.weight.data(feat_src.context))
            rst = nd.dot(adj, feat_src)
        else:
            # aggregate first then mult W
            rst = nd.dot(adj, feat_src)
            rst = nd.dot(rst, self.weight.data(feat_src.context))

        if self._norm != 'none':
            if self._norm == 'both':
                norm_dst = nd.power(dst_degrees, -0.5)
            else: # right
                norm_dst = 1.0 / dst_degrees
            shp_dst = norm_dst.shape + (1,) * (feat.ndim - 1)
            norm_dst = norm_dst.reshape(shp_dst).as_in_context(feat.context)
            rst = rst * norm_dst

        if self.bias is not None:
            rst = rst + self.bias.data(feat.context)

        if self._activation is not None:
            rst = self._activation(rst)

        return rst
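
With _norm == 'both', the layer computes the symmetric normalization D_dst^{-1/2} . adj . D_src^{-1/2} . feat . W using elementwise scalings instead of building diagonal matrices. A sketch of the normalization alone on a toy graph (the clip keeps isolated nodes from dividing by zero):

from mxnet import nd

adj = nd.array([[0.0, 1.0],
                [1.0, 1.0]])
feat = nd.array([[1.0, 0.0],
                 [0.0, 1.0]])

src_degrees = nd.clip(adj.sum(axis=0), a_min=1, a_max=float('inf'))
dst_degrees = nd.clip(adj.sum(axis=1), a_min=1, a_max=float('inf'))

feat_src = feat * nd.power(src_degrees, -0.5).reshape((-1, 1))
rst = nd.dot(adj, feat_src) * nd.power(dst_degrees, -0.5).reshape((-1, 1))
print(rst.asnumpy())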