Python autograd.numpy.dot() Examples

The following are 30 code examples of autograd.numpy.dot(). Each example is taken from an open-source project; the source file and license are noted above each one. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
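Before the examples, a minimal self-contained sketch of the pattern most of them share: build a scalar-valued function out of autograd.numpy.dot, then differentiate it with autograd.grad (the array values here are arbitrary illustrations):

import autograd.numpy as np
from autograd import grad

A = np.array([[1., 2.], [3., 4.]])

def loss(x):
    # Scalar function of x built from np.dot.
    return np.sum(np.dot(A, x) ** 2)

print(grad(loss)(np.array([1., -1.])))  # equals 2 * A.T @ (A @ x)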
Example #1
Source File: test_wrappers.py    From autograd with MIT License
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)

## No longer support this behavior
# def test_make_ggnvp_broadcasting():
#   A = npr.randn(4, 5)
#   x = npr.randn(10, 4)
#   v = npr.randn(10, 4)

#   fun = lambda x: np.tanh(np.dot(x, A))
#   res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
#   res2 = make_ggnvp(fun)(x)(v)
#   check_equivalent(res1, res2) 
Example #2
Source File: convnet.py    From autograd with MIT License
def make_nn_funs(input_shape, layer_specs, L2_reg):
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predictions(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T)
        return - log_prior - log_lik

    def frac_err(W_vect, X, T):
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err 
Example #3
Source File: test_autograd.py    From momi2 with GNU General Public License v3.0
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2)
Example #4
Source File: standard_models.py    From pyhawkes with MIT License
def objective(self, w):
        obj = 0
        N = float(sum([np.sum(d[1]) for d in self.data_list]))
        for F,S in self.data_list:
            psi = np.dot(F, w)
            lam = self.link(psi)
            obj -= np.sum(S * np.log(lam) - lam * self.dt) / N
            # assert np.isfinite(ll)

        # Add penalties
        obj += (0.5 * np.sum(w[1:]**2) / self.sigma**2) / N
        obj += np.sum(np.abs(w[1:]) * self.lmbda) / N

        # assert np.isfinite(obj)

        return obj 
Example #5
Source File: bench_rnn.py    From autograd with MIT License
def setup(self):
        self.batch_size = 16
        self.dtype = "float32"
        self.D = 2**10
        self.x = 0.01 * np.random.randn(self.batch_size, self.D).astype(self.dtype)
        self.W1 = 0.01 * np.random.randn(self.D, self.D).astype(self.dtype)
        self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
        self.Wout = 0.01 * np.random.randn(self.D, 1).astype(self.dtype)
        self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
        self.l = (np.random.rand(self.batch_size, 1) > 0.5).astype(self.dtype)
        self.n = 50

        def autograd_rnn(params, x, label, n):
            W, b, Wout, bout = params
            h1 = x
            for i in range(n):
                h1 = np.tanh(np.dot(h1, W) + b)
            logit = np.dot(h1, Wout) + bout
            loss = -np.sum(label * logit - (
                    logit + np.log(1 + np.exp(-logit))))
            return loss

        self.fn = autograd_rnn
        self.grad_fn = grad(self.fn) 
Example #6
Source File: optimizer.py    From tinyik with MIT License
def optimize(self, x0, target):
        """Calculate an optimum argument of an objective function."""
        x = x0
        for i in range(self.maxiter):
            g = self.g(x, target)
            h = self.h(x, target)
            if i == 0:
                alpha = 0
                m = g
            else:
                alpha = - np.dot(m, np.dot(h, g)) / np.dot(m, np.dot(h, m))
                m = g + np.dot(alpha, m)
            t = - np.dot(m, g) / np.dot(m, np.dot(h, m))
            delta = np.dot(t, m)
            x = x + delta
            if np.linalg.norm(delta) < self.tol:
                break
        return x 
Example #7
Source File: train.py    From tree-regularization-public with MIT License
def average_path_length(tree, X):
    """Compute average path length: cost of simulating the average
    example; this is used in the objective function.

    @param tree: DecisionTreeClassifier instance
    @param X: NumPy array (N x D)
              N := number of examples
              D := number of dimensions
    @return path_length: float
                         average path length
    """
    leaf_indices = tree.apply(X)
    # minlength keeps the count vector at node_count entries even when the
    # highest-numbered nodes receive no examples
    leaf_counts = np.bincount(leaf_indices, minlength=tree.tree_.node_count)
    leaf_i = np.arange(tree.tree_.node_count)
    path_length = np.dot(leaf_i, leaf_counts) / float(X.shape[0])
    return path_length 
Example #8
Source File: transitions.py    From recurrent-slds with MIT License
def joint_log_probability(self, logpi, W, stateseqs, covseqs):
        K, D = self.num_states, self.covariate_dim

        # Compute the objective
        ll = 0
        for z, x in zip(stateseqs, covseqs):
            T = z.size
            assert x.ndim == 2 and x.shape[0] == T - 1
            z_prev = one_hot(z[:-1], K)
            z_next = one_hot(z[1:], K)

            # Numerator
            tmp = anp.dot(z_prev, logpi) + anp.dot(x, W)
            ll += anp.sum(tmp * z_next)

            # Denominator
            Z = amisc.logsumexp(tmp, axis=1)
            ll -= anp.sum(Z)

        return ll 
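joint_log_probability relies on a one_hot helper from the project's utilities; a minimal stand-in (an assumption; the project's own definition may differ) is:

import autograd.numpy as anp

def one_hot(z, K):
    # Map integer states z in {0, ..., K-1} to a (len(z), K) indicator matrix.
    return (z[:, None] == anp.arange(K)[None, :]).astype(float)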
Example #9
Source File: transitions.py    From recurrent-slds with MIT License
def get_log_trans_matrices(self, X):
        """
        Get log transition matrices as a function of X

        :param X: inputs/covariates
    :return: stack of log transition matrices, log A[t] in R^(Kin x Kout)
        """
        # compute the contribution of the covariate to transition matrix
        psi_X = np.dot(X, self.W)

        # add the (T x Kout) and (Kin x Kout) matrices together such that they
        # broadcast into a (T x Kin x Kout) stack of matrices
        psi = psi_X[:, None, :] + self.logpi

        # apply softmax and normalize over outputs
        log_trans_matrices = psi - amisc.logsumexp(psi, axis=2, keepdims=True)

        return log_trans_matrices 
Example #10
Source File: transitions.py    From recurrent-slds with MIT License
def log_prior(self):
        # Normal N(mu | mu_0, Sigma / kappa_0)
        from scipy.linalg import solve_triangular
        sigma = np.linalg.inv(self.J_0)
        mu = sigma.dot(self.h_0)
        S_chol = np.linalg.cholesky(sigma)

        # Stack log pi and W
        X = np.vstack((self.logpi, self.W)).T

        lp = 0
        for d in range(self.D_out):
            x = solve_triangular(S_chol, X[d] - mu, lower=True)
            lp += -1. / 2. * np.dot(x, x) \
                  - self.D_in / 2 * np.log(2 * np.pi) \
                  - np.log(S_chol.diagonal()).sum()

        return lp

    ### HMC 
Example #11
Source File: lgss_example.py    From variational-smc with MIT License
def generate_data(model_params, T=5, rs=npr.RandomState(0)):
    mu0, Sigma0, A, Q, C, R = model_params
    Dx = mu0.shape[0]
    Dy = R.shape[0]

    x_true = np.zeros((T, Dx))
    y_true = np.zeros((T, Dy))

    for t in range(T):
        if t > 0:
            x_true[t, :] = rs.multivariate_normal(np.dot(A, x_true[t-1, :]), Q)
        else:
            x_true[0, :] = rs.multivariate_normal(mu0, Sigma0)
        y_true[t, :] = rs.multivariate_normal(np.dot(C, x_true[t, :]), R)
        
    return x_true, y_true 
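A hypothetical parameter set for generate_data, here a one-dimensional linear Gaussian state-space model (the values are chosen only for illustration):

import autograd.numpy as np
import autograd.numpy.random as npr

model_params = (np.zeros(1),       # mu0
                np.eye(1),         # Sigma0
                0.9 * np.eye(1),   # A
                0.1 * np.eye(1),   # Q
                np.eye(1),         # C
                0.05 * np.eye(1))  # R
x_true, y_true = generate_data(model_params, T=10, rs=npr.RandomState(0))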
Example #12
Source File: multivariate_normal.py    From autograd with MIT License
def solve(allow_singular):
    if allow_singular:
        return lambda A, x: np.dot(np.linalg.pinv(A), x)
    else:
        return np.linalg.solve 
Example #13
Source File: neural_net_regression.py    From autograd with MIT License
def nn_predict(params, inputs, nonlinearity=np.tanh):
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = nonlinearity(outputs)
    return outputs 
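A usage sketch for nn_predict; the parameter layout is inferred from the loop, one (weights, bias) pair per layer:

import autograd.numpy as np
import autograd.numpy.random as npr

layer_sizes = [3, 5, 1]
params = [(npr.randn(m, n), npr.randn(n))
          for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
print(nn_predict(params, npr.randn(10, 3)).shape)  # (10, 1)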
Example #14
Source File: bench_core.py    From autograd with MIT License
def time_no_autograd_control():
    # Test whether the benchmarking machine is running slowly independent of autograd
    A = np.random.randn(200, 200)
    np.dot(A, A) 
Example #15
Source File: solver.py    From tinyik with MIT License
def solve(self, angles, p=None, index=None):
        if p is None:
            p = [0., 0., 0., 1.]
        if index is None:
            index = len(self.components) - 1
        return reduce(
            lambda a, m: np.dot(m, a),
            reversed(self._matrices(angles)[:index + 1]),
            np.array(p)
        )[:3] 
Example #16
Source File: test_linalg.py    From autograd with MIT License
def rand_psd(D):
    mat = npr.randn(D,D)
    return np.dot(mat, mat.T) 
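A quick sanity check: mat @ mat.T is symmetric positive semi-definite, so its eigenvalues are non-negative up to floating-point error:

import autograd.numpy as np
import autograd.numpy.random as npr  # rand_psd above uses npr

M = rand_psd(4)
assert np.all(np.linalg.eigvalsh(M) >= -1e-10)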
Example #17
Source File: test_linalg.py    From autograd with MIT License
def test_inv():
    def fun(x): return np.linalg.inv(x)
    D = 8
    mat = npr.randn(D, D)
    mat = np.dot(mat, mat) + 1.0 * np.eye(D)
    check_grads(fun)(mat) 
Example #18
Source File: test_linalg.py    From autograd with MIT License
def test_cholesky_reparameterization_trick():
    def fun(A):
        rng = np.random.RandomState(0)
        z = np.dot(np.linalg.cholesky(A), rng.randn(A.shape[0]))
        return np.linalg.norm(z)
    check_symmetric_matrix_grads(fun)(rand_psd(6)) 
Example #19
Source File: solver.py    From tinyik with MIT License
def solve(self, angles):
        """Calculate a position of the end-effector and return it."""
        return reduce(
            lambda a, m: np.dot(m, a),
            reversed(self._matrices(angles)),
            np.array([0., 0., 0., 1.])
        )[:3] 
Example #20
Source File: solver.py    From tinyik with MIT License
def p_on_rot_plane(self, p, joint_pos, joint_axis):
        ua = joint_axis / np.linalg.norm(joint_axis)
        return p - (np.dot(p - joint_pos, ua) * ua) 
Example #21
Source File: linalg.py    From autograd with MIT License
def _jvp_sylvester(argnums, dms, ans, args, _):
    a, b, q = args
    if 0 in argnums:
        da = dms[0]
        db = dms[1] if 1 in argnums else 0
    else:
        da = 0
        db = dms[0] if 1 in argnums else 0
    dq = dms[-1] if 2 in argnums else 0
    rhs = dq - anp.dot(da, ans) - anp.dot(ans, db)
    return solve_sylvester(a, b, rhs) 
Example #22
Source File: linalg.py    From autograd with MIT License
def grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
    tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
    transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
    al2d = lambda x: x if x.ndim > 1 else x[...,None]
    def vjp(g):
        v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
        return -transpose(tri(anp.dot(v, al2d(ans).T)))
    return vjp 
Example #23
Source File: test_wrappers.py    From autograd with MIT License
def test_tensor_jacobian_product():
    # This function will have an asymmetric jacobian matrix.
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5)
    V = npr.randn(5)
    J = jacobian(fun)(a)
    check_equivalent(np.dot(V.T, J), tensor_jacobian_product(fun)(a, V)) 
Example #24
Source File: generative_adversarial_net.py    From autograd with MIT License
def neural_net_predict(params, inputs):
    """Params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix."""
    inpW, inpb = params[0]
    inputs = relu(np.dot(inputs, inpW) + inpb)
    for W, b in params[1:-1]:
        outputs = batch_normalize(np.dot(inputs, W) + b)
        inputs = relu(outputs)
    outW, outb = params[-1]
    outputs = np.dot(inputs, outW) + outb
    return outputs 
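neural_net_predict assumes relu and batch_normalize helpers; plausible definitions, approximating (not guaranteed to match) the ones in the full example:

import autograd.numpy as np

def relu(x):
    return np.maximum(0, x)

def batch_normalize(activations):
    # Center and scale each unit's activations across the minibatch.
    mbmean = np.mean(activations, axis=0, keepdims=True)
    return (activations - mbmean) / (np.std(activations, axis=0, keepdims=True) + 1)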
Example #25
Source File: neural_net.py    From autograd with MIT License
def l2_norm(params):
    """Computes l2 norm of params by flattening them into a vector."""
    flattened, _ = flatten(params)
    return np.dot(flattened, flattened) 
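Here flatten is autograd's container-flattening utility; a quick usage sketch (assuming the current autograd.misc location):

from autograd.misc import flatten
import autograd.numpy as np

params = [(np.ones((2, 2)), np.zeros(2))]
print(l2_norm(params))  # 4.0: four ones, squared and summed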
Example #26
Source File: logistic_regression.py    From autograd with MIT License
def logistic_predictions(weights, inputs):
    # Outputs probability of a label being true according to logistic model.
    return sigmoid(np.dot(inputs, weights)) 
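sigmoid is assumed by logistic_predictions; a standard tanh-based logistic function, plus a hypothetical training loss built on top of it:

import autograd.numpy as np

def sigmoid(x):
    return 0.5 * (np.tanh(x / 2.) + 1)  # the logistic function, written via tanh

def training_loss(weights, inputs, targets):
    # Negative log-likelihood of Bernoulli-distributed labels.
    preds = logistic_predictions(weights, inputs)
    label_probs = preds * targets + (1 - preds) * (1 - targets)
    return -np.sum(np.log(label_probs))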
Example #27
Source File: convnet.py    From autograd with MIT License
def forward_pass(self, inputs, param_vector):
        params = self.parser.get(param_vector, 'params')
        biases = self.parser.get(param_vector, 'biases')
        if inputs.ndim > 2:
            inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
        return self.nonlinearity(np.dot(inputs[:, :], params) + biases) 
Example #28
Source File: ode_net.py    From autograd with MIT License
def nn_predict(inputs, t, params):
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = np.maximum(0, outputs)
    return outputs 
Example #29
Source File: ode_net.py    From autograd with MIT License
def func(y, t0, A):
    return np.dot(y**3, A) 
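func has the right-hand-side signature expected by scipy-style ODE integrators; a usage sketch with autograd's wrapped odeint (assuming autograd.scipy.integrate.odeint, as used in the full ode_net example):

import autograd.numpy as np
from autograd.scipy.integrate import odeint

A = -0.5 * np.eye(2)       # illustrative dynamics matrix
y0 = np.array([1.0, 0.5])  # initial state
t = np.linspace(0., 5., 20)
ys = odeint(func, y0, t, args=(A,))  # integrate dy/dt = np.dot(y**3, A)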
Example #30
Source File: gmm.py    From autograd with MIT License
def gmm_log_likelihood(params, data):
    cluster_lls = []
    for log_proportion, mean, cov_sqrt in zip(*unpack_gmm_params(params)):
        cov = np.dot(cov_sqrt.T, cov_sqrt)
        cluster_lls.append(log_proportion + mvn.logpdf(data, mean, cov))
    return np.sum(logsumexp(np.vstack(cluster_lls), axis=0))