Python autograd.numpy() Examples

The following are 30 code examples of autograd.numpy, the NumPy-compatible module that autograd can differentiate through. You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the autograd module.
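autograd.numpy mirrors the NumPy API, so the usual pattern is to import it in place of numpy, write ordinary array code with it, and then let autograd differentiate the result. A minimal sketch, independent of the projects below:

import autograd
import autograd.numpy as np

def tanh(x):
    # an ordinary NumPy-style function built from autograd.numpy primitives
    return (1.0 - np.exp(-2 * x)) / (1.0 + np.exp(-2 * x))

dtanh = autograd.grad(tanh)   # derivative of a scalar-to-scalar function
print(dtanh(1.0))             # approximately 0.41997, i.e. 1 - tanh(1)**2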
Example #1
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at every pair (x, y) with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # optimization; the general expression below also works
            # the sum is the constant d*gamma for every pair of rows (nx x ny)
            return np.full((X.shape[0], Y.shape[0]), X.shape[1] * gamma)

        dot = np.dot(X, Y.T)
        inside = gamma * dot + self.coef0
        to_dminus2 = inside ** (self.degree - 2)
        to_dminus1 = to_dminus2 * inside
        return (
            (self.degree * (self.degree-1) * gamma**2) * to_dminus2 * dot
            + (X.shape[1] * gamma * self.degree) * to_dminus1
        ) 
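Because the analytic expression above is easy to get wrong, it can be cross-checked against autograd itself for a single pair of points. The following sketch uses made-up values of gamma, coef0, and degree rather than anything from the original class:

import autograd
import autograd.numpy as np

gamma, coef0, degree = 0.5, 1.0, 3   # assumed example hyperparameters

def k(x, y):
    # polynomial kernel for a single pair of d-dimensional points
    return (gamma * np.dot(x, y) + coef0) ** degree

x = np.array([0.3, -1.2, 0.7])
y = np.array([1.1, 0.4, -0.5])

# d x d matrix of second derivatives d^2 k / dx_i dy_j; its trace is the sum
# over i of d^2 k / dx_i dy_i computed by gradXY_sum.
H = autograd.jacobian(autograd.grad(k, 0), 1)(x, y)

inside = gamma * np.dot(x, y) + coef0
analytic = (degree * (degree - 1) * gamma**2 * inside**(degree - 2) * np.dot(x, y)
            + len(x) * gamma * degree * inside**(degree - 1))
assert np.allclose(np.trace(H), analytic)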
Example #2
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        D2 = util.dist2_matrix(X, Y)
        # 1d array of length nx
        Xi = X[:, dim]
        # 1d array of length ny
        Yi = Y[:, dim]
        # nx x ny
        dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]

        b = self.b
        c = self.c
        Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
        assert Gdim.shape[0] == X.shape[0]
        assert Gdim.shape[1] == Y.shape[0]
        return Gdim 
Example #3
Source File: kernel.py    From kernel-gof with MIT License
def gradX_y(self, X, y):
        """
        Compute the gradient with respect to X (the first argument of the
        kernel). Base class provides a default autograd implementation for convenience.
        Subclasses should override if this does not work.

        X: nx x d numpy array.
        y: numpy array of length d.

        Return a numpy array G of size nx x d, the derivative of k(X, y) with
        respect to X.
        """
        yrow = np.reshape(y, (1, -1))
        f = lambda X: self.eval(X, yrow)
        g = autograd.elementwise_grad(f)
        G = g(X)
        assert G.shape[0] == X.shape[0]
        assert G.shape[1] == X.shape[1]
        return G

# end class KSTKernel 
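The default implementation above leans on autograd.elementwise_grad: when f maps each row of X to its own kernel value, differentiating the sum of the outputs yields exactly one gradient row per input row. A self-contained illustration with a Gaussian kernel (not code from the project):

import autograd
import autograd.numpy as np

y = np.array([1.0, -2.0])

def f(X):
    # nx x 1 column of Gaussian kernel values k(x, y) for each row x of X
    return np.exp(-np.sum((X - y)**2, axis=1, keepdims=True) / 2.0)

X = np.array([[0.0, 0.0],
              [1.0, 1.0]])
G = autograd.elementwise_grad(f)(X)   # nx x d, one gradient row per input row

# the analytic gradient of exp(-||x - y||^2 / 2) w.r.t. x is -k(x, y)*(x - y)
assert np.allclose(G, -f(X) * (X - y))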
Example #4
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        """
        Compute
        \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        b = self.b
        c = self.c
        D2 = util.dist2_matrix(X, Y)

        # d = input dimension
        d = X.shape[1]
        c2D2 = c**2 + D2
        T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
        T2 = -2.0*b*d*c2D2**(b-1)
        return T1 + T2

# end class KIMQ 
Example #5
Source File: kernel.py    From kernel-gof with MIT License
def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.

        X: n x d
        Y: n x d

        Return a numpy array of size n x d
        """
        sigma2 = self.sigma2
        Kvec = self.pair_eval(X, Y)
        # n x d
        Diff = X - Y
        G = -Kvec[:, np.newaxis]*Diff/sigma2
        return G 
Example #6
Source File: density.py    From kernel-gof with MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing a stack of k covariance
            matrices, one for each mixture component.
        pmix: a one-dimensional length-k array of mixture weights that sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances does not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
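For reference, a small construction sketch with the documented shapes (k = 2 components in d = 2 dimensions); the values and the commented-out constructor call are illustrative assumptions, not taken from the project's tests:

import autograd.numpy as np

means = np.array([[0.0, 0.0],
                  [3.0, 3.0]])                       # k x d array of means
variances = np.stack([np.eye(2), 2.0 * np.eye(2)])   # k x d x d covariance stack
pmix = np.array([0.3, 0.7])                          # mixture weights, sum to one
# p = GaussianMixture(means, variances, pmix)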
Example #7
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at every pair (x, y) with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert d1==d2, 'Dimensions of the two inputs must be the same'
        d = d1
        sigma2 = self.sigma2
        D2 = np.sum(X**2, 1)[:, np.newaxis] - 2*np.dot(X, Y.T) + np.sum(Y**2, 1)
        K = np.exp(old_div(-D2,(2.0*sigma2)))
        G = K/sigma2*(d - old_div(D2,sigma2))
        return G 
Example #8
Source File: density.py    From kernel-gof with MIT License
def grad_log(self, X):
    #    """
    #    Evaluate the gradients (with respect to the input) of the log density at
    #    each of the n points in X. This is the score function.

    #    X: n x d numpy array.
        """
        Evaluate the gradients (with respect to the input) of the log density at
        each of the n points in X. This is the score function.

        X: n x d numpy array.

        Return an n x d numpy array of gradients.
        """
        XB = np.dot(X, self.B)
        Y = 0.5*XB + self.c
        E2y = np.exp(2*Y)
        # n x dh
        Phi = old_div((E2y-1.0),(E2y+1))
        # n x dx
        T = np.dot(Phi, 0.5*self.B.T)
        S = self.b - X + T
        return S 
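Hand-derived score functions like the one above can also be sanity-checked with autograd: for any model whose log density is written with autograd.numpy, elementwise_grad of log_den gives the same n x d score matrix. A toy check with a standard normal log density (illustrative only):

import autograd
import autograd.numpy as np

def log_den(X):
    # unnormalized log density of a standard normal, one value per row of X
    return -0.5 * np.sum(X**2, axis=1)

score = autograd.elementwise_grad(log_den)   # maps n x d to n x d
X = np.random.randn(4, 3)
assert np.allclose(score(X), -X)             # the standard normal score is -x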
Example #9
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_call_changing_trainability(self):
        """Test that trainability properly changes between QNode calls"""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev, interface="autograd")
        def circuit(x, y, z):
            qml.RX(x, wires=0)
            qml.RY(y, wires=0)
            qml.RZ(z, wires=0)
            return qml.expval(qml.PauliZ(0))

        x = qml.numpy.array(1, requires_grad=True)
        y = qml.numpy.array(2, requires_grad=False)
        z = qml.numpy.array(3, requires_grad=True)

        res = circuit(x, y, z)

        assert circuit.get_trainable_args() == {0, 2}

        x.requires_grad = False
        y.requires_grad = True

        res = circuit(x, y, z)

        assert circuit.get_trainable_args() == {1, 2} 
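PennyLane exposes autograd's NumPy as qml.numpy, and the requires_grad flag on those arrays decides which QNode arguments are differentiated. A minimal sketch outside the test suite:

import pennylane as qml

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, interface="autograd")
def circuit(x, y):
    qml.RX(x, wires=0)
    qml.RY(y, wires=0)
    return qml.expval(qml.PauliZ(0))

x = qml.numpy.array(0.3, requires_grad=True)
y = qml.numpy.array(0.7, requires_grad=False)

# qml.grad differentiates only with respect to the trainable argument x
dcircuit = qml.grad(circuit)
print(dcircuit(x, y))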
Example #10
Source File: intertst.py    From kernel-gof with MIT License
def __init__(self, p, gwidth2, test_locs, alpha=0.01, seed=28):
        """
        p: an instance of UnnormalizedDensity
        gwidth2: Gaussian width squared for the Gaussian kernel
        test_locs: J x d numpy array of J locations to test the difference
        alpha: significance level 
        """
        super(GaussMETest, self).__init__(p, alpha)
        self.gwidth2 = gwidth2
        self.test_locs = test_locs
        self.seed = seed
        ds = p.get_datasource()
        if ds is None:
            raise ValueError('%s test requires a density p which implements get_datasource()' % str(GaussMETest))

        # Construct the ME test
        metest = tst.MeanEmbeddingTest(test_locs, gwidth2, alpha=alpha)
        self.metest = metest 
Example #11
Source File: kernel.py    From kernel-gof with MIT License
def pair_eval(self, X, Y):
        """
        Evaluate k(x1, y1), k(x2, y2), ...

        Parameters
        ----------
        X, Y : n x d numpy array

        Return
        -------
        a numpy array with length n
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert n1==n2, 'Two inputs must have the same number of instances'
        assert d1==d2, 'Two inputs must have the same dimension'
        D2 = np.sum( (X-Y)**2, 1)
        Kvec = np.exp(old_div(-D2,(2.0*self.sigma2)))
        return Kvec 
Example #12
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_no_differentiable_parameters(self):
        """If there are no differentiable parameters, the output of the gradient
        function is an empty tuple, and a warning is emitted."""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev, interface="autograd")
        def circuit(data1):
            qml.templates.AmplitudeEmbedding(data1, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        grad_fn = qml.grad(circuit)
        data1 = qml.numpy.array([0, 1, 1, 0], requires_grad=False) / np.sqrt(2)

        with pytest.warns(UserWarning, match="Output seems independent of input"):
            res = grad_fn(data1)

        assert res == tuple() 
Example #13
Source File: goftest.py    From kernel-gof with MIT License
def list_simulate_spectral(cov, J, n_simulate=1000, seed=82):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix.  This is intended to be used to approximate the null
        distribution.

        Return (a numpy array of simulated n*FSSD values, eigenvalues of cov)
        """
        # eigen decompose 
        eigs, _ = np.linalg.eig(cov)
        eigs = np.real(eigs)
        # sort in decreasing order 
        eigs = -np.sort(-eigs)
        sim_fssds = FSSD.simulate_null_dist(eigs, J, n_simulate=n_simulate,
                seed=seed)
        return sim_fssds, eigs 
Example #14
Source File: kernel.py    From kernel-gof with MIT License
def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...

        X: n x d where each row represents one point
        Y: n x d
        return a 1d numpy array of length n.
        """
        n1, d1 = X.shape
        n2, d2 = Y.shape
        assert n1 == n2, 'Two inputs must have the same number of instances'
        assert d1 == d2, 'Two inputs must have the same dimension'
        D2 = np.sum((X - Y)**2, axis=1)
        return np.tensordot(
            self.wts,
            np.exp(D2[np.newaxis, :] / (-2 * self.sigma2s[:, np.newaxis])),
            1) 
Example #15
Source File: goftest.py    From kernel-gof with MIT License
def __init__(self, p, k, bootstrapper=bootstrapper_rademacher, alpha=0.01,
            n_simulate=500, seed=11):
        """
        p: an instance of UnnormalizedDensity
        k: a KSTKernel object
        bootstrapper: a function: (n) |-> numpy array of n weights 
            to be multiplied in the double sum of the test statistic for generating 
            bootstrap samples from the null distribution.
        alpha: significance level 
        n_simulate: The number of times to simulate from the null distribution
            by bootstrapping. Must be a positive integer.
        """
        super(KernelSteinTest, self).__init__(p, alpha)
        self.k = k
        self.bootstrapper = bootstrapper
        self.n_simulate = n_simulate
        self.seed = seed 
Example #16
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at every pair (x, y) with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        d = X.shape[1]
        sumx2 = np.sum(X**2, axis=1)[:, np.newaxis]
        sumy2 = np.sum(Y**2, axis=1)[np.newaxis, :]
        D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2
        s = (D2[np.newaxis, :, :] / self.sigma2s[:, np.newaxis, np.newaxis])
        return np.einsum('w,wij,wij->ij',
                         self.wts / self.sigma2s, np.exp(s / -2), d - s) 
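The einsum in the return statement is just a weighted sum over kernel components: for each component w it scales the elementwise product of exp(-s/2) and (d - s) by wts[w]/sigma2s[w] and sums over w. A tiny check of that reading on synthetic arrays (not project data):

import autograd.numpy as np

a = np.array([0.5, 0.3, 0.2])        # per-component weights (wts / sigma2s)
B = np.random.randn(3, 4, 5)         # stands in for np.exp(s / -2)
C = np.random.randn(3, 4, 5)         # stands in for d - s
out = np.einsum('w,wij,wij->ij', a, B, C)
loop = sum(a[w] * B[w] * C[w] for w in range(3))
assert np.allclose(out, loop)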
Example #17
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # optimization, other expression is valid too
            out = gamma * Y[np.newaxis, :, dim]  # 1 x ny
            return np.repeat(out, X.shape[0], axis=0)

        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * Y[np.newaxis, :, dim]) 
Example #18
Source File: kernel.py    From kernel-gof with MIT License
def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # optimization, other expression is valid too
            out = gamma * X[:, dim, np.newaxis]  # nx x 1
            return np.repeat(out, Y.shape[0], axis=1)

        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * X[:, dim, np.newaxis]) 
Example #19
Source File: regression.py    From convoys with MIT License
def predict(self, x, t):
        '''Returns the value of the cumulative distribution function
        for a fitted model (using the maximum a posteriori estimate).

        :param x: feature vector (or matrix)
        :param t: time
        '''
        params = self.params['map']
        x = numpy.array(x)
        t = numpy.array(t)
        return self._predict(params, x, t) 
Example #20
Source File: goftest.py    From kernel-gof with MIT License
def __call__(self, V):
        """
        :param V: a numpy array of size J x d (data matrix)

        :returns (J x d) numpy array representing witness evaluations at the J
            points.
        """
        J = V.shape[0]
        X = self.dat.data()
        n, d = X.shape
        # an FSSD object used to compute the feature tensor (n x d x J) chunk by chunk
        fssd = FSSD(self.p, self.k, V, null_sim=None, alpha=None)

        # When X, V contain many points, this can use a lot of memory.
        # Process chunk by chunk.
        block_rows = util.constrain(50000//(d*J), 10, 5000)
        row_sums = []
        for (f, t) in util.ChunkIterable(start=0, end=n, chunk_size=block_rows):
            assert f < t
            Xblock = X[f:t, :]
            b = Xblock.shape[0]
            F = fssd.feature_tensor(Xblock)
            Tau = np.reshape(F, [b, d*J])
            # witness evaluations computed on only this chunk of data
            row_sums.append(Tau.sum(axis=0))

        # an array of length d*J; dividing the total by n averages over all chunks
        witness_evals = np.sum(np.vstack(row_sums), axis=0)/float(n)
        assert len(witness_evals) == d*J
        return np.reshape(witness_evals, [J, d]) 
Example #21
Source File: goftest.py    From kernel-gof with MIT License
def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix of the U-statistic. The simulated statistic is n*FSSD^2 where
        FSSD is an unbiased estimator.

        - eigs: a numpy array of estimated eigenvalues of the covariance
          matrix. eigs has length d*J, where d is the input dimension.
        - J: the number of test locations.

        Return a numpy array of simulated statistics.
        """
        d = old_div(len(eigs),J)
        assert d>0
        # draw at most d x J x block_size values at a time
        block_size = max(20, int(old_div(1000.0,(d*J))))
        fssds = np.zeros(n_simulate)
        from_ind = 0
        with util.NumpySeedContext(seed=seed):
            while from_ind < n_simulate:
                to_draw = min(block_size, n_simulate-from_ind)
                # draw chi^2 random variables. 
                chi2 = np.random.randn(d*J, to_draw)**2

                # an array of length to_draw 
                sim_fssds = eigs.dot(chi2-1.0)
                # store 
                end_ind = from_ind+to_draw
                fssds[from_ind:end_ind] = sim_fssds
                from_ind = end_ind
        return fssds 
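The simulation boils down to drawing squared standard normals and forming the weighted sum over eigs[i]*(Z_i^2 - 1); the resulting draws can then be compared against an observed statistic to get a p-value. A standalone sketch with made-up eigenvalues and statistic:

import autograd.numpy as np

eigs = np.array([1.5, 0.8, 0.2, 0.05])             # hypothetical eigenvalues
n_simulate = 2000
z2 = np.random.randn(len(eigs), n_simulate) ** 2   # squared standard normals
sim_stats = eigs.dot(z2 - 1.0)                     # simulated null statistics
observed = 3.1                                     # hypothetical n*FSSD^2 value
pvalue = np.mean(sim_stats > observed)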
Example #22
Source File: goftest.py    From kernel-gof with MIT License
def feature_tensor(self, X):
        """
        Compute the feature tensor which is n x d x J.
        The feature tensor can be used to compute the statistic, and the
        covariance matrix for simulating from the null distribution.

        X: n x d data numpy array

        return an n x d x J numpy array
        """
        k = self.k
        J = self.V.shape[0]
        n, d = X.shape
        # n x d matrix of gradients
        grad_logp = self.p.grad_log(X)
        # n x J matrix
        K = k.eval(X, self.V)

        list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
        stack0 = np.concatenate(list_grads, axis=0)
        # n x d x J array such that dKdV[:, :, j] is the derivative of
        # k(X, V_j) with respect to X.
        dKdV = np.transpose(stack0, (1, 2, 0))

        # n x d x J tensor
        grad_logp_K = util.outer_rows(grad_logp, K)
        Xi = old_div(grad_logp_K + dKdV, np.sqrt(d*J))
        return Xi 
Example #23
Source File: regression.py    From convoys with MIT License
def predict_posteriori(self, x, t):
        ''' Returns the trace samples generated via the MCMC steps.

        Requires the model to be fit with `mcmc == True`.'''
        x = numpy.array(x)
        t = numpy.array(t)
        assert self._mcmc
        params = self.params['samples']
        t = numpy.expand_dims(t, -1)
        return self._predict(params, x, t) 
Example #24
Source File: density.py    From kernel-gof with MIT License
def log_den(self, X):
        """
        Evaluate the log of the unnormalized density at the n points in X.

        X: n x d numpy array

        Return a one-dimensional numpy array of length n.
        """
        raise NotImplementedError() 
Example #25
Source File: kernel.py    From kernel-gof with MIT License
def pair_gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at each pair (X[i, :], Y[i, :]).

        X: n x d numpy array.
        Y: n x d numpy array.

        Return a one-dimensional length-n numpy array of the derivatives.
        """
        return sum(w * k.pair_gradXY_sum(X, Y) for w, k in zip(self.wts, self.ks)) 
Example #26
Source File: kernel.py    From kernel-gof with MIT License
def pair_gradY_X(self, X, Y):
        """
        Compute the gradient with respect to Y in k(X, Y), evaluated at the
        specified X and Y.

        X: n x d
        Y: n x d

        Return a numpy array of size n x d
        """
        return sum(w * k.pair_gradY_X(X, Y) for w, k in zip(self.wts, self.ks)) 
Example #27
Source File: kernel.py    From kernel-gof with MIT License
def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.

        X: n x d
        Y: n x d

        Return a numpy array of size n x d
        """
        return sum(w * k.pair_gradX_Y(X, Y) for w, k in zip(self.wts, self.ks)) 
Example #28
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at every pair (x, y) with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        return sum(w * k.gradXY_sum(X, Y) for w, k in zip(self.wts, self.ks)) 
Example #29
Source File: kernel.py    From kernel-gof with MIT License
def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...

        X: n x d where each row represents one point
        Y: n x d
        return a 1d numpy array of length n.
        """
        return sum(w * k.pair_eval(X, Y) for w, k in zip(self.wts, self.ks)) 
Example #30
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        return sum(w * k.gradX_Y(X, Y, dim) for w, k in zip(self.wts, self.ks))