Python scipy.linalg.cho_factor() Examples

The following are 30 code examples of scipy.linalg.cho_factor(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all of the other available functions and classes of the scipy.linalg module.
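
For orientation, here is a minimal self-contained sketch of the cho_factor/cho_solve pattern that the examples below build on (the matrix and right-hand side are made up purely for illustration):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

# Build a small symmetric positive-definite matrix A and a right-hand side b.
rng = np.random.default_rng(0)
M = rng.standard_normal((5, 5))
A = M @ M.T + 5 * np.eye(5)
b = rng.standard_normal(5)

# cho_factor returns (c, lower): the Cholesky factor packed into a square
# array plus a flag saying whether the lower or upper triangle holds it.
c, lower = cho_factor(A)

# cho_solve reuses the factorisation to solve A x = b without forming A^{-1}.
x = cho_solve((c, lower), b)
assert np.allclose(A @ x, b)
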
Example #1
Source File: utils.py    From enterprise with MIT License
def inv(self, logdet=False):
        if self.ndim == 1:
            inv = 1.0 / self

            if logdet:
                return inv, np.sum(np.log(self))
            else:
                return inv
        else:
            try:
                cf = sl.cho_factor(self)
                inv = sl.cho_solve(cf, np.identity(cf[0].shape[0]))
                if logdet:
                    ld = 2.0 * np.sum(np.log(np.diag(cf[0])))
            except np.linalg.LinAlgError:
                u, s, v = np.linalg.svd(self)
                inv = np.dot(u / s, u.T)
                if logdet:
                    ld = np.sum(np.log(s))
            if logdet:
                return inv, ld
            else:
                return inv 
Example #2
Source File: clustered_kde.py    From kombine with MIT License
def _evaluate_point_logpdf(args):
    """
    Evaluate the Gaussian KDE at a given point `p`.  This lives outside the KDE method to allow for
    parallelization using :mod:`multiprocessing`. Since :func:`map` only allows single-argument
    functions, the following arguments need to be packed into a single tuple.

    :param p:
        The point to evaluate the KDE at.

    :param data:
        The `(N, ndim)`-shaped array of data used to construct the KDE.

    :param cho_factor:
        A Cholesky decomposition of the kernel covariance matrix.
    """
    point, data, cho_factor = args

    # Use Cholesky decomposition to avoid direct inversion of covariance matrix
    diff = data - point
    tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T
    diff *= tdiff

    # Work in the log to avoid large numbers
    return logsumexp(-np.sum(diff, axis=1)/2) 
Example #3
Source File: clustered_kde.py    From kombine with MIT License
def _set_bandwidth(self):
        r"""
        Use Scott's rule to set the kernel bandwidth:

        .. math::

            \mathcal{K} = n^{-1/(d+4)} \Sigma^{1/2}

        Also store Cholesky decomposition for later.
        """
        if self.size > 0 and self._cov is not None:
            self._kernel_cov = self._cov * self.size ** (-2/(self.ndim + 4))

            # Used to evaluate PDF with cho_solve()
            self._cho_factor = la.cho_factor(self._kernel_cov)

            # Make sure the estimated PDF integrates to 1.0
            self._lognorm = self.ndim/2 * np.log(2*np.pi) + np.log(self.size) +\
                np.sum(np.log(np.diag(self._cho_factor[0])))

        else:
            self._lognorm = -np.inf 
Example #4
Source File: clustered_kde.py    From kombine with MIT License
def __init__(self, data):
        self._data = np.atleast_2d(data)

        self._mean = np.mean(data, axis=0)
        self._cov = None

        if self.data.shape[0] > 1:
            try:
                self._cov = np.cov(data.T)

                # Try factoring now to see if regularization is needed
                la.cho_factor(self._cov)

            except la.LinAlgError:
                self._cov = oas_cov(data)

        self._set_bandwidth()

        # store transformation variables for drawing random values
        alphas = np.std(data, axis=0)
        ms = 1./alphas
        m_i, m_j = np.meshgrid(ms, ms)
        ms = m_i * m_j
        self._draw_cov = ms * self._kernel_cov
        self._scale_fac = alphas 
Example #5
Source File: correlated_likelihood.py    From kombine with MIT License
def log_likelihood(self, p):
        p = self.to_params(p)

        v = self.rvs(p)

        res = self.vs - v - p['mu']

        cov = p['nu']*p['nu']*np.diag(self.dvs*self.dvs)
        cov += generate_covariance(self.ts, p['sigma'], p['tau'])

        cfactor = sl.cho_factor(cov)
        cc, lower = cfactor

        n = self.ts.shape[0]

        return -0.5*n*np.log(2.0*np.pi) - np.sum(np.log(np.diag(cc))) - 0.5*np.dot(res, sl.cho_solve(cfactor, res)) 
Example #6
Source File: linalg_covmat.py    From vnpy_crypto with MIT License
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = - np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf 
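
The docstring above notes that the explicit inverse and determinant should be replaced with something more efficient; a hedged sketch of the Cholesky-based alternative (an illustration, not part of the original file) could look like this:

import numpy as np
from scipy import linalg

def mvn_loglike_chol(x, sigma):
    '''Multivariate normal log-likelihood via cho_factor/cho_solve (illustrative).'''
    cf = linalg.cho_factor(sigma)
    # Mahalanobis term without ever forming sigma^{-1}
    maha = np.dot(x, linalg.cho_solve(cf, x))
    # log|sigma| read off the diagonal of the Cholesky factor
    logdet = 2.0 * np.sum(np.log(np.diag(cf[0])))
    nobs = len(x)
    return -0.5 * (maha + logdet + nobs * np.log(2 * np.pi))
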
Example #7
Source File: kalman_filter.py    From Splunking-Crime with GNU Affero General Public License v3.0
def standardized_forecasts_error(self):
        """
        Standardized forecast errors
        """
        if self._standardized_forecasts_error is None:
            from scipy import linalg
            self._standardized_forecasts_error = np.zeros(
                self.forecasts_error.shape, dtype=self.dtype)

            for t in range(self.forecasts_error_cov.shape[2]):
                if self.nmissing[t] > 0:
                    self._standardized_forecasts_error[:, t] = np.nan
                if self.nmissing[t] < self.k_endog:
                    mask = ~self.missing[:, t].astype(bool)
                    F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
                    upper, _ = linalg.cho_factor(F[:, :, 0])
                    self._standardized_forecasts_error[mask, t] = (
                        linalg.solve_triangular(
                            upper, self.forecasts_error[mask, t]
                        )
                    )

        return self._standardized_forecasts_error 
Example #8
Source File: mcmc.py    From deep_gp_random_features with Apache License 2.0
def do_sampleF2(Y, X, current_F1, log_theta):

    log_theta_sigma2, log_theta_lengthscale, log_theta_lambda = unpack_log_theta(log_theta)

    n = Y.shape[0]

    K_F2 = covariance_function(current_F1, current_F1, log_theta_sigma2[1], log_theta_lengthscale[1]) + np.eye(n) * 1e-9
    K_Y = K_F2 + np.eye(n) * np.exp(log_theta_lambda)
    L_K_Y, lower_K_Y = linalg.cho_factor(K_Y, lower=True)
    K_inv_Y = linalg.cho_solve((L_K_Y, lower_K_Y), Y)

    mu = np.dot(K_F2, K_inv_Y)

    K_inv_K = linalg.cho_solve((L_K_Y, lower_K_Y), K_F2)
    Sigma = K_F2 - np.dot(K_F2, K_inv_K)

    L_Sigma = linalg.cholesky(Sigma, lower=True)
    proposed_F2 = mu + np.dot(L_Sigma, np.random.normal(0.0, 1.0, [n, 1]))

    return proposed_F2

## Unpack the vector of parameters into their three elements 
Example #9
Source File: linalg_covmat.py    From Splunking-Crime with GNU Affero General Public License v3.0
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = - np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf 
Example #10
Source File: linalg.py    From sporco with BSD 3-Clause "New" or "Revised" License
def cho_factor(A, rho, lower=False, check_finite=True):
    r"""Compute Cholesky factorisation of either :math:`A^T A + \rho I` or
    :math:`A A^T + \rho I`, depending on which matrix is smaller.

    Parameters
    ----------
    A : array_like
      Array :math:`A`
    rho : float
      Scalar :math:`\rho`
    lower : bool, optional (default False)
      Flag indicating whether lower or upper triangular factors are
      computed
    check_finite : bool, optional (default True)
      Flag indicating whether the input array should be checked for Inf
      and NaN values

    Returns
    -------
    c : ndarray
      Matrix containing lower or upper triangular Cholesky factor,
      as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
      Flag indicating whether the factor is lower or upper triangular
    """

    N, M = A.shape
    # If N < M it is cheaper to factorise A*A^T + rho*I and then use the
    # matrix inversion lemma to compute the inverse of A^T*A + rho*I
    if N >= M:
        c, lwr = linalg.cho_factor(
            A.T.dot(A) + rho * np.identity(M, dtype=A.dtype), lower=lower,
            check_finite=check_finite)
    else:
        c, lwr = linalg.cho_factor(
            A.dot(A.T) + rho * np.identity(N, dtype=A.dtype), lower=lower,
            check_finite=check_finite)
    return c, lwr 
Example #11
Source File: mvnormal.py    From cgpm with Apache License 2.0
def __init__(self, Sigma):
    self._cholesky = la.cho_factor(Sigma) 
Example #12
Source File: signal_base.py    From enterprise with MIT License
def _solve_NX(self, X):
        """Solves :math:`N^{-1}X`, where :math:`X`
        is a 1-d or 2-d array.
        """
        if X.ndim == 1:
            X = X.reshape(X.shape[0], 1)

        NX = X / self._nvec[:, None]
        for slc, block in zip(self._slices, self._blocks):
            Xblock = X[slc, :]
            if slc.stop - slc.start > 1:
                cf = sl.cho_factor(block + np.diag(self._nvec[slc]))
                NX[slc] = sl.cho_solve(cf, Xblock)
        return NX.squeeze() 
Example #13
Source File: signal_base.py    From enterprise with MIT License
def _get_logdet(self):
        """Returns log determinant of :math:`N+UJU^{T}` where :math:`U`
        is a quantization matrix.
        """
        if len(self._idx) > 0:
            logdet = np.sum(np.log(self._nvec[self._idx]))
        else:
            logdet = 0
        for slc, block in zip(self._slices, self._blocks):
            if slc.stop - slc.start > 1:
                cf = sl.cho_factor(block + np.diag(self._nvec[slc]))
                logdet += np.sum(2 * np.log(np.diag(cf[0])))
            else:
                logdet += np.sum(np.log(self._nvec[slc]))
        return logdet 
Example #14
Source File: linalg_covmat.py    From Splunking-Crime with GNU Affero General Public License v3.0
def mvn_nloglike_obs(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)

    #Still wasteful to calculate pinv first
    sigmainv = linalg.inv(sigma)
    cholsigmainv = linalg.cholesky(sigmainv)
    #2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
    # logdet not needed ???
    #logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
    x_whitened = np.dot(cholsigmainv, x)

    #sigmainv = linalg.cholesky(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))

    sigma2 = 1. # error variance is included in sigma

    llike  =  0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
                          + (x_whitened**2)/sigma2
                          +  np.log(2*np.pi))

    return llike, (x_whitened**2) 
Example #15
Source File: likelihood.py    From radvel with MIT License
def logprob(self):
        """
        Return GP log-likelihood given the data and model.
        log-likelihood is computed using Cholesky decomposition as:
        .. math::

           \ln{L} = -\frac{1}{2} \mathbf{r}^T \mathbf{K}^{-1} \mathbf{r} - \frac{1}{2} \ln \det(\mathbf{K}) - \frac{N}{2} \ln(2\pi)

        where r = vector of residuals (GPLikelihood._resids),
        K = covariance matrix, and N = number of datapoints.
        Priors are not applied here.
        Constant has been omitted.
        Returns:
            float: Natural log of likelihood
        """
        # update the Kernel object hyperparameter values
        self.update_kernel_params()

        r = self._resids()

        self.kernel.compute_covmatrix(self.errorbars())

        K = self.kernel.covmatrix

        # solve alpha = inverse(K)*r
        try:
            alpha = cho_solve(cho_factor(K),r)

            # compute determinant of K
            (s,d) = np.linalg.slogdet(K)

            # calculate likelihood
            like = -.5 * (np.dot(r, alpha) + d + self.N*np.log(2.*np.pi))

            return like

        except (np.linalg.linalg.LinAlgError, ValueError):
            warnings.warn("Non-positive definite kernel detected.", RuntimeWarning)
            return -np.inf 
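
As a side note on the determinant above: once K has been factorised for cho_solve, log det(K) is also available from the diagonal of the factor, so the separate slogdet call could in principle be avoided. A minimal self-contained check (illustrative, not from radvel):

import numpy as np
from scipy.linalg import cho_factor

rng = np.random.default_rng(2)
M = rng.standard_normal((6, 6))
K = M @ M.T + 6 * np.eye(6)   # a made-up positive-definite matrix

cf = cho_factor(K)
logdet_chol = 2.0 * np.sum(np.log(np.diag(cf[0])))
_, logdet_np = np.linalg.slogdet(K)
assert np.allclose(logdet_chol, logdet_np)
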
Example #16
Source File: likelihood.py    From radvel with MIT License
def predict(self, xpred):
        """ Realize the GP using the current values of the hyperparameters at values x=xpred.
            Used for making GP plots.
            Args:
                xpred (np.array): numpy array of x values for realizing the GP
            Returns:
                tuple: tuple containing:
                    np.array: the numpy array of predictive means \n
                    np.array: the numpy array of predictive standard deviations
        """

        self.update_kernel_params()

        r = np.array([self._resids()]).T

        self.kernel.compute_distances(self.x, self.x)
        K = self.kernel.compute_covmatrix(self.errorbars())

        self.kernel.compute_distances(xpred, self.x)
        Ks = self.kernel.compute_covmatrix(0.)

        L = cho_factor(K)
        alpha = cho_solve(L, r)
        mu = np.dot(Ks, alpha).flatten()

        self.kernel.compute_distances(xpred, xpred)
        Kss = self.kernel.compute_covmatrix(0.)
        B = cho_solve(L, Ks.T)
        var = np.array(np.diag(Kss - np.dot(Ks, B))).flatten()
        stdev = np.sqrt(var)

        # set the default distances back to their regular values
        self.kernel.compute_distances(self.x, self.x)

        return mu, stdev 
Example #17
Source File: signal_base.py    From enterprise with MIT License
def _solve_ZNX(self, X, Z):
        """Solves :math:`Z^T N^{-1}X`, where :math:`X`
        and :math:`Z` are 1-d or 2-d arrays.
        """
        if X.ndim == 1:
            X = X.reshape(X.shape[0], 1)
        if Z.ndim == 1:
            Z = Z.reshape(Z.shape[0], 1)

        n, m = Z.shape[1], X.shape[1]
        ZNX = np.zeros((n, m))
        if len(self._idx) > 0:
            ZNXr = np.dot(Z[self._idx, :].T, X[self._idx, :] / self._nvec[self._idx, None])
        else:
            ZNXr = 0
        for slc, block in zip(self._slices, self._blocks):
            Zblock = Z[slc, :]
            Xblock = X[slc, :]

            if slc.stop - slc.start > 1:
                cf = sl.cho_factor(block + np.diag(self._nvec[slc]))
                bx = sl.cho_solve(cf, Xblock)
            else:
                bx = Xblock / self._nvec[slc][:, None]
            ZNX += np.dot(Zblock.T, bx)
        ZNX += ZNXr
        return ZNX.squeeze() if len(ZNX) > 1 else float(ZNX) 
Example #18
Source File: linalg.py    From sporco with BSD 3-Clause "New" or "Revised" License
def cho_solve_ATAI(A, rho, b, c, lwr, check_finite=True):
    r"""Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
      Matrix :math:`A`
    rho : float
      Scalar :math:`\rho`
    b : array_like
      Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
      Matrix containing lower or upper triangular Cholesky factor,
      as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
      Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
      Solution to the linear system
    """

    N, M = A.shape
    if N >= M:
        x = linalg.cho_solve((c, lwr), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.cho_solve((c, lwr), A.dot(b),
                                          check_finite=check_finite))) / rho
    return x 
Example #19
Source File: linalg.py    From sporco with BSD 3-Clause "New" or "Revised" License
def cho_solve_AATI(A, rho, b, c, lwr, check_finite=True):
    r"""Solve the linear system :math:`(A A^T + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A A^T + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
      Matrix :math:`A`
    rho : float
      Scalar :math:`\rho`
    b : array_like
      Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
      Matrix containing lower or upper triangular Cholesky factor,
      as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
      Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
      Solution to the linear system
    """

    N, M = A.shape
    if N >= M:
        x = (b - linalg.cho_solve((c, lwr), b.dot(A).T,
                                  check_finite=check_finite).T.dot(A.T)) / rho
    else:
        x = linalg.cho_solve((c, lwr), b.T, check_finite=check_finite).T
    return x 
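
The N < M branches of the two sporco helpers above rest on the matrix inversion lemma: (A^T A + rho*I)^{-1} b = (b - A^T (A A^T + rho*I)^{-1} A b) / rho, so only the smaller system ever needs a Cholesky factorisation. A self-contained numerical check of that identity (illustrative only, calling scipy.linalg directly rather than the wrappers):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
A = rng.standard_normal((20, 50))   # N < M: the smaller A A^T + rho*I gets factorised
rho = 0.5
b = rng.standard_normal(50)

# Factorise the N x N system and apply the inversion lemma for the M x M solve.
c, lwr = linalg.cho_factor(A @ A.T + rho * np.eye(20))
x = (b - A.T @ linalg.cho_solve((c, lwr), A @ b)) / rho

# Compare against solving the M x M system directly.
x_direct = np.linalg.solve(A.T @ A + rho * np.eye(50), b)
assert np.allclose(x, x_direct)
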
Example #20
Source File: linalg.py    From compas with MIT License
def _chofactor(A):
    r"""Returns the Cholesky factorisation/decomposition matrix.

    Parameters
    ----------
    A : array
        Matrix A represented as an (m x m) array.

    Returns
    -------
    array
        Matrix (m x m) with upper/lower triangle containing Cholesky factor of A.

    Notes
    -----
    The Cholesky factorisation decomposes a Hermitian positive-definite matrix
    into the product of a lower/upper triangular matrix and its transpose.

    .. math::

        \mathbf{A} = \mathbf{L} \mathbf{L}^{\mathrm{T}}

    Examples
    --------
    >>> _chofactor(array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]]))
    (array([[ 5.,  3., -1.],
           [15.,  3.,  1.],
           [-5.,  0.,  3.]]), False)

    """
    return cho_factor(A) 
Example #21
Source File: sgcrf.py    From sgcrfpy with MIT License
def check_pd(A, lower=True):
    """
    Checks if A is PD.
    If so returns True and Cholesky decomposition,
    otherwise returns False and None
    """
    try:
        return True, np.tril(cho_factor(A, lower=lower)[0])
    except LinAlgError as err:
        if 'not positive definite' in str(err):
            return False, None 
Example #22
Source File: test_white_signals.py    From enterprise with MIT License
def logdet(self):
        Sigma = np.diag(1 / self.J) + np.dot(self.U.T, self.U / self.N[:, None])
        cf = sl.cho_factor(Sigma)
        ld = np.sum(np.log(self.N)) + np.sum(np.log(self.J))
        ld += np.sum(2 * np.log(np.diag(cf[0])))
        return ld 
Example #23
Source File: test_white_signals.py    From enterprise with MIT License
def solve(self, other):
        if other.ndim == 1:
            Nx = np.array(other / self.N)
        elif other.ndim == 2:
            Nx = np.array(other / self.N[:, None])
        UNx = np.dot(self.U.T, Nx)

        Sigma = np.diag(1 / self.J) + np.dot(self.U.T, self.U / self.N[:, None])
        cf = sl.cho_factor(Sigma)
        if UNx.ndim == 1:
            tmp = np.dot(self.U, sl.cho_solve(cf, UNx)) / self.N
        else:
            tmp = np.dot(self.U, sl.cho_solve(cf, UNx)) / self.N[:, None]
        return Nx - tmp 
Example #24
Source File: beamforming.py    From pyroomacoustics with MIT License
def rake_mvdr_filters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
        """
        Compute the time-domain filters of the minimum variance distortionless
        response beamformer.
        """

        H = build_rir_matrix(
            self.R,
            (source, interferer),
            self.Lg,
            self.fs,
            epsilon=epsilon,
            unit_damping=True,
        )
        L = H.shape[1] // 2

        # the constraint vector
        kappa = int(delay * self.fs)
        h = H[:, kappa]

        # We first assume the sample are uncorrelated
        R_xx = np.dot(H[:, :L], H[:, :L].T)
        K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n

        # Compute the TD filters
        C = la.cho_factor(R_xx + K_nq, check_finite=False)
        g_val = la.cho_solve(C, h)

        g_val /= np.inner(h, g_val)
        self.filters = g_val.reshape((self.M, self.Lg))

        # compute and return SNR
        num = np.inner(g_val.T, np.dot(R_xx, g_val))
        denom = np.inner(np.dot(g_val.T, K_nq), g_val)

        return num / denom 
Example #25
Source File: mcmc.py    From deep_gp_random_features with Apache License 2.0
def log_p_Y_given_F1(Y, F1, log_theta):

    log_theta_sigma2, log_theta_lengthscale, log_theta_lambda = unpack_log_theta(log_theta)

    n = Y.shape[0]

    K_Y = covariance_function(F1, F1, log_theta_sigma2[1], log_theta_lengthscale[1]) + np.eye(n) * np.exp(log_theta_lambda)
    L_K_Y, lower_K_Y = linalg.cho_factor(K_Y, lower=True)

    nu = linalg.solve_triangular(L_K_Y, Y, lower=True)

    return -np.sum(np.log(np.diagonal(L_K_Y))) - 0.5 * np.dot(nu.transpose(), nu)


## Elliptical Slice Sampling to sample from the posterior over latent variables at layer 1 (the latent variables at layer 2 are integrated out analytically) 
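
On the whitening step above: solving L*nu = Y with the lower Cholesky factor gives nu^T nu = Y^T K^{-1} Y, so the quadratic form needs no explicit inverse. A minimal self-contained check of that identity (illustrative, not from the original file):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(3)
M = rng.standard_normal((5, 5))
K = M @ M.T + 5 * np.eye(5)   # a made-up positive-definite kernel matrix
y = rng.standard_normal(5)

L_K, lower = linalg.cho_factor(K, lower=True)
nu = linalg.solve_triangular(L_K, y, lower=True)

# nu^T nu equals y^T K^{-1} y because K = L L^T.
assert np.allclose(nu @ nu, y @ np.linalg.solve(K, y))
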
Example #26
Source File: infinitephasescreen.py    From aotools with GNU Lesser General Public License v3.0
def makeAMatrix(self):
        """
        Calculates the "A" matrix, that uses the existing data to find a new 
        component of the new phase vector.
        """
        # Cholsky solve can fail - if so do brute force inversion
        try:
            cf = linalg.cho_factor(self.cov_mat_zz)
            inv_cov_zz = linalg.cho_solve(cf, numpy.identity(self.cov_mat_zz.shape[0]))
        except linalg.LinAlgError:
            raise linalg.LinAlgError("Could not invert Covariance Matrix to for A and B Matrices. Try with a larger pixel scale")

        self.A_mat = self.cov_mat_xz.dot(inv_cov_zz) 
Example #27
Source File: signal_base.py    From enterprise with MIT License
def __init__(self, x):
            if sps.issparse(x):
                x = x.toarray()
            self.cf = sl.cho_factor(x) 
Example #28
Source File: linalg_covmat.py    From vnpy_crypto with MIT License
def mvn_nloglike_obs(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)

    #Still wasteful to calculate pinv first
    sigmainv = linalg.inv(sigma)
    cholsigmainv = linalg.cholesky(sigmainv)
    #2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
    # logdet not needed ???
    #logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
    x_whitened = np.dot(cholsigmainv, x)

    #sigmainv = linalg.cholesky(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))

    sigma2 = 1. # error variance is included in sigma

    llike  =  0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
                          + (x_whitened**2)/sigma2
                          +  np.log(2*np.pi))

    return llike, (x_whitened**2) 
Example #29
Source File: infinite_atmospheric_layer.py    From hcipy with MIT License
def _make_AB_matrices(self):
		# Vertical
		n = self.num_stencils_vertical
		cov_zz = self.cov_matrix_vertical[:n,:n]
		cov_xz = self.cov_matrix_vertical[n:, :n]
		cov_zx = self.cov_matrix_vertical[:n, n:]
		cov_xx = self.cov_matrix_vertical[n:, n:]

		cf = linalg.cho_factor(cov_zz)
		inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))

		self.A_vertical = cov_xz.dot(inv_cov_zz)

		BBt = cov_xx - self.A_vertical.dot(cov_zx)

		U, S, Vt = np.linalg.svd(BBt)
		L = np.sqrt(S[:self.input_grid.dims[0]])

		self.B_vertical = U * L

		# Horizontal
		n = self.num_stencils_horizontal
		cov_zz = self.cov_matrix_horizontal[:n,:n]
		cov_xz = self.cov_matrix_horizontal[n:, :n]
		cov_zx = self.cov_matrix_horizontal[:n, n:]
		cov_xx = self.cov_matrix_horizontal[n:, n:]

		cf = linalg.cho_factor(cov_zz)
		inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))

		self.A_horizontal = cov_xz.dot(inv_cov_zz)

		BBt = cov_xx - self.A_horizontal.dot(cov_zx)

		U, S, Vt = np.linalg.svd(BBt)
		L = np.sqrt(S[:self.input_grid.dims[1]])

		self.B_horizontal = U * L 
Example #30
Source File: kde.py    From Splunking-Crime with GNU Affero General Public License v3.0
def integrate_kde(self, other):
        """
        Computes the integral of the product of this  kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies), axis=0)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const * large.n * small.n

        return result