Python scipy.linalg.pinvh() Examples

The following are 20 code examples of scipy.linalg.pinvh(), drawn from open-source projects. You can go to the original project or source file by following the links above each example.
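Before diving into the examples, here is a minimal, self-contained sketch of what pinvh does: it computes the Moore-Penrose pseudo-inverse of a symmetric (Hermitian) matrix via its eigendecomposition, which is cheaper and more stable than the general-purpose pinv for this class of inputs. The matrix below is made up purely for illustration.

import numpy as np
from scipy import linalg

# Build a singular, symmetric positive semi-definite matrix.
rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))
cov = A @ A.T  # 5x5 symmetric, rank 3

# pinvh exploits symmetry via the eigendecomposition.
prec = linalg.pinvh(cov)

# The Moore-Penrose identity holds even though cov is singular.
assert np.allclose(cov @ prec @ cov, cov)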
Example #1
Source File: empirical_covariance_.py    From twitter-stock-recommendation with MIT License
def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done according to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : 2D ndarray, shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.

        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            self.precision_ = linalg.pinvh(covariance)
        else:
            self.precision_ = None 
Example #2
Source File: finemap.py    From focus with GNU General Public License v3.0
def assoc_test(weights, gwas, ldmat, heterogeneity=False):
    """
    TWAS association test.

    :param weights: numpy.ndarray of eQTL weights
    :param gwas: pyfocus.GWAS object
    :param ldmat: numpy.ndarray LD matrix
    :param heterogeneity: bool, estimate variance from a multiplicative random effect

    :return: tuple (beta, se)
    """

    p = ldmat.shape[0]
    assoc = np.dot(weights, gwas.Z)
    if heterogeneity:
        resid = assoc - gwas.Z
        resid_var = mdot([resid, lin.pinvh(ldmat), resid]) / p
    else:
        resid_var = 1

    se = np.sqrt(resid_var * mdot([weights, ldmat, weights]))

    return assoc, se 
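In the focus excerpts, `lin` and `mdot` are module-level imports not shown in the snippet; judging by Example #17 below, they are presumably bound as follows:

from numpy.linalg import multi_dot as mdot
from scipy import linalg as lin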
Example #3
Source File: empirical_covariance_.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done according to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : 2D ndarray, shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.

        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            self.precision_ = linalg.pinvh(covariance)
        else:
            self.precision_ = None 
Example #4
Source File: empirical_covariance_.py    From Mastering-Elasticsearch-7.0 with MIT License
def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done according to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : 2D ndarray, shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.

        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            self.precision_ = linalg.pinvh(covariance)
        else:
            self.precision_ = None 
Example #5
Source File: extmath.py    From Splunking-Crime with GNU Affero General Public License v3.0
def pinvh(a, cond=None, rcond=None, lower=True):
    return linalg.pinvh(a, cond, rcond, lower) 
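Note that this wrapper forwards the older SciPy signature. In newer SciPy releases the cond/rcond arguments of pinvh were deprecated and eventually removed in favor of atol/rtol, so a version-tolerant variant might look like the following sketch (the fallback logic is an assumption, not part of the original project):

from scipy import linalg

def pinvh_compat(a, rtol=None, lower=True):
    try:
        # Newer SciPy: the relative tolerance argument is rtol
        return linalg.pinvh(a, rtol=rtol, lower=lower)
    except TypeError:
        # Older SciPy: the relative tolerance argument is rcond
        return linalg.pinvh(a, rcond=rtol, lower=lower)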
Example #6
Source File: dpgmm.py    From twitter-stock-recommendation with MIT License
def _get_covars(self):
        return [pinvh(c) for c in self._get_precisions()] 
Example #7
Source File: extmath.py    From twitter-stock-recommendation with MIT License
def pinvh(a, cond=None, rcond=None, lower=True):
    return linalg.pinvh(a, cond, rcond, lower) 
Example #8
Source File: empirical_covariance_.py    From twitter-stock-recommendation with MIT License
def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like,
            The precision matrix associated to the current covariance object.

        """
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_)
        return precision 
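A minimal usage sketch for the estimator above (assuming scikit-learn is installed): with store_precision=False nothing is cached, so get_precision() recomputes the pseudo-inverse on demand.

import numpy as np
from sklearn.covariance import EmpiricalCovariance

X = np.random.RandomState(0).randn(100, 4)
est = EmpiricalCovariance(store_precision=False).fit(X)
precision = est.get_precision()  # computed on the fly via linalg.pinvh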
Example #9
Source File: bounding.py    From dynesty with MIT License
def __init__(self, ndim, cov=None):
        self.n = ndim

        if cov is None:
            cov = np.identity(self.n)
        self.cov = cov
        self.am = lalg.pinvh(self.cov)
        self.axes = lalg.sqrtm(self.cov)
        self.axes_inv = lalg.pinvh(self.axes)

        detsign, detln = linalg.slogdet(self.am)
        self.vol_cube = np.exp(self.n * np.log(2.) - 0.5 * detln)
        self.expand = 1. 
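In the dynesty excerpts, two linear-algebra namespaces appear side by side: `lalg` is presumably scipy.linalg (providing pinvh, sqrtm, cholesky, and eigh) while `linalg` is numpy.linalg (providing slogdet); bounding.py imports both at module level.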
Example #10
Source File: bounding.py    From dynesty with MIT License
def __init__(self, ndim, cov=None):
        self.n = ndim

        if cov is None:
            cov = np.identity(self.n)
        self.cov = cov
        self.am = lalg.pinvh(self.cov)
        self.axes = lalg.sqrtm(self.cov)
        self.axes_inv = lalg.pinvh(self.axes)

        detsign, detln = linalg.slogdet(self.am)
        self.vol_ball = np.exp(logvol_prefactor(self.n) - 0.5 * detln)
        self.expand = 1. 
Example #11
Source File: bounding.py    From dynesty with MIT License
def __init__(self, ctr, cov):
        self.n = len(ctr)  # dimension
        self.ctr = np.array(ctr)  # center coordinates
        self.cov = np.array(cov)  # covariance matrix
        self.am = lalg.pinvh(cov)  # precision matrix (inverse of covariance)
        self.axes = lalg.cholesky(cov, lower=True)  # transformation axes

        # The volume of the ellipsoid is the volume of the unit n-sphere
        # times sqrt(det(cov)), i.e. divided by the square root of the
        # determinant of the precision matrix.
        detsign, detln = linalg.slogdet(self.am)
        self.vol = np.exp(logvol_prefactor(self.n) - 0.5 * detln)

        # The eigenvalues (l) of `a` are (a^-2, b^-2, ...) where
        # (a, b, ...) are the lengths of principle axes.
        # The eigenvectors (v) are the normalized principle axes.
        l, v = lalg.eigh(self.cov)
        if np.all((l > 0.) & (np.isfinite(l))):
            self.axlens = np.sqrt(l)
        else:
            raise ValueError("The input precision matrix defining the "
                             "ellipsoid {0} is apparently singular with "
                             "l={1} and v={2}.".format(self.cov, l, v))

        # Scaled eigenvectors are the principal axes, where `paxes[:,i]` is
        # the i-th axis. Multiplying this matrix by a vector transforms a
        # point in the unit n-sphere into a point in the ellipsoid.
        self.paxes = np.dot(v, np.diag(self.axlens))

        # Amount by which volume was increased after initialization (i.e.
        # cumulative factor from `scale_to_vol`).
        self.expand = 1. 
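The volume bookkeeping in these constructors follows a single identity: the ellipsoid volume equals the unit n-ball volume times sqrt(det(cov)), which the code evaluates through the log-determinant of the precision matrix. A small sketch, with logvol_prefactor reimplemented here for self-containment (dynesty's own helper plays the same role):

import numpy as np
from numpy import linalg
from scipy import linalg as lalg
from scipy.special import gammaln

def logvol_prefactor(n):
    # log-volume of the unit n-ball: pi^(n/2) / Gamma(n/2 + 1)
    return 0.5 * n * np.log(np.pi) - gammaln(0.5 * n + 1.)

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
am = lalg.pinvh(cov)  # precision matrix
detsign, detln = linalg.slogdet(am)
vol = np.exp(logvol_prefactor(2) - 0.5 * detln)

# Equivalent closed form: vol(unit ball) * sqrt(det(cov)).
assert np.isclose(vol, np.exp(logvol_prefactor(2)) * np.sqrt(linalg.det(cov)))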
Example #12
Source File: dpgmm.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _get_covars(self):
        return [pinvh(c) for c in self._get_precisions()] 
Example #13
Source File: empirical_covariance_.py    From Splunking-Crime with GNU Affero General Public License v3.0
def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like,
            The precision matrix associated to the current covariance object.

        """
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_)
        return precision 
Example #14
Source File: finemap.py    From focus with GNU General Public License v3.0
def get_resid(zscores, swld, wcor):
    """
    Regress out the average pleiotropic signal tagged by TWAS at the region

    :param zscores: numpy.ndarray TWAS zscores
    :param swld: numpy.ndarray intercept variable
    :param wcor: numpy.ndarray predicted expression correlation

    :return: tuple (residual TWAS zscores, intercept z-score)
    """
    m, m = wcor.shape
    m, p = swld.shape

    # create mean factor
    intercept = swld.dot(np.ones(p))

    # estimate under the null for variance components, i.e. V = SW LD SW
    wcor_inv, rank = lin.pinvh(wcor, return_rank=True)

    numer = mdot([intercept.T, wcor_inv, zscores])
    denom = mdot([intercept.T, wcor_inv, intercept])
    alpha = numer / denom
    resid = zscores - intercept * alpha

    s2 = mdot([resid, wcor_inv, resid]) / (rank - 1)
    inter_se = np.sqrt(s2 / denom)
    inter_z = alpha / inter_se

    return resid, inter_z 
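The alpha estimate above is a one-parameter generalized least squares fit: alpha = (x' W^+ z) / (x' W^+ x), where x is the intercept column, z the TWAS z-scores, and W^+ the pseudo-inverse of wcor from pinvh; the return_rank=True output supplies the residual degrees of freedom for the variance estimate s2.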
Example #15
Source File: empirical_covariance_.py    From Mastering-Elasticsearch-7.0 with MIT License
def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like
            The precision matrix associated to the current covariance object.

        """
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_)
        return precision 
Example #16
Source File: robust_covariance.py    From Splunking-Crime with GNU Affero General Public License v3.0
def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistency purposes.

        Returns
        -------
        self : object
            Returns self.

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self 
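A minimal usage sketch for the estimator above (assuming scikit-learn is installed); the robust precision comes out of the same pinvh machinery shown in the earlier get_precision example:

import numpy as np
from sklearn.covariance import MinCovDet

X = np.random.RandomState(42).randn(200, 3)
mcd = MinCovDet(random_state=42).fit(X)
robust_precision = mcd.get_precision()  # pinvh of the reweighted covariance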
Example #17
Source File: train.py    From focus with GNU General Public License v3.0
def _train_gblup(y, Z, X, include_ses=False, p_threshold=0.01):
    log = logging.getLogger(pyfocus.LOG)

    try:
        from limix.qc import normalise_covariance
    except ImportError as ie:
        log.error("Training submodule requires limix>=2.0.0 and sklearn to be installed.")
        raise
    from numpy.linalg import multi_dot as mdot
    from scipy.linalg import pinvh

    log.debug("Initializing GBLUP model")

    attrs = dict()

    # estimate heritability using limix
    K_cis = np.dot(Z, Z.T)
    K_cis = normalise_covariance(K_cis)
    fe_var, s2u, s2e, logl, fixed_betas, pval = _fit_cis_herit(y, K_cis, X)
    yresid = y - np.dot(X, fixed_betas)

    if pval > p_threshold:
        log.info("h2g pvalue {} greater than threshold {}. Skipping".format(pval, p_threshold))
        return None

    attrs["h2g"] = s2u / (fe_var + s2u + s2e)
    attrs["h2g.logl"] = logl
    attrs["h2g.pvalue"] = pval

    # Total variance
    n, p = Z.shape

    # ridge solution (i.e. rrBLUP)
    # this will be slower than normal GBLUP when p > n but is a little bit more flexible
    ZtZpDinv = pinvh(np.dot(Z.T, Z) + np.eye(p) * (s2e / s2u))
    betas = mdot([ZtZpDinv, Z.T, yresid])

    if include_ses:
        # TODO: come back to this with matrix operations rather than list comprehensions
        # jack-knife standard-errors over the fast leave-one-out estimates using rrBLUP
        """
        h = np.array([mdot([Z[i], ZtZpDinv, Z[i]]) for i in range(n)])
        e = yresid - np.dot(Z, betas)
        beta_jk = [betas - np.dot(ZtZpDinv, Z[i] * e[i]) / (1 - h[i]) for i in range(n)]
        ses = np.sqrt(np.mean(beta_jk, axis=0) * (n - 1))
        """
        ses = None
    else:
        ses = None

    return betas, ses, attrs 
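The rrBLUP step above is ridge regression with penalty s2e / s2u. A quick sanity-check sketch (the data and penalty value are made up; sklearn's Ridge with fit_intercept=False solves the same normal equations, so the two solutions should agree up to numerical tolerance):

import numpy as np
from numpy.linalg import multi_dot as mdot
from scipy.linalg import pinvh
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
Z = rng.randn(50, 10)
y = rng.randn(50)
lam = 2.5  # plays the role of s2e / s2u

betas = mdot([pinvh(Z.T @ Z + lam * np.eye(10)), Z.T, y])
betas_sk = Ridge(alpha=lam, fit_intercept=False).fit(Z, y).coef_
assert np.allclose(betas, betas_sk)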
Example #18
Source File: test_bayes.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_bayesian_ridge_score_values():
    """Check value of score on toy example.

    Compute log marginal likelihood with equation (36) in Sparse Bayesian
    Learning and the Relevance Vector Machine (Tipping, 2001):

    - 0.5 * (log |Id/alpha + X.X^T/lambda| +
             y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
    + lambda_1 * log(lambda) - lambda_2 * lambda
    + alpha_1 * log(alpha) - alpha_2 * alpha

    and check equality with the score computed during training.
    """

    X, y = diabetes.data, diabetes.target
    n_samples = X.shape[0]
    # check with initial values of alpha and lambda (see code for the values)
    eps = np.finfo(np.float64).eps
    alpha_ = 1. / (np.var(y) + eps)
    lambda_ = 1.

    # value of the parameters of the Gamma hyperpriors
    alpha_1 = 0.1
    alpha_2 = 0.1
    lambda_1 = 0.1
    lambda_2 = 0.1

    # compute score using formula of docstring
    score = lambda_1 * log(lambda_) - lambda_2 * lambda_
    score += alpha_1 * log(alpha_) - alpha_2 * alpha_
    M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
    M_inv = pinvh(M)
    score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
                      n_samples * log(2 * np.pi))

    # compute score with BayesianRidge
    clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
                        lambda_1=lambda_1, lambda_2=lambda_2,
                        n_iter=1, fit_intercept=False, compute_score=True)
    clf.fit(X, y)

    assert_almost_equal(clf.scores_[0], score, decimal=9) 
Example #19
Source File: robust_covariance.py    From twitter-stock-recommendation with MIT License
def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistency purposes.

        Returns
        -------
        self : object
            Returns self.

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self 
Example #20
Source File: robust_covariance.py    From Mastering-Elasticsearch-7.0 with MIT License
def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y
            not used, present for API consistency purposes.

        Returns
        -------
        self : object

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self