Python scipy.linalg.solve() Examples

The following are 30 code examples of scipy.linalg.solve(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module scipy.linalg. The snippets are excerpts, so they assume their file's module-level imports (typically import numpy as np and from scipy import linalg).
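
As a quick baseline before the project examples, here is a minimal sketch of the call itself (illustrative values): scipy.linalg.solve(a, b) returns x such that a @ x == b for a square, non-singular a.

import numpy as np
from scipy.linalg import solve

# Solve the square system a @ x = b.
a = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = solve(a, b)                # -> array([2., 3.])
assert np.allclose(a @ x, b)   # residual check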
Example #1
Source File: utils.py    From vnpy_crypto with MIT License
def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices. """
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) +
                                n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
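
A hedged usage sketch (not from vnpy_crypto; it assumes the function above and its numpy/scipy.linalg imports are in scope): with a single zero-mean, identity-covariance component, the result can be cross-checked against scipy.stats.multivariate_normal.

import numpy as np
from scipy import stats

X = np.random.RandomState(0).randn(5, 3)
means = np.zeros((1, 3))
covars = np.array([np.eye(3)])

lp = log_multivariate_normal_density(X, means, covars)
ref = stats.multivariate_normal(means[0], covars[0]).logpdf(X)
assert np.allclose(lp[:, 0], ref)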
Example #2
Source File: nonlin.py    From Computable with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #3
Source File: ridge.py    From Mastering-Elasticsearch-7.0 with MIT License
def _solve_cholesky(X, y, alpha):
    # w = inv(X^T X + alpha*Id) * X^T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]

    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if one_alpha:
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features], dtype=X.dtype)
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            A.flat[::n_features + 1] -= current_alpha
        return coefs 
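
A small sanity sketch (illustrative, not part of scikit-learn; it assumes _solve_cholesky above and sklearn's safe_sparse_dot are in scope): for a shared alpha, the result should match the explicit closed form w = inv(X^T X + alpha*Id) * X^T y.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = rng.randn(20, 2)
alpha = np.array([0.7, 0.7])

coefs = _solve_cholesky(X, y, alpha)
direct = linalg.solve(X.T @ X + 0.7 * np.eye(5), X.T @ y).T
assert np.allclose(coefs, direct)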
Example #4
Source File: decompose_kavan.py    From skinning_decomposition_kavan with MIT License
def optimize_restpose(Tr, W, C):
    num_bones, num_verts = W.shape
    # reshape Tr according to section 4.3 into
    # a (d x P) matrix of 4-vectors
    Tr1 = Tr.reshape((-1, num_bones, 4))
    new_verts0 = []
    for j in range(num_verts):
        # multiply with weights and sum over num_bones axis
        Lambda = (Tr1 * W[np.newaxis,:,j,np.newaxis]).sum(axis=1)
        # convert to system matrix - last column must be one, 
        # so subtract from rhs
        gamma = C[:,j] - Lambda[:,3]
        Lambda = Lambda[:,:3]
        v = linalg.solve(np.dot(Lambda.T, Lambda), 
                         np.dot(Lambda.T, gamma))
        new_verts0.append(v)
    return np.array(new_verts0) 
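
The solve call above is the normal-equations form of least squares; an equivalent and often better-conditioned route is scipy.linalg.lstsq, sketched here with illustrative data.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
Lambda = rng.randn(6, 3)
gamma = rng.randn(6)

v_normal = linalg.solve(Lambda.T @ Lambda, Lambda.T @ gamma)
v_lstsq, res, rank, sv = linalg.lstsq(Lambda, gamma)
assert np.allclose(v_normal, v_lstsq)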
Example #5
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #6
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def matvec(self, f):
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        b = np.empty((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for m in range(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx 
Example #7
Source File: test_Calculus.py    From PySpice with GNU General Public License v3.0
def compute_finite_difference_coefficients(derivative_order, grid_size):

    # from http://www.scientificpython.net/pyblog/uniform-finite-differences-all-orders

    n = 2*grid_size - 1
    A = np.tile(np.arange(grid_size), (n,1)).T
    B = np.tile(np.arange(1-grid_size,grid_size), (grid_size,1))
    M = (B**A)/gamma(A+1)

    r = np.zeros(grid_size)
    r[derivative_order] = 1

    D = np.zeros((grid_size, grid_size))
    for k in range(grid_size):
        indexes = k + np.arange(grid_size)
        D[:,k] = solve(M[:,indexes], r)

    return D

#################################################################################################### 
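
A usage sketch (assuming the function above with numpy, scipy.linalg.solve and scipy.special.gamma in scope): for derivative_order=1 and grid_size=3, the middle column recovers the familiar centered first-derivative stencil.

import numpy as np

D = compute_finite_difference_coefficients(1, 3)
# middle column = centered stencil over offsets [-1, 0, 1]
assert np.allclose(D[:, 1], [-0.5, 0.0, 0.5])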
Example #8
Source File: nonlin.py    From lambda-packs with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #9
Source File: tools_fri_doa_plane.py    From FRIDA with MIT License
def compute_mtx_obj(GtG_lst, Tbeta_lst, Rc0, num_bands, K):
    """
    compute the matrix (M) in the objective function:
        min   c^H M c
        s.t.  c0^H c = 1

    :param GtG_lst: list of G^H * G
    :param Tbeta_lst: list of Toeplitz matrices for beta-s
    :param Rc0: right dual matrix for the annihilating filter (same for each block -> not a list)
    :return:
    """
    mtx = np.zeros((K + 1, K + 1), dtype=float)  # <= assume G, Tbeta and Rc0 are real-valued
    for loop in range(num_bands):
        Tbeta_loop = Tbeta_lst[loop]
        GtG_loop = GtG_lst[loop]
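        # each term adds Tbeta^T (Rc0 (G^H G)^{-1} Rc0^T)^{-1} Tbeta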
        mtx += np.dot(Tbeta_loop.T,
                      linalg.solve(np.dot(Rc0, linalg.solve(GtG_loop, Rc0.T)),
                                   Tbeta_loop)
                      )
    return mtx 
Example #10
Source File: test_solve_toeplitz.py    From GraphicDesignPatternByPython with MIT License
def test_solve_equivalence():
    # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
    random = np.random.RandomState(1234)
    for n in (1, 2, 3, 10):
        c = random.randn(n)
        if random.rand() < 0.5:
            c = c + 1j * random.randn(n)
        r = random.randn(n)
        if random.rand() < 0.5:
            r = r + 1j * random.randn(n)
        y = random.randn(n)
        if random.rand() < 0.5:
            y = y + 1j * random.randn(n)

        # Check equivalence when both the column and row are provided.
        actual = solve_toeplitz((c,r), y)
        desired = solve(toeplitz(c, r=r), y)
        assert_allclose(actual, desired)

        # Check equivalence when the column is provided but not the row.
        actual = solve_toeplitz(c, b=y)
        desired = solve(toeplitz(c), y)
        assert_allclose(actual, desired) 
Example #11
Source File: common_slow.py    From pyscf with Apache License 2.0
def mkk2ab(mk, k):
    """
    Transforms MK and K TD matrices into A and B matrices.
    Args:
        mk (numpy.ndarray): TD MK-matrix;
        k (numpy.ndarray): TD K-matrix;

    Returns:
        A and B submatrices.
    """
    if numpy.iscomplexobj(mk) or numpy.iscomplexobj(k):
        raise ValueError("MK- and/or K-matrices are complex-valued: no transform is possible")
    m = solve(k.T, mk.T).T
    a = 0.5 * (m + k)
    b = 0.5 * (m - k)
    return a, b 
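
A hedged roundtrip check (illustrative matrices, not from pyscf): with K = A - B and MK = (A + B)(A - B), mkk2ab should recover A and B, since solve(k.T, mk.T).T computes M = MK @ inv(K) = A + B.

import numpy
from scipy.linalg import solve  # assumed to be the solve used above

rng = numpy.random.RandomState(0)
s = rng.rand(4, 4)
a0 = s + s.T + 10 * numpy.eye(4)   # symmetric, well-conditioned A
t = rng.rand(4, 4)
b0 = t + t.T                       # symmetric B

a, b = mkk2ab((a0 + b0).dot(a0 - b0), a0 - b0)
assert numpy.allclose(a, a0) and numpy.allclose(b, b0)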
Example #12
Source File: rbf.py    From pwtools with BSD 3-Clause "New" or "Revised" License
def fit(self):
        """Solve linear system for the weights.

        The weights  `self.w` (:math:`\mathbf w`) are found from: :math:`\mathbf
        G\,\mathbf w = \mathbf z` or if :math:`r` is given :math:`(\mathbf G +
        r\,\mathbf I)\,\mathbf w = \mathbf z`.

        with centers == points (center vectors are all data points). Then G is
        square. Updates ``self.w``.

        Notes
        -----
        ``self.r != None`` : linear system solver
            Use :func:`scipy.linalg.solve`. For :math:`r=0`, this always yields
            perfect interpolation at the data points. May be numerically
            unstable in that case. Use :math:`r>0` to increase stability (try
            small values such as ``1e-10`` first) or to create a smoother fit
            (higher `r` gives stiffer functions). Behaves similarly to ``lstsq``
            but appears to be numerically more stable (no small noise in the
            solution), though `r` is another parameter that needs to be tuned.
        ``self.r = None`` : least squares solver
            Use :func:`scipy.linalg.lstsq`. Numerically more stable than
            direct solver w/o regularization. Will mostly be the same as
            the interpolation result, but will not go through all points for
            very noisy data. May create small noise in the solution (plot the
            fit with high point density).
        """
        # this test may be expensive for big data sets
        assert (self.centers == self.points).all(), "centers == points not fulfilled"
        # re-use self.distsq if possible
        if self.distsq is None:
            self.distsq = self.get_distsq()
        G = self.rbf(self.distsq, self.p)
        if self.r is None:
            self.w, res, rnk, svs = linalg.lstsq(G, self.values)
        else:
            self.w = linalg.solve(G + np.identity(G.shape[0])*self.r, self.values) 
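
A self-contained sketch of the same system (Gaussian RBF; the names and kernel width are illustrative, not the pwtools API):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
points = rng.rand(50, 2)                     # centers == data points
values = np.sin(points[:, 0]) + points[:, 1]
p, r = 0.5, 1e-10                            # kernel width, regularization

distsq = ((points[:, None, :] - points[None, :, :]) ** 2).sum(axis=-1)
G = np.exp(-distsq / (2.0 * p**2))           # square Gaussian kernel matrix
w = linalg.solve(G + r * np.identity(len(G)), values)
# (G + r*I) w = values; for small r, G @ w reproduces values closely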
Example #13
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def solve(self, f, tol=0):
        return -f*self.alpha 
Example #14
Source File: test_solve_toeplitz.py    From GraphicDesignPatternByPython with MIT License
def test_multiple_rhs():
    random = np.random.RandomState(1234)
    c = random.randn(4)
    r = random.randn(4)
    for offset in [0, 1j]:
        for yshape in ((4,), (4, 3), (4, 3, 2)):
            y = random.randn(*yshape) + offset
            actual = solve_toeplitz((c,r), b=y)
            desired = solve(toeplitz(c, r=r), y)
            assert_equal(actual.shape, yshape)
            assert_equal(desired.shape, yshape)
            assert_allclose(actual, desired) 
Example #15
Source File: amflss.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def additive_decomp(self):
        """
        Return values for the martingale decomposition 
            - ν         : unconditional mean difference in Y
            - H         : coefficient for the (linear) martingale component (κ_a)
            - g         : coefficient for the stationary component g(x)
            - Y_0       : a function of X_0 (for now set to 0.0)
        """
        I = np.identity(self.nx)
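        # (I - A)^{-1} computed via a solve rather than an explicit inverse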
        A_res = la.solve(I - self.A, I)
        g = self.D @ A_res
        H = self.F + self.D @ A_res @ self.B

        return self.ν, H, g 
Example #16
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) 
Example #17
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def __init__(self, jacobian):
        self.jacobian = jacobian
        self.matvec = jacobian.solve
        self.update = jacobian.update
        if hasattr(jacobian, 'setup'):
            self.setup = jacobian.setup
        if hasattr(jacobian, 'rsolve'):
            self.rmatvec = jacobian.rsolve 
Example #18
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if len(cs) == 0:
            return v/alpha

        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1

        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])

        c0 = cs[0]
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)

        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)

        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)

        return w 
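
The comment above is the Sherman-Morrison-Woodbury identity with B = alpha*I; a quick numeric check with illustrative complex matrices:

import numpy as np

rng = np.random.RandomState(0)
n, k, alpha = 6, 2, 2.0
C = rng.randn(n, k) + 1j * rng.randn(n, k)
D = rng.randn(n, k) + 1j * rng.randn(n, k)

lhs = np.linalg.inv(alpha * np.eye(n) + C @ D.conj().T)
inner = np.linalg.inv(np.eye(k) + D.conj().T @ C / alpha)
rhs = np.eye(n) / alpha - (C / alpha) @ inner @ (D.conj().T / alpha)
assert np.allclose(lhs, rhs)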
Example #19
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) 
Example #20
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def solve(self, rhs, tol=0):
        if 'tol' in self.method_kw:
            sol, info = self.method(self.op, rhs, **self.method_kw)
        else:
            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
        return sol 
Example #21
Source File: test_solve_toeplitz.py    From GraphicDesignPatternByPython with MIT License
def test_native_list_arguments():
    c = [1,2,4,7]
    r = [1,3,9,12]
    y = [5,1,4,2]
    actual = solve_toeplitz((c,r), y)
    desired = solve(toeplitz(c, r=r), y)
    assert_allclose(actual, desired) 
Example #22
Source File: kernel_pca.py    From Mastering-Elasticsearch-7.0 with MIT License
def _fit_inverse_transform(self, X_transformed, X):
        if hasattr(X, "tocsr"):
            raise NotImplementedError("Inverse transform not implemented for "
                                      "sparse matrices!")

        n_samples = X_transformed.shape[0]
        K = self._get_kernel(X_transformed)
        K.flat[::n_samples + 1] += self.alpha
        self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
        self.X_transformed_fit_ = X_transformed 
Example #23
Source File: test_base.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_linear_regression_sample_weights():
    # TODO: loop over sparse data as well

    rng = np.random.RandomState(0)

    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):

        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        for intercept in (True, False):

            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_

            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)

            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)

            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))

            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0]) 
Example #24
Source File: gpc.py    From Mastering-Elasticsearch-7.0 with MIT License
def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        Returns
        -------
        C : array-like, shape = (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute ``classes_``.
        """
        check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])

        # Based on Algorithm 3.2 of GPML
        K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
        f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Line 4
        v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star)  # Line 5
        # Line 6 (compute np.diag(v.T.dot(v)) via einsum)
        var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)

        # Line 7:
        # Approximate \int sigmoid(z) * N(z | f_star, var_f_star) dz
        # Approximation is due to Williams & Barber, "Bayesian Classification
        # with Gaussian Processes", Appendix A: Approximate the logistic
        # sigmoid by a linear combination of 5 error functions.
        # For information on how this integral can be computed see
        # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
        alpha = 1 / (2 * var_f_star)
        gamma = LAMBDAS * f_star
        integrals = np.sqrt(np.pi / alpha) \
            * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
            / (2 * np.sqrt(var_f_star * 2 * np.pi))
        pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()

        return np.vstack((1 - pi_star, pi_star)).T 
Example #25
Source File: bench_basic.py    From Computable with MIT License
def bench_random(self):
        numpy_solve = nl.solve
        scipy_solve = sl.solve
        print()
        print('      Solving system of linear equations')
        print('      ==================================')

        print('      |    contiguous     |   non-contiguous ')
        print('----------------------------------------------')
        print(' size |  scipy  | numpy   |  scipy  | numpy ')

        for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]:
            repeat *= 2
            print('%5s' % size, end=' ')
            sys.stdout.flush()

            a = random([size,size])
            # larger diagonal ensures non-singularity:
            for i in range(size):
                a[i,i] = 10*(.1+a[i,i])
            b = random([size])

            print('| %6.2f ' % measure('scipy_solve(a,b)',repeat), end=' ')
            sys.stdout.flush()

            print('| %6.2f ' % measure('numpy_solve(a,b)',repeat), end=' ')
            sys.stdout.flush()

            a = a[-1::-1,-1::-1]  # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])

            print('| %6.2f ' % measure('scipy_solve(a,b)',repeat), end=' ')
            sys.stdout.flush()

            print('| %6.2f ' % measure('numpy_solve(a,b)',repeat), end=' ')
            sys.stdout.flush()

            print('   (secs for %s calls)' % (repeat)) 
Example #26
Source File: nonlin.py    From Computable with MIT License
def solve(self, rhs, tol=0):
        if 'tol' in self.method_kw:
            sol, info = self.method(self.op, rhs, **self.method_kw)
        else:
            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
        return sol 
Example #27
Source File: nonlin.py    From Computable with MIT License
def solve(self, f, tol=0):
        return -f*self.beta 
Example #28
Source File: nonlin.py    From Computable with MIT License
def solve(self, f, tol=0):
        return -f*self.alpha 
Example #29
Source File: nonlin.py    From Computable with MIT License
def solve(self, f, tol=0):
        return -f / self.d 
Example #30
Source File: compare_mcmc_samplers_stochvol.py    From particles with MIT License
def h(self, x, y):
        gy = self.grad_log_lik(y)
        lhs = x - self.tod * np.matmul(self.A, y + (0.25 * self.delta) * gy)
        rhs = linalg.solve(self.tod * self.A + np.eye(self.T), gy)
        # TODO pre-computing the inverse here may speed things up
        return np.dot(lhs, rhs)