Python scipy.linalg.solve_triangular() Examples

The following are 30 code examples of scipy.linalg.solve_triangular(), drawn from open-source projects; each example notes its source file, project, and license. You may also want to check out all available functions and classes of the scipy.linalg module, or try the search function.
Example #1
Source File: mmd_test.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
from scipy import linalg, stats


def linear_hotelling_test(X, Y, reg=0):
    n, p = X.shape
    Z = X - Y
    Z_bar = Z.mean(axis=0)

    Z -= Z_bar
    S = Z.T.dot(Z)
    S /= (n - 1)
    if reg:
        S.flat[::p + 1] += reg  # add reg to the diagonal of S
    # z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
    L = linalg.cholesky(S, lower=True, overwrite_a=True)
    Linv_Z_bar = linalg.solve_triangular(L, Z_bar, lower=True, overwrite_b=True)
    stat = n * Linv_Z_bar.dot(Linv_Z_bar)

    p_val = stats.chi2.sf(stat, p)
    return p_val, stat 
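
A minimal invocation sketch (the data below are illustrative, not from the opt-mmd source); with X and Y drawn from the same distribution, the statistic follows a chi-squared distribution with p degrees of freedom, so a large p-value is expected:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
Y = rng.normal(size=(100, 3))
p_val, stat = linear_hotelling_test(X, Y)
print(p_val, stat)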
Example #2
Source File: utils.py    From vnpy_crypto with MIT License
import numpy as np
from scipy import linalg


def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices. """
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
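
A quick consistency check of the function above against scipy.stats.multivariate_normal (a sketch; the numbers are illustrative):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 2))
means = np.zeros((1, 2))
covars = np.array([[[2.0, 0.3],
                    [0.3, 1.0]]])
ours = log_multivariate_normal_density(X, means, covars)[:, 0]
ref = stats.multivariate_normal(means[0], covars[0]).logpdf(X)
print(np.allclose(ours, ref))  # True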
Example #3
Source File: ols.py    From fastats with MIT License
from scipy.linalg import cholesky, solve_triangular


def ols_cholesky(A, b):
    """
    Ordinary least-squares regression coefficient estimation.

    If A has full column rank, the normal equations

        (A.T @ A) @ x = A.T @ b

    can be solved through the upper-triangular Cholesky factor R
    of A.T @ A (so A.T @ A = R.T @ R) in two triangular solves:

        R.T @ w = A.T @ b
        R @ x = w
    """
    R = cholesky(A.T @ A)
    w = solve_triangular(R, A.T @ b, trans='T')
    return solve_triangular(R, w) 
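
A quick check against numpy's least-squares solver (illustrative data, not from the fastats source):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(50, 4))
b = rng.normal(size=50)
x_chol = ols_cholesky(A, b)
x_ref, *_ = np.linalg.lstsq(A, b, rcond=None)
print(np.allclose(x_chol, x_ref))  # True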
Example #4
Source File: gmm.py    From bhmm with GNU Lesser General Public License v3.0
import numpy as np
from scipy import linalg


def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices.
    """
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #5
Source File: minpack.py    From Splunking-Crime with GNU Affero General Public License v3.0
from scipy.linalg import solve_triangular


def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
    return func_wrapped 
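
A sketch of the whitening branch with a dense residual covariance (model, xdata, and C below are illustrative, not from the scipy source):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

def model(x, a, b):
    return a * x + b

xdata = np.linspace(0.0, 1.0, 5)
ydata = model(xdata, 2.0, -1.0)
C = 0.1 * np.eye(5) + 0.05                # dense, symmetric positive definite
transform = cholesky(C, lower=True)       # L with C = L @ L.T
residual = _wrap_func(model, xdata, ydata, transform)
print(residual([2.0, -1.0]))              # whitened residuals; all zeros at the true params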
Example #6
Source File: gpei_constrained_chooser.py    From Milano with Apache License 2.0
def pred_constraint_voilation(self, cand, comp, vals):
        # The primary covariances for prediction.
        comp_cov   = self.cov(self.constraint_amp2, self.constraint_ls, comp)
        cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp,
                              cand)

        # Compute the required Cholesky.
        obsv_cov  = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
        obsv_chol = spla.cholesky(obsv_cov, lower=True)

        cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
        cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

        # Predictive things.
        # Solve the linear systems.
        alpha  = spla.cho_solve((obsv_chol, True), self.ff)
        beta   = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

        # Predict the marginal means and variances at candidates.
        func_m = np.dot(cand_cross.T, alpha)  # + self.constraint_mean
        func_m = sps.norm.cdf(func_m*self.constraint_gain)

        return func_m

    # Compute EI over hyperparameter samples 
Example #7
Source File: cpu.py    From ggmm with MIT License
import numpy as np
from scipy import linalg


def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    '''Log probability for full covariance matrices.
    '''
    n_samples, n_dimensions = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dimensions),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dimensions * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #8
Source File: minpack.py    From GraphicDesignPatternByPython with MIT License
from scipy.linalg import solve_triangular


def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
    return func_wrapped 
Example #9
Source File: transitions.py    From recurrent-slds with MIT License
def log_prior(self):
        # Normal N(mu | mu_0, Sigma / kappa_0)
        from scipy.linalg import solve_triangular
        sigma = np.linalg.inv(self.J_0)
        mu = sigma.dot(self.h_0)
        S_chol = np.linalg.cholesky(sigma)

        # Stack log pi and W
        X = np.vstack((self.logpi, self.W)).T

        lp = 0
        for d in range(self.D_out):
            x = solve_triangular(S_chol, X[d] - mu, lower=True)
            lp += -1. / 2. * np.dot(x, x) \
                  - self.D_in / 2 * np.log(2 * np.pi) \
                  - np.log(S_chol.diagonal()).sum()

        return lp

    ### HMC 
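
The same pattern in isolation: the loop above evaluates a Gaussian log-density N(x | mu, Sigma) per output dimension through the Cholesky factor of Sigma. A standalone sketch of that density (gaussian_logpdf is a hypothetical helper, not part of the recurrent-slds source):

import numpy as np
from scipy.linalg import solve_triangular

def gaussian_logpdf(x, mu, sigma):
    L = np.linalg.cholesky(sigma)                # sigma = L @ L.T
    r = solve_triangular(L, x - mu, lower=True)  # whitened residual
    return (-0.5 * r @ r
            - 0.5 * len(x) * np.log(2 * np.pi)
            - np.log(np.diag(L)).sum())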
Example #10
Source File: gmm.py    From Splunking-Crime with GNU Affero General Public License v3.0
import numpy as np
from scipy import linalg


def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #11
Source File: kalman_filter.py    From Splunking-Crime with GNU Affero General Public License v3.0
def standardized_forecasts_error(self):
        """
        Standardized forecast errors
        """
        if self._standardized_forecasts_error is None:
            from scipy import linalg
            self._standardized_forecasts_error = np.zeros(
                self.forecasts_error.shape, dtype=self.dtype)

            for t in range(self.forecasts_error_cov.shape[2]):
                if self.nmissing[t] > 0:
                    self._standardized_forecasts_error[:, t] = np.nan
                if self.nmissing[t] < self.k_endog:
                    mask = ~self.missing[:, t].astype(bool)
                    F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
                    upper, _ = linalg.cho_factor(F[:, :, 0])
                    self._standardized_forecasts_error[mask, t] = (
                        linalg.solve_triangular(
                            upper, self.forecasts_error[mask, t]
                        )
                    )

        return self._standardized_forecasts_error 
Example #12
Source File: stats.py    From hmmlearn with BSD 3-Clause "New" or "Revised" License
import numpy as np
from scipy import linalg


def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #13
Source File: GPConstrainedEIChooser.py    From auptimizer with GNU General Public License v3.0
def pred_constraint_voilation(self, cand, comp, vals):
        # The primary covariances for prediction.
        comp_cov   = self.cov(self.constraint_amp2, self.constraint_ls, comp)
        cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp,
                              cand)

        # Compute the required Cholesky.
        obsv_cov  = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
        obsv_chol = spla.cholesky(obsv_cov, lower=True)

        cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
        cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

        # Predictive things.
        # Solve the linear systems.
        alpha  = spla.cho_solve((obsv_chol, True), self.ff)
        beta   = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

        # Predict the marginal means and variances at candidates.
        func_m = np.dot(cand_cross.T, alpha)  # + self.constraint_mean
        func_m = sps.norm.cdf(func_m*self.constraint_gain)

        return func_m

    # Compute EI over hyperparameter samples 
Example #14
Source File: minpack.py    From lambda-packs with MIT License
from scipy.linalg import solve_triangular


def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
    return func_wrapped 
Example #15
Source File: gmm.py    From twitter-stock-recommendation with MIT License
import numpy as np
from scipy import linalg


def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #16
Source File: filt.py    From pyins with MIT License
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular


def _kalman_correct(x, P, z, H, R, gain_factor, gain_curve):
    PHT = np.dot(P, H.T)

    S = np.dot(H, PHT) + R
    e = z - H.dot(x)
    L = cholesky(S, lower=True)
    inn = solve_triangular(L, e, lower=True)

    if gain_curve is not None:
        q = (np.dot(inn, inn) / inn.shape[0]) ** 0.5
        f = gain_curve(q)
        if f == 0:
            return inn
        L *= (q / f) ** 0.5

    K = cho_solve((L, True), PHT.T, overwrite_b=True).T
    if gain_factor is not None:
        K *= gain_factor[:, None]

    U = -K.dot(H)
    U[np.diag_indices_from(U)] += 1
    x += K.dot(z - H.dot(x))
    P[:] = U.dot(P).dot(U.T) + K.dot(R).dot(K.T)

    return inn 
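
A minimal invocation sketch (the system below is illustrative, not from the pyins source); the state x and covariance P are updated in place, and the returned value is the normalized innovation:

import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular

x = np.zeros(2)                      # state estimate, updated in place
P = np.eye(2)                        # state covariance, updated in place
H = np.array([[1.0, 0.0]])           # observe the first state only
R = np.array([[0.5]])                # measurement noise covariance
z = np.array([1.2])                  # measurement
inn = _kalman_correct(x, P, z, H, R, gain_factor=None, gain_curve=None)
print(x, inn)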
Example #17
Source File: lda.py    From pgmult with MIT License
from scipy.linalg import solve_triangular as _solve_triangular


def solve_triangular(L, a):
    return _solve_triangular(L, a, lower=True, trans='T') 
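
With trans='T' the call solves L.T @ x = a rather than L @ x = a, while still telling LAPACK that the stored factor is lower-triangular; this is the back-substitution half of a Cholesky solve. A quick check of the wrapper (illustrative numbers):

import numpy as np

L = np.array([[2.0, 0.0],
              [1.0, 3.0]])
a = np.array([1.0, 2.0])
x = solve_triangular(L, a)       # the pgmult wrapper defined above
print(np.allclose(L.T @ x, a))   # True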
Example #18
Source File: matrices.py    From mici with MIT License
def _left_matrix_multiply(self, other):
        return sla.solve_triangular(
            self._inverse_array, other, lower=self.lower, check_finite=False) 
Example #19
Source File: matrices.py    From mici with MIT License
def _right_matrix_multiply(self, other):
        return sla.solve_triangular(
            self._inverse_array, other.T, lower=self.lower, trans=1, 
            check_finite=False).T 
Example #20
Source File: nutsjump.py    From PTMCMCSampler with MIT License
def update_cf(self):
        """Update the Cholesky factor of the inverse mass matrix
        
        NOTE: this function is different from the one in GradientJump!
        """
        # Since we are adaptively tuning the step size epsilon, we should at
        # least keep the determinant of this guy equal to what it was before.
        new_cov_cf = sl.cholesky(self.mm_inv, lower=True)

        ldet_old = np.sum(np.log(np.diag(self.cov_cf)))
        ldet_new = np.sum(np.log(np.diag(new_cov_cf)))

        self.cov_cf = np.exp((ldet_old-ldet_new)/self.ndim) * new_cov_cf
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True) 
Example #21
Source File: nutsjump.py    From PTMCMCSampler with MIT License
def set_cf(self):
        """Update the Cholesky factor of the inverse mass matrix"""
        self.cov_cf = sl.cholesky(self.mm_inv, lower=True)
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True) 
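
Solving L @ X = I against the identity, as the trailing call does, is a standard idiom for materializing the inverse of a triangular factor. A standalone sketch (illustrative matrix, matching the sl alias used above):

import numpy as np
import scipy.linalg as sl

mm_inv = np.array([[2.0, 0.5],
                   [0.5, 1.0]])
cov_cf = sl.cholesky(mm_inv, lower=True)                      # mm_inv = L @ L.T
cov_cfi = sl.solve_triangular(cov_cf, np.eye(2), trans=0, lower=True)
print(np.allclose(cov_cfi @ cov_cf, np.eye(2)))               # True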
Example #22
Source File: matrices.py    From mici with MIT License
def _construct_array(self):
        #return self @ np.identity(self.shape[0])
        return sla.solve_triangular(
            self._inverse_array, np.identity(self.shape[0]), lower=self.lower, 
            check_finite=False) 
Example #23
Source File: minpack.py    From Splunking-Crime with GNU Affero General Public License v3.0
import numpy as np
from scipy.linalg import solve_triangular


def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
    return jac_wrapped 
Example #24
Source File: mcmc.py    From deep_gp_random_features with Apache License 2.0
import numpy as np
from scipy import linalg

# covariance_function and unpack_log_theta are helpers defined elsewhere in
# the deep_gp_random_features project.


def log_p_Y_given_F1(Y, F1, log_theta):

    log_theta_sigma2, log_theta_lengthscale, log_theta_lambda = unpack_log_theta(log_theta)

    n = Y.shape[0]

    K_Y = covariance_function(F1, F1, log_theta_sigma2[1], log_theta_lengthscale[1]) + np.eye(n) * np.exp(log_theta_lambda)
    L_K_Y, lower_K_Y = linalg.cho_factor(K_Y, lower=True)

    nu = linalg.solve_triangular(L_K_Y, Y, lower=True)

    return -np.sum(np.log(np.diagonal(L_K_Y))) - 0.5 * np.dot(nu.transpose(), nu)


## Elliptical Slice Sampling to sample from the posterior over latent variables at layer 1 (the latent variables at layer 2 are integrated out analytically) 
Example #25
Source File: bayesian.py    From nni with MIT License
def predict(self, train_x):
        """Predict the result.
        Args:
            train_x: A list of NetworkDescriptor.
        Returns:
            y_mean: The predicted mean.
            y_std: The predicted standard deviation.
        """
        k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2))
        y_mean = k_trans.dot(self._alpha_vector)  # Line 4 (y_mean = f_star)

        # compute inverse K_inv of K based on its Cholesky
        # decomposition L and its inverse L_inv
        l_inv = solve_triangular(self._l_matrix.T,
                                 np.eye(self._l_matrix.shape[0]))
        k_inv = l_inv.dot(l_inv.T)
        # Compute variance of predictive distribution
        y_var = np.ones(len(train_x), dtype=float)
        y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)

        # Check if any of the variances is negative because of
        # numerical issues. If yes: set the variance to 0.
        y_var_negative = y_var < 0
        if np.any(y_var_negative):
            y_var[y_var_negative] = 0.0
        return y_mean, np.sqrt(y_var) 
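
The K_inv construction relies on K = L @ L.T: solving L.T @ X = I yields L^{-T}, and L^{-T} @ L^{-1} = (L @ L.T)^{-1} = K^{-1}. A standalone check of the idiom (illustrative matrix):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

K = np.array([[2.0, 0.3],
              [0.3, 1.0]])
L = cholesky(K, lower=True)               # K = L @ L.T
l_inv = solve_triangular(L.T, np.eye(2))  # solves L.T @ X = I, so l_inv = L^{-T}
k_inv = l_inv.dot(l_inv.T)
print(np.allclose(k_inv @ K, np.eye(2)))  # True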
Example #26
Source File: mcmc_batch_comparison.py    From emukit with Apache License 2.0
def _get_mu_L(self, x_pred: np.ndarray, N: int=None, woodbury_inv: bool=False, with_index: int=None) -> Tuple:
        """
        Returns posterior mean and cholesky decomposition of the posterior samples
        
        :param x_pred: locations where the mean and posterior covariance are computed
        :param N: number of posterior samples
        :param woodbury_inv: boolean indicating whether the function should return woodbury_inv vector as well
        :param with_index: index of the specific posterior sample the function should return
        :return params: tuple containing the posterior means and choleskies of the covariances. Also woodbury inverses and woodbury choleskies if woodbury_inv is true
        """
        indices = np.arange(self.samples['f'].shape[0])
        if N is not None:
            indices = np.random.choice(indices, N)
        if with_index is not None:
            indices = np.array([with_index], dtype=int)
        N = len(indices)
        x_pred = np.atleast_2d(x_pred)
        f2_mu = np.empty((N,x_pred.shape[0]))
        f2_L = np.empty((N,x_pred.shape[0], x_pred.shape[0]))
        k_x1_x2 = self.kern.K(self.X, x_pred)
        k_x2_x2 = self.kern.K(x_pred)
        for ni, i in enumerate(indices):
            L_div_k_x1_x2 = la.solve_triangular(self.samples['L_K'][i,:,:], k_x1_x2, lower=True, overwrite_b=False)
            f2_mu[ni,:] = np.dot(L_div_k_x1_x2.T, self.samples['eta'][i,:])
            f2_cov = k_x2_x2 - np.dot(L_div_k_x1_x2.T, L_div_k_x1_x2)
            f2_L[ni,:,:] = jitchol(f2_cov)
        if woodbury_inv:
            w_inv = np.empty((N,self.X.shape[0],self.X.shape[0]))
            w_chol = np.empty((N,self.X.shape[0],))
            for ni, i in enumerate(indices):
                L_Kinv = la.inv(self.samples['L_K'][i,:,:])
                w_inv[ni,:,:] = L_Kinv.T @ L_Kinv
                w_chol[ni,:] = (L_Kinv.T @ self.samples['eta'][i,:, None])[:, 0]
            return f2_mu, f2_L, w_inv, w_chol
        else:
            return f2_mu, f2_L 
Example #27
Source File: linear_algebra.py    From klustakwik2 with BSD 3-Clause "New" or "Revised" License
def trisolve(self, x):
        out = zeros(len(x))
        if len(self.unmasked):
            out[self.unmasked] = solve_triangular(self.block, x[self.unmasked], lower=True)
        out[self.masked] = x[self.masked]/self.diagonal
        return out 
Example #28
Source File: gp_utils.py    From bananas with Apache License 2.0
import numpy as np
from scipy.linalg import solve_triangular


def solve_triangular_base(amat, b, lower):
  """ Solves amat*x=b when amat is a triangular matrix. """
  if amat.size == 0 and b.shape[0] == 0:
    return np.zeros((b.shape))
  else:
    return solve_triangular(amat, b, lower=lower) 
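
A sketch of both branches (illustrative numbers); the size guard returns an empty solution instead of handing a zero-sized system to the LAPACK-backed solver:

import numpy as np
from scipy.linalg import solve_triangular

L = np.array([[2.0, 0.0],
              [1.0, 3.0]])
b = np.array([2.0, 5.0])
print(solve_triangular_base(L, b, lower=True))                            # [1. 1.33333333]
print(solve_triangular_base(np.zeros((0, 0)), np.zeros(0), lower=True))   # []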
Example #29
Source File: salsa_estimator.py    From dragonfly with MIT License
import numpy as np
from scipy.linalg import solve_triangular


def _solve_triangular_common(A, b, lower):
  """ Solves Ax=b when A is a triangular matrix. """
  if A.size == 0 and b.shape[0] == 0:
    return np.zeros((b.shape))
  else:
    return solve_triangular(A, b, lower=lower) 
Example #30
Source File: smc_samplers.py    From particles with MIT License
def step(self, x):
            z = stats.norm.rvs(size=x.shape)
            y = self.mu + np.dot(z, self.L.T)
            zx = solve_triangular(self.L, np.transpose(x - self.mu),
                                  lower=True)
            delta_lp = (0.5 * np.sum(z * z, axis=1)
                        - 0.5 * np.sum(zx * zx, axis=0))
            return y, delta_lp
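
A sketch of driving the proposal step above with a stand-in object that carries the mu and L attributes the method expects (self.L is the lower Cholesky factor of the proposal covariance; the method is called here as a plain function, and walker is a hypothetical name):

import numpy as np
from types import SimpleNamespace
from scipy import stats
from scipy.linalg import cholesky, solve_triangular

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
walker = SimpleNamespace(mu=np.zeros(2), L=cholesky(cov, lower=True))
x = np.zeros((4, 2))                  # four current particles
y, delta_lp = step(walker, x)         # correlated proposals and log-density correction
print(y.shape, delta_lp.shape)        # (4, 2) (4,)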