Python scipy.linalg.cho_solve() Examples
The following are 30 code examples of scipy.linalg.cho_solve(). You can go to the original project or source file by following the links above each example, or browse all available functions and classes of the scipy.linalg module.
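Before the project examples, here is a minimal, self-contained sketch of the basic pattern (the matrix and right-hand side are made up for illustration): factor a symmetric positive-definite matrix once with cho_factor, then reuse the factor for each right-hand side.

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
K = A @ A.T + 5 * np.eye(5)      # symmetric positive definite by construction

c, low = cho_factor(K)           # O(n^3) factorization, done once
b = rng.standard_normal(5)
x = cho_solve((c, low), b)       # O(n^2) solve per right-hand side

assert np.allclose(K @ x, b)

Most of the examples below follow this factor-once, solve-many pattern, often passing (L, True) directly when the lower factor comes from scipy.linalg.cholesky(..., lower=True).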
Example #1
Source File: utils.py From enterprise with MIT License | 7 votes |
def inv(self, logdet=False):
    if self.ndim == 1:
        inv = 1.0 / self
        if logdet:
            return inv, np.sum(np.log(self))
        else:
            return inv
    else:
        try:
            cf = sl.cho_factor(self)
            inv = sl.cho_solve(cf, np.identity(cf[0].shape[0]))
            if logdet:
                ld = 2.0 * np.sum(np.log(np.diag(cf[0])))
        except np.linalg.LinAlgError:
            u, s, v = np.linalg.svd(self)
            inv = np.dot(u / s, u.T)
            if logdet:
                ld = np.sum(np.log(s))
        if logdet:
            return inv, ld
        else:
            return inv
Example #2
Source File: GPConstrainedEIChooser.py From auptimizer with GNU General Public License v3.0 | 6 votes |
def pred_constraint_voilation(self, cand, comp, vals):
    # The primary covariances for prediction.
    comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
    cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp, cand)

    # Compute the required Cholesky.
    obsv_cov = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
    obsv_chol = spla.cholesky(obsv_cov, lower=True)

    cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
    cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

    # Predictive things.
    # Solve the linear systems.
    alpha = spla.cho_solve((obsv_chol, True), self.ff)
    beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

    # Predict the marginal means and variances at candidates.
    func_m = np.dot(cand_cross.T, alpha)  # + self.constraint_mean
    func_m = sps.norm.cdf(func_m*self.constraint_gain)

    return func_m

# Compute EI over hyperparameter samples
Example #3
Source File: filt.py From pyins with MIT License | 6 votes |
def _kalman_correct(x, P, z, H, R, gain_factor, gain_curve):
    PHT = np.dot(P, H.T)

    S = np.dot(H, PHT) + R
    e = z - H.dot(x)
    L = cholesky(S, lower=True)
    inn = solve_triangular(L, e, lower=True)

    if gain_curve is not None:
        q = (np.dot(inn, inn) / inn.shape[0]) ** 0.5
        f = gain_curve(q)
        if f == 0:
            return inn
        L *= (q / f) ** 0.5

    K = cho_solve((L, True), PHT.T, overwrite_b=True).T
    if gain_factor is not None:
        K *= gain_factor[:, None]

    U = -K.dot(H)
    U[np.diag_indices_from(U)] += 1
    x += K.dot(z - H.dot(x))
    P[:] = U.dot(P).dot(U.T) + K.dot(R).dot(K.T)

    return inn
Example #4
Source File: gpei_constrained_chooser.py From Milano with Apache License 2.0 | 6 votes |
def pred_constraint_voilation(self, cand, comp, vals):
    # The primary covariances for prediction.
    comp_cov = self.cov(self.constraint_amp2, self.constraint_ls, comp)
    cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp, cand)

    # Compute the required Cholesky.
    obsv_cov = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
    obsv_chol = spla.cholesky(obsv_cov, lower=True)

    cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
    cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

    # Predictive things.
    # Solve the linear systems.
    alpha = spla.cho_solve((obsv_chol, True), self.ff)
    beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

    # Predict the marginal means and variances at candidates.
    func_m = np.dot(cand_cross.T, alpha)  # + self.constraint_mean
    func_m = sps.norm.cdf(func_m*self.constraint_gain)

    return func_m

# Compute EI over hyperparameter samples
Example #5
Source File: linalg_covmat.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    # see numpy thread
    # Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = -np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf
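The docstring's own caveat (that inv and the log-determinant should be replaced with something more efficient) is exactly what cho_solve addresses. A sketch of the Cholesky-based variant, not part of the original project and valid only for symmetric positive-definite sigma:

def mvn_loglike_chol(x, sigma):
    '''Cholesky-based variant: one factorization yields both the solve
    and the log-determinant, avoiding inv() and det().'''
    nobs = len(x)
    cf = linalg.cho_factor(sigma)
    # log det(sigma) = 2 * sum of logs of the Cholesky factor's diagonal
    logdetsigma = 2.0 * np.sum(np.log(np.diag(cf[0])))
    llf = -np.dot(x, linalg.cho_solve(cf, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    return 0.5 * llf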
Example #6
Source File: bayesian.py From nni with MIT License | 6 votes |
def first_fit(self, train_x, train_y):
    """ Fit the regressor for the first time. """
    train_x, train_y = np.array(train_x), np.array(train_y)

    self._x = np.copy(train_x)
    self._y = np.copy(train_y)

    self._distance_matrix = edit_distance_matrix(self._x)
    k_matrix = bourgain_embedding_matrix(self._distance_matrix)
    k_matrix[np.diag_indices_from(k_matrix)] += self.alpha

    self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2

    self._alpha_vector = cho_solve(
        (self._l_matrix, True), self._y)  # Line 3

    self._first_fitted = True
    return self
Example #7
Source File: mcmc.py From deep_gp_random_features with Apache License 2.0 | 6 votes |
def do_sampleF2(Y, X, current_F1, log_theta):
    log_theta_sigma2, log_theta_lengthscale, log_theta_lambda = unpack_log_theta(log_theta)

    n = Y.shape[0]

    K_F2 = covariance_function(current_F1, current_F1, log_theta_sigma2[1],
                               log_theta_lengthscale[1]) + np.eye(n) * 1e-9
    K_Y = K_F2 + np.eye(n) * np.exp(log_theta_lambda)

    L_K_Y, lower_K_Y = linalg.cho_factor(K_Y, lower=True)
    K_inv_Y = linalg.cho_solve((L_K_Y, lower_K_Y), Y)
    mu = np.dot(K_F2, K_inv_Y)

    K_inv_K = linalg.cho_solve((L_K_Y, lower_K_Y), K_F2)
    Sigma = K_F2 - np.dot(K_F2, K_inv_K)

    L_Sigma = linalg.cholesky(Sigma, lower=True)
    proposed_F2 = mu + np.dot(L_Sigma, np.random.normal(0.0, 1.0, [n, 1]))

    return proposed_F2

## Unpack the vector of parameters into their three elements
Example #8
Source File: linalg_covmat.py From vnpy_crypto with MIT License | 6 votes |
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    # see numpy thread
    # Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = -np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf
Example #9
Source File: gpnarx.py From pyflux with BSD 3-Clause "New" or "Revised" License | 6 votes |
def variance_values(self, beta):
    """ Covariance matrix for the estimated function

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    Covariance matrix for the estimated function
    """
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k])
                     for k in range(beta.shape[0])])
    L = self._L(parm)
    v = la.cho_solve((L, True), self.kernel.K(parm))
    return self.kernel.K(parm) - np.dot(v.T, v)
Example #10
Source File: correlated_likelihood.py From kombine with MIT License | 6 votes |
def log_likelihood(self, p):
    p = self.to_params(p)

    v = self.rvs(p)

    res = self.vs - v - p['mu']

    cov = p['nu']*p['nu']*np.diag(self.dvs*self.dvs)
    cov += generate_covariance(self.ts, p['sigma'], p['tau'])

    cfactor = sl.cho_factor(cov)
    cc, lower = cfactor

    n = self.ts.shape[0]

    return -0.5*n*np.log(2.0*np.pi) - np.sum(np.log(np.diag(cc))) - \
        0.5*np.dot(res, sl.cho_solve(cfactor, res))
Example #11
Source File: clustered_kde.py From kombine with MIT License | 6 votes |
def _evaluate_point_logpdf(args):
    """
    Evaluate the Gaussian KDE at a given point `p`. This lives outside the
    KDE method to allow for parallelization using :mod:`multiprocessing`.
    Since :func:`map` only allows single-argument functions, the following
    arguments have to be packed into a single tuple.

    :param p:
        The point to evaluate the KDE at.

    :param data:
        The `(N, ndim)`-shaped array of data used to construct the KDE.

    :param cho_factor:
        A Cholesky decomposition of the kernel covariance matrix.
    """
    point, data, cho_factor = args

    # Use Cholesky decomposition to avoid direct inversion of covariance matrix
    diff = data - point
    tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T
    diff *= tdiff

    # Work in the log to avoid large numbers
    return logsumexp(-np.sum(diff, axis=1)/2)
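A minimal usage sketch for the helper above (the data and evaluation points are made up, and a plain map stands in for the multiprocessing pool used in the project):

import numpy as np
from scipy import linalg as la
from scipy.special import logsumexp

data = np.random.randn(100, 2)                   # samples defining the KDE
kernel_cov = np.cov(data.T) * 100 ** (-2 / 6)    # Scott's rule for ndim = 2
cf = la.cho_factor(kernel_cov)

points = np.random.randn(10, 2)
args = [(p, data, cf) for p in points]
logpdfs = list(map(_evaluate_point_logpdf, args))  # unnormalized log-densities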
Example #12
Source File: clustered_kde.py From kombine with MIT License | 6 votes |
def _set_bandwidth(self):
    r"""
    Use Scott's rule to set the kernel bandwidth:

    .. math::

        \mathcal{K} = n^{-1/(d+4)} \Sigma^{1/2}

    Also store Cholesky decomposition for later.
    """
    if self.size > 0 and self._cov is not None:
        self._kernel_cov = self._cov * self.size ** (-2/(self.ndim + 4))

        # Used to evaluate PDF with cho_solve()
        self._cho_factor = la.cho_factor(self._kernel_cov)

        # Make sure the estimated PDF integrates to 1.0
        self._lognorm = self.ndim/2 * np.log(2*np.pi) + np.log(self.size) + \
            np.sum(np.log(np.diag(self._cho_factor[0])))
    else:
        self._lognorm = -np.inf
Example #13
Source File: likelihood.py From radvel with MIT License | 5 votes |
def logprob(self):
    r"""
    Return GP log-likelihood given the data and model.

    log-likelihood is computed using Cholesky decomposition as:

    .. math::

       \ln{\mathcal{L}} = -\frac{1}{2} \mathbf{r}^T \mathbf{K}^{-1} \mathbf{r}
       - \frac{1}{2} \ln{\det(\mathbf{K})} - \frac{N}{2} \ln{(2\pi)}

    where :math:`\mathbf{r}` = vector of residuals (GPLikelihood._resids),
    :math:`\mathbf{K}` = covariance matrix, and :math:`N` = number of datapoints.

    Priors are not applied here. Constant has been omitted.

    Returns:
        float: Natural log of likelihood
    """
    # update the Kernel object hyperparameter values
    self.update_kernel_params()

    r = self._resids()

    self.kernel.compute_covmatrix(self.errorbars())
    K = self.kernel.covmatrix

    # solve alpha = inverse(K)*r
    try:
        alpha = cho_solve(cho_factor(K), r)

        # compute determinant of K
        (s, d) = np.linalg.slogdet(K)

        # calculate likelihood
        like = -.5 * (np.dot(r, alpha) + d + self.N*np.log(2.*np.pi))

        return like
    except (np.linalg.linalg.LinAlgError, ValueError):
        warnings.warn("Non-positive definite kernel detected.", RuntimeWarning)
        return -np.inf
Example #14
Source File: sgcrf.py From sgcrfpy with MIT License | 5 votes |
def chol_inv(B, lower=True):
    """
    Returns the inverse of matrix A, where A = B*B.T,
    ie B is the Cholesky decomposition of A.

    Solves Ax = I
    given B is the Cholesky factorization of A.
    """
    return cho_solve((B, lower), np.eye(B.shape[0]))
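A quick check of the helper (the matrix here is made up, not from the project):

import numpy as np
from scipy.linalg import cholesky, cho_solve

A = np.array([[4.0, 2.0],
              [2.0, 3.0]])            # symmetric positive definite
B = cholesky(A, lower=True)           # A = B @ B.T
A_inv = chol_inv(B, lower=True)

assert np.allclose(A @ A_inv, np.eye(2))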
Example #15
Source File: gpnarx.py From pyflux with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _alpha(self, L):
    """ Covariance-derived term to construct expectations.
    See Rasmussen & Williams.

    Parameters
    ----------
    L : np.ndarray
        Cholesky triangular

    Returns
    ----------
    np.ndarray (alpha)
    """
    return la.cho_solve((L.T, True), la.cho_solve((L, True), np.transpose(self.data)))
Example #16
Source File: GPEIperSecChooser.py From auptimizer with GNU General Public License v3.0 | 5 votes |
def _sample_noisy(self, comp, vals):
    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        noise = hypers[2]

        # This is pretty hacky, but keeps things sane.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf

        if amp2 < 0 or noise < 0:
            return -np.inf

        cov = amp2 * (self.cov_func(self.ls, comp, None) +
                      1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(vals - mean, solve)

        # Roll in noise horseshoe prior.
        lp += np.log(np.log(1 + (self.noise_scale/noise)**2))
        #lp -= 0.5*(np.log(noise)/self.noise_scale)**2

        # Roll in amplitude lognormal prior
        lp -= 0.5*(np.log(amp2)/self.amp2_scale)**2

        return lp

    hypers = util.slice_sample(np.array([self.mean, self.amp2, self.noise]),
                               logprob, compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = hypers[2]
Example #17
Source File: GPEIperSecChooser.py From auptimizer with GNU General Public License v3.0 | 5 votes |
def _sample_time_ls(self, comp, vals):
    def logprob(ls):
        if np.any(ls < 0) or np.any(ls > self.time_max_ls):
            return -np.inf

        cov = self.time_amp2 * (self.cov_func(ls, comp, None) +
                                1e-6*np.eye(comp.shape[0])) + self.time_noise*np.eye(comp.shape[0])
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - self.time_mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(vals - self.time_mean, solve)
        return lp

    self.time_ls = util.slice_sample(self.time_ls, logprob, compwise=True)
Example #18
Source File: GPEIperSecChooser.py From auptimizer with GNU General Public License v3.0 | 5 votes |
def _sample_ls(self, comp, vals):
    def logprob(ls):
        if np.any(ls < 0) or np.any(ls > self.max_ls):
            return -np.inf

        cov = self.amp2 * (self.cov_func(ls, comp, None) +
                           1e-6*np.eye(comp.shape[0])) + self.noise*np.eye(comp.shape[0])
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - self.mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(vals - self.mean, solve)
        return lp

    self.ls = util.slice_sample(self.ls, logprob, compwise=True)
Example #19
Source File: GPEIperSecChooser.py From auptimizer with GNU General Public License v3.0 | 5 votes |
def _sample_time_noisy(self, comp, vals):
    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        noise = hypers[2]

        # This is pretty hacky, but keeps things sane.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf

        if amp2 < 0 or noise < 0:
            return -np.inf

        cov = amp2 * (self.cov_func(self.time_ls, comp, None) +
                      1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(vals - mean, solve)

        # Roll in noise horseshoe prior.
        lp += np.log(np.log(1 + (self.time_noise_scale/noise)**2))
        #lp -= 0.5*(np.log(noise)/self.time_noise_scale)**2

        # Roll in amplitude lognormal prior
        lp -= 0.5*(np.log(np.sqrt(amp2))/self.time_amp2_scale)**2

        return lp

    hypers = util.slice_sample(np.array([self.time_mean, self.time_amp2, self.time_noise]),
                               logprob, compwise=False)
    self.time_mean = hypers[0]
    self.time_amp2 = hypers[1]
    self.time_noise = hypers[2]
Example #20
Source File: linalg.py From sporco with BSD 3-Clause "New" or "Revised" License | 5 votes |
def cho_solve_AATI(A, rho, b, c, lwr, check_finite=True):
    r"""
    Solve the linear system :math:`(A A^T + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A A^T + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
        Matrix :math:`A`
    rho : float
        Scalar :math:`\rho`
    b : array_like
        Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
        Matrix containing lower or upper triangular Cholesky factor,
        as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
        Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
        Solution to the linear system
    """
    N, M = A.shape
    if N >= M:
        x = (b - linalg.cho_solve((c, lwr), b.dot(A).T,
                                  check_finite=check_finite).T.dot(A.T)) / rho
    else:
        x = linalg.cho_solve((c, lwr), b.T, check_finite=check_finite).T
    return x
Example #21
Source File: linalg.py From sporco with BSD 3-Clause "New" or "Revised" License | 5 votes |
def cho_solve_ATAI(A, rho, b, c, lwr, check_finite=True):
    r"""
    Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
        Matrix :math:`A`
    rho : float
        Scalar :math:`\rho`
    b : array_like
        Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
        Matrix containing lower or upper triangular Cholesky factor,
        as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
        Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
        Solution to the linear system
    """
    N, M = A.shape
    if N >= M:
        x = linalg.cho_solve((c, lwr), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.cho_solve((c, lwr), A.dot(b),
                                          check_finite=check_finite))) / rho
    return x
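A sketch of how the companion factor c might be prepared for these helpers (sporco builds it elsewhere; this is an assumed setup, shown for the N >= M branch of cho_solve_ATAI, where c factors A^T A + rho*I; for N < M the helpers instead expect a factor of the smaller system A A^T + rho*I):

import numpy as np
from scipy import linalg

N, M, rho = 8, 3, 0.1
A = np.random.randn(N, M)
b = np.random.randn(M)

# Factor the regularized normal-equations matrix once.
c, lwr = linalg.cho_factor(A.T.dot(A) + rho * np.eye(M))
x = cho_solve_ATAI(A, rho, b, c, lwr)

assert np.allclose((A.T.dot(A) + rho * np.eye(M)).dot(x), b)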
Example #22
Source File: likelihood.py From radvel with MIT License | 5 votes |
def predict(self, xpred):
    """
    Realize the GP using the current values of the hyperparameters at
    values x=xpred. Used for making GP plots.

    Args:
        xpred (np.array): numpy array of x values for realizing the GP

    Returns:
        tuple: tuple containing:
            np.array: the numpy array of predictive means \n
            np.array: the numpy array of predictive standard deviations
    """
    self.update_kernel_params()

    r = np.array([self._resids()]).T

    self.kernel.compute_distances(self.x, self.x)
    K = self.kernel.compute_covmatrix(self.errorbars())

    self.kernel.compute_distances(xpred, self.x)
    Ks = self.kernel.compute_covmatrix(0.)

    L = cho_factor(K)
    alpha = cho_solve(L, r)
    mu = np.dot(Ks, alpha).flatten()

    self.kernel.compute_distances(xpred, xpred)
    Kss = self.kernel.compute_covmatrix(0.)
    B = cho_solve(L, Ks.T)
    var = np.array(np.diag(Kss - np.dot(Ks, B))).flatten()
    stdev = np.sqrt(var)

    # set the default distances back to their regular values
    self.kernel.compute_distances(self.x, self.x)

    return mu, stdev
Example #23
Source File: signal_base.py From enterprise with MIT License | 5 votes |
def _solve_ZNX(self, X, Z):
    """Solves :math:`Z^T N^{-1}X`, where :math:`X`
    and :math:`Z` are 1-d or 2-d arrays.
    """
    if X.ndim == 1:
        X = X.reshape(X.shape[0], 1)
    if Z.ndim == 1:
        Z = Z.reshape(Z.shape[0], 1)

    n, m = Z.shape[1], X.shape[1]
    ZNX = np.zeros((n, m))
    if len(self._idx) > 0:
        ZNXr = np.dot(Z[self._idx, :].T,
                      X[self._idx, :] / self._nvec[self._idx, None])
    else:
        ZNXr = 0
    for slc, block in zip(self._slices, self._blocks):
        Zblock = Z[slc, :]
        Xblock = X[slc, :]

        if slc.stop - slc.start > 1:
            cf = sl.cho_factor(block + np.diag(self._nvec[slc]))
            bx = sl.cho_solve(cf, Xblock)
        else:
            bx = Xblock / self._nvec[slc][:, None]

        ZNX += np.dot(Zblock.T, bx)
    ZNX += ZNXr
    return ZNX.squeeze() if len(ZNX) > 1 else float(ZNX)
Example #24
Source File: lobpcg.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
    """Changes blockVectorV in place."""
    gramYBV = np.dot(blockVectorBY.T, blockVectorV)
    tmp = cho_solve(factYBY, gramYBV)
    blockVectorV -= np.dot(blockVectorY, tmp)
Example #25
Source File: linalg_covmat.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def mvn_nloglike_obs(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    # see numpy thread
    # Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)

    # Still wasteful to calculate pinv first
    sigmainv = linalg.inv(sigma)
    cholsigmainv = linalg.cholesky(sigmainv)
    # 2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A))))  # Dag mailing list
    # logdet not needed ???
    # logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
    x_whitened = np.dot(cholsigmainv, x)

    # sigmainv = linalg.cholesky(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))

    sigma2 = 1.  # error variance is included in sigma

    llike = 0.5 * (np.log(sigma2) - 2. * np.log(np.diagonal(cholsigmainv))
                   + (x_whitened**2) / sigma2
                   + np.log(2 * np.pi))

    return llike, (x_whitened**2)
Example #26
Source File: GPEIOptChooser.py From auptimizer with GNU General Public License v3.0 | 5 votes |
def _sample_noiseless(self, comp, vals):
    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        noise = 1e-3

        # This is pretty hacky, but keeps things sane.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf

        if amp2 < 0:
            return -np.inf

        cov = (amp2 * (self.cov_func(self.ls, comp, None) +
                       1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(vals - mean, solve)

        # Roll in amplitude lognormal prior
        lp -= 0.5*(np.log(np.sqrt(amp2))/self.amp2_scale)**2

        return lp

    hypers = util.slice_sample(np.array([self.mean, self.amp2, self.noise]),
                               logprob, compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = 1e-3
Example #27
Source File: linalg_decomp_1.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def yt_minv_y(self, y):
    '''ySigmainvy

    doesn't use stored cholesky yet
    '''
    return np.dot(y, linalg.cho_solve(linalg.cho_factor(self.m), y))
    # same as
    # lower = False   # if cholesky(sigma) is used, default is upper
    # np.dot(y, linalg.cho_solve((self.cholsigma, lower), y))
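A sketch of the stored-factor variant the trailing comment alludes to, assuming the class caches self.cholsigma = linalg.cholesky(self.m) (upper triangular by scipy's default, hence lower=False):

def yt_minv_y_cached(self, y):
    '''Same quadratic form, reusing a stored Cholesky factor.'''
    # scipy.linalg.cholesky() returns an upper factor by default
    return np.dot(y, linalg.cho_solve((self.cholsigma, False), y))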
Example #28
Source File: signal_base.py From enterprise with MIT License | 5 votes |
def _solve_NX(self, X):
    """Solves :math:`N^{-1}X`, where :math:`X` is a 1-d or 2-d array.
    """
    if X.ndim == 1:
        X = X.reshape(X.shape[0], 1)

    NX = X / self._nvec[:, None]
    for slc, block in zip(self._slices, self._blocks):
        Xblock = X[slc, :]
        if slc.stop - slc.start > 1:
            cf = sl.cho_factor(block + np.diag(self._nvec[slc]))
            NX[slc] = sl.cho_solve(cf, Xblock)
    return NX.squeeze()
Example #29
Source File: signal_base.py From enterprise with MIT License | 5 votes |
def __call__(self, other):
    return sl.cho_solve(self.cf, other)
Example #30
Source File: mvnormal.py From cgpm with Apache License 2.0 | 5 votes |
def solve(self, Y):
    return la.cho_solve(self._cholesky, Y)