Python scipy.linalg.pinv2() Examples
The following are 8 code examples of scipy.linalg.pinv2(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.linalg, or try the search function. Note that pinv2 was deprecated in SciPy 1.7.0 and removed in SciPy 1.9.0; on current SciPy versions, scipy.linalg.pinv is its replacement.
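Before the project examples, here is a minimal usage sketch (not taken from any project below) showing the call pattern they all share: build the pseudo-inverse, then use it as a least-squares solver.

import numpy as np
from scipy.linalg import pinv2  # on SciPy >= 1.9, use scipy.linalg.pinv instead

rng = np.random.RandomState(0)
A = rng.randn(9, 6)       # tall matrix, full column rank
b = rng.randn(9)

A_pinv = pinv2(A)         # shape (6, 9)
x = A_pinv @ b            # least-squares solution of A x ~= b

# The result satisfies the Moore-Penrose identities:
assert np.allclose(A, A @ A_pinv @ A)
assert np.allclose(A_pinv, A_pinv @ A @ A_pinv)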
Example #1
Source File: elm.py From SVM-CNN with Apache License 2.0 | 5 votes |
def _fit_regression(self, y):
    """Fit regression using pseudo-inverse or supplied regressor."""
    if self.regressor is None:
        self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
    else:
        self.regressor.fit(self.hidden_activations_, y)
    self.fitted_ = True
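The pinv2 call above is the heart of extreme-learning-machine training: the hidden-layer weights are random and fixed, so the output weights reduce to a single least-squares solve. Below is a self-contained sketch of that idea; the names (elm_fit, n_hidden) and the tanh activation are illustrative assumptions, not code from the SVM-CNN project.

import numpy as np
from scipy.linalg import pinv2  # scipy.linalg.pinv on SciPy >= 1.9

def elm_fit(X, y, n_hidden=50, random_state=0):
    """One-shot ELM training: random hidden layer, pseudo-inverse readout."""
    rng = np.random.RandomState(random_state)
    W = rng.randn(X.shape[1], n_hidden)   # fixed random input weights
    b = rng.randn(n_hidden)               # fixed random biases
    H = np.tanh(X @ W + b)                # hidden activations
    coefs = pinv2(H) @ y                  # least-squares output weights
    return W, b, coefs

def elm_predict(X, W, b, coefs):
    return np.tanh(X @ W + b) @ coefs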
Example #2
Source File: test_utils.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_pinvh_nonpositive():
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric, non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_almost_equal(a_pinv, a_pinvh)
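The test exploits the fact that for a symmetric matrix the SVD-based pseudo-inverse (pinv2) and the eigendecomposition-based pinvh must agree, even when the matrix is indefinite and singular. The same check written as a standalone script, with imports added (a sketch mirroring the test above, not the project file itself):

import numpy as np
from scipy.linalg import pinv2, pinvh  # pinv2 removed in SciPy 1.9; use pinv there

a = np.arange(1.0, 10.0).reshape(3, 3)
a = a @ a.T                  # symmetric positive semi-definite, singular
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = (u * s) @ vt             # flip one singular value: still symmetric, now indefinite

# Eigendecomposition-based pinvh matches the SVD-based pseudo-inverse.
np.testing.assert_almost_equal(pinv2(a), pinvh(a))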
Example #3
Source File: linalg.py From vnpy_crypto with MIT License | 4 votes |
def pinv2(a, cond=None, rcond=None):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its
    singular-value decomposition and including all 'large' singular
    values.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values. Singular values smaller
        than ``rcond*largest_singular_value`` are considered zero.
        If None or -1, suitable machine precision is used.

    Returns
    -------
    B : array, shape (N, M)

    Raises LinAlgError if SVD computation does not converge.

    Examples
    --------
    >>> from numpy import *
    >>> a = random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    >>> allclose(B, dot(B, dot(a, B)))
    True

    """
    a = asarray_chkfinite(a)
    u, s, vh = decomp_svd(a)
    t = u.dtype.char
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        cond = {0: feps * 1e3, 1: eps * 1e6}[_array_precision[t]]
    m, n = a.shape
    cutoff = cond * numpy.maximum.reduce(s)
    psigma = zeros((m, n), t)
    for i in range(len(s)):
        if s[i] > cutoff:
            psigma[i, i] = 1.0 / conjugate(s[i])
    # XXX: use lapack/blas routines for dot
    return transpose(conjugate(dot(dot(u, psigma), vh)))
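This vendored copy follows the classic SVD recipe: A+ = V Sigma+ U^H, where Sigma+ inverts the singular values above a cutoff and zeroes the rest. The same algorithm in compact modern NumPy, for illustration only (the default cutoff below is an assumption in the style of NumPy's defaults, not the project's choice):

import numpy as np

def pinv_svd(a, cond=None):
    """SVD-based pseudo-inverse: invert only the 'large' singular values."""
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    if cond is None:
        cond = np.finfo(s.dtype).eps * max(a.shape)  # assumed default cutoff factor
    cutoff = cond * s.max()
    s_inv = np.where(s > cutoff, 1.0 / s, 0.0)       # zero out small singular values
    return (vh.conj().T * s_inv) @ u.conj().T        # V Sigma+ U^H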
Example #4
Source File: pls_.py From Mastering-Elasticsearch-7.0 with MIT License | 4 votes |
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and
    right singular vectors of X'Y. See PLS for the meaning of the
    parameters. It is similar to the Power method for determining the
    eigenvectors and eigenvalues of a X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = pinv2(X, check_finite=False)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # If y_score only has zeros x_weights will only have zeros. In
        # this case add an epsilon to converge to a more acceptable
        # solution
        if np.dot(x_weights.T, x_weights) < eps:
            x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = pinv2(Y, check_finite=False)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached',
                          ConvergenceWarning)
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
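The loop is a power iteration on X'Y: at convergence, x_weights and y_weights are the first left and right singular vectors of X'Y, which is what makes pinv2 necessary only in mode "B". A quick way to sanity-check that claim, assuming the function above is importable along with its np/warnings dependencies (sign ambiguity and the loop's 1e-06 tolerance explain the loose atol):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
Y = rng.randn(100, 3)

# Reference: first singular vectors of X'Y via a direct SVD.
u, s, vh = np.linalg.svd(X.T @ Y)
u1, v1 = u[:, 0], vh[0]

x_w, y_w, n_iter = _nipals_twoblocks_inner_loop(X, Y, mode="A",
                                                norm_y_weights=True)

# Singular vectors are defined only up to sign.
assert np.allclose(np.abs(x_w.ravel()), np.abs(u1), atol=1e-2)
assert np.allclose(np.abs(y_w.ravel()), np.abs(v1), atol=1e-2)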
Example #5
Source File: pls_.py From Splunking-Crime with GNU Affero General Public License v3.0 | 4 votes |
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and
    right singular vectors of X'Y. See PLS for the meaning of the
    parameters. It is similar to the Power method for determining the
    eigenvectors and eigenvalues of a X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = pinv2(X, check_finite=False)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # If y_score only has zeros x_weights will only have zeros. In
        # this case add an epsilon to converge to a more acceptable
        # solution
        if np.dot(x_weights.T, x_weights) < eps:
            x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = pinv2(Y, check_finite=False)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
Example #6
Source File: linalg.py From Splunking-Crime with GNU Affero General Public License v3.0 | 4 votes |
def pinv2(a, cond=None, rcond=None):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its
    singular-value decomposition and including all 'large' singular
    values.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values. Singular values smaller
        than ``rcond*largest_singular_value`` are considered zero.
        If None or -1, suitable machine precision is used.

    Returns
    -------
    B : array, shape (N, M)

    Raises LinAlgError if SVD computation does not converge.

    Examples
    --------
    >>> from numpy import *
    >>> a = random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    >>> allclose(B, dot(B, dot(a, B)))
    True

    """
    a = asarray_chkfinite(a)
    u, s, vh = decomp_svd(a)
    t = u.dtype.char
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        cond = {0: feps * 1e3, 1: eps * 1e6}[_array_precision[t]]
    m, n = a.shape
    cutoff = cond * numpy.maximum.reduce(s)
    psigma = zeros((m, n), t)
    for i in range(len(s)):
        if s[i] > cutoff:
            psigma[i, i] = 1.0 / conjugate(s[i])
    # XXX: use lapack/blas routines for dot
    return transpose(conjugate(dot(dot(u, psigma), vh)))
Example #7
Source File: signal.py From sensormotion with MIT License | 4 votes |
def baseline(y, deg=None, max_it=None, tol=None):
    """Computes the baseline of a given data.

    Iteratively performs a polynomial fitting in the data to detect its
    baseline. At every iteration, the fitting weights on the regions
    with peaks are reduced to identify the baseline only.

    Parameters
    ----------
    y : ndarray
        Data to detect the baseline.
    deg : int
        Degree of the polynomial that will estimate the data baseline.
        A low degree may fail to detect all the baseline present, while
        a high degree may make the data too oscillatory, especially at
        the edges.
    max_it : int
        Maximum number of iterations to perform.
    tol : float
        Tolerance to use when comparing the difference between the
        current fit coefficients and the ones from the last iteration.
        The iteration procedure will stop when the difference between
        them is lower than *tol*.

    Returns
    -------
    baseline : ndarray
        Array with the baseline amplitude for every original point in *y*
    """
    # for not repeating ourselves in `envelope`
    if deg is None:
        deg = 3
    if max_it is None:
        max_it = 100
    if tol is None:
        tol = 1e-3

    order = deg + 1
    coeffs = np.ones(order)

    # try to avoid numerical issues
    cond = math.pow(y.max(), 1.0 / order)
    x = np.linspace(0.0, cond, y.size)
    base = y.copy()

    vander = np.vander(x, order)
    vander_pinv = la.pinv2(vander)

    for _ in range(max_it):
        coeffs_new = np.dot(vander_pinv, y)

        if la.norm(coeffs_new - coeffs) / la.norm(coeffs) < tol:
            break

        coeffs = coeffs_new
        base = np.dot(vander, coeffs)
        y = np.minimum(y, base)

    return base
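A typical use of this function is stripping a slow drift from a noisy signal before peak detection. A small usage sketch, assuming baseline above is importable and that its module binds numpy and scipy.linalg as np and la (the synthetic signal below is illustrative, not from the sensormotion project):

import numpy as np

# Synthetic signal: slow quadratic drift plus two Gaussian peaks.
x = np.linspace(0.0, 10.0, 500)
drift = 0.05 * x**2 + 0.3 * x
peaks = np.exp(-(x - 3)**2 / 0.05) + 0.7 * np.exp(-(x - 7)**2 / 0.1)
y = drift + peaks

base = baseline(y, deg=3)   # estimate the drift
detrended = y - base        # peaks now sit on a roughly flat baseline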
Example #8
Source File: pls_.py From twitter-stock-recommendation with MIT License | 4 votes |
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and
    right singular vectors of X'Y. See PLS for the meaning of the
    parameters. It is similar to the Power method for determining the
    eigenvectors and eigenvalues of a X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = pinv2(X, check_finite=False)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # If y_score only has zeros x_weights will only have zeros. In
        # this case add an epsilon to converge to a more acceptable
        # solution
        if np.dot(x_weights.T, x_weights) < eps:
            x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = pinv2(Y, check_finite=False)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite