Python scipy.sparse.linalg.LinearOperator() Examples
The following are 30 code examples of scipy.sparse.linalg.LinearOperator(), collected from open-source projects.
You can go to the original project or source file by following the link above each example, or browse the other functions and classes of the scipy.sparse.linalg module.
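Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: wrap a matrix-free matvec in a LinearOperator and hand it to an iterative solver. The toy diagonal matrix and all names below are our own choices, not taken from any project on this page.

import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

n = 5

def matvec(x):
    # Acts like diag(1, 2, ..., n) without ever storing the matrix.
    return np.arange(1, n + 1) * x

A = LinearOperator((n, n), matvec=matvec, dtype=float)
b = np.ones(n)
x, info = cg(A, b)  # info == 0 signals convergence
# x is approximately [1, 1/2, 1/3, 1/4, 1/5]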
Example #1
Source File: gw_iter.py From pyscf with Apache License 2.0

def gw_comp_veff(self, vext, comega=1j*0.0):
    """
    Computes an effective field (scalar potential) given the external
    scalar potential, by solving

        (1 - v\chi_{0}) V_{eff} = V_{ext}
                                = X_{a}^{n} V_{\mu}^{ab} X_{b}^{m} * v\chi_{0}v * X_{a}^{n} V_{\nu}^{ab} X_{b}^{m}

    Returns V_{eff} as a list for all n states (self.nn[s]).
    """
    from scipy.sparse.linalg import LinearOperator, lgmres
    self.comega_current = comega
    veff_op = LinearOperator((self.nprod, self.nprod),
                             matvec=self.gw_vext2veffmatvec,
                             dtype=self.dtypeComplex)
    resgm, info = lgmres(veff_op,
                         np.require(vext, dtype=self.dtypeComplex, requirements='C'),
                         atol=self.gw_iter_tol, maxiter=self.maxiter)
    if info != 0:
        print("LGMRES has not achieved convergence: exitCode = {}".format(info))
    return resgm
Example #2
Source File: linear_operators.py From edm2016 with Apache License 2.0

def get_subset_lin_op(lin_op, sub_idx):
    """ Subset a linear operator to the indices in `sub_idx`. Equivalent to A' = A[sub_idx, :]

    :param LinearOperator lin_op: input linear operator
    :param np.ndarray[int] sub_idx: subset index
    :return: the subset linear operator
    :rtype: LinearOperator
    """
    if lin_op is None:
        return None
    if type(lin_op) is IndexOperator:
        # subsetting IndexOperator yields a new IndexOperator
        return IndexOperator(lin_op.index_map[sub_idx], dim_x=lin_op.dim_x)
    elif isinstance(lin_op, MatrixLinearOperator):
        # subsetting a matrix multiplication operation yields a new matrix
        return MatrixLinearOperator(lin_op.A[sub_idx, :])
    # in the general case, append a sub-indexing operator
    return IndexOperator(sub_idx, dim_x=lin_op.shape[0]) * lin_op
Example #3
Source File: lobpcg.py From Splunking-Crime with GNU Affero General Public License v3.0

def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    Examples
    --------
    >>> A = _makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )

    """
    if operatorInput is None:
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator
Example #4
Source File: tr_interior_point.py From GraphicDesignPatternByPython with MIT License

def lagrangian_hessian(self, z, v):
    """Returns the scaled Lagrangian Hessian."""
    # Compute Hessian with respect to x and s
    Hx = self.lagrangian_hessian_x(z, v)
    if self.n_ineq > 0:
        S_Hs_S = self.lagrangian_hessian_s(z, v)

    # The scaled Lagrangian Hessian is:
    #     [ Hx    0    ]
    #     [ 0   S Hs S ]
    def matvec(vec):
        vec_x = self.get_variables(vec)
        vec_s = self.get_slack(vec)
        if self.n_ineq > 0:
            return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
        else:
            return Hx.dot(vec_x)

    return LinearOperator((self.n_vars+self.n_ineq,
                           self.n_vars+self.n_ineq),
                          matvec)
Example #5
Source File: block.py From block with Apache License 2.0

def build_full(self, shape, fill_val):
    m, n = shape
    if fill_val == 0:
        return shape
    else:
        def matvec(v):
            return v.sum() * fill_val * np.ones(m)

        def rmatvec(v):
            return v.sum() * fill_val * np.ones(n)

        def matmat(M):
            return M.sum(axis=0) * fill_val * np.ones((m, M.shape[1]))

        return sla.LinearOperator(shape=shape, matvec=matvec,
                                  rmatvec=rmatvec, matmat=matmat,
                                  dtype=self.dtype)
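A quick standalone check of what build_full represents (toy sizes of our own choosing): the operator acts like the dense constant matrix fill_val * ones((m, n)) without ever materializing it.

import numpy as np
import scipy.sparse.linalg as sla

m, n, fill_val = 4, 3, 2.5
op = sla.LinearOperator((m, n),
                        matvec=lambda v: v.sum() * fill_val * np.ones(m),
                        rmatvec=lambda v: v.sum() * fill_val * np.ones(n))

v = np.arange(n, dtype=float)
assert np.allclose(op.matvec(v), (fill_val * np.ones((m, n))) @ v)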
Example #6
Source File: implicit.py From reveal-graph-embedding with Apache License 2.0

def get_pagerank_with_teleportation_from_transition_matrix(rw_transition,
                                                           rw_transition_t,
                                                           rho):
    number_of_nodes = rw_transition.shape[0]

    # Set up the random walk with teleportation matrix.
    non_teleportation = 1 - rho
    mv = lambda l, v: non_teleportation*l.dot(v) + (rho/number_of_nodes)*np.ones_like(v)
    teleport = lambda vec: mv(rw_transition_t, vec)
    rw_transition_operator = spla.LinearOperator(rw_transition.shape,
                                                 matvec=teleport,
                                                 dtype=np.float64)

    # Calculate stationary distribution.
    try:
        eigenvalue, stationary_distribution = spla.eigs(rw_transition_operator,
                                                        k=1,
                                                        which='LM',
                                                        return_eigenvectors=True)
    except spla.ArpackNoConvergence as e:
        print("ARPACK has not converged.")
        eigenvalue = e.eigenvalues
        stationary_distribution = e.eigenvectors

    stationary_distribution = stationary_distribution.flatten().real/stationary_distribution.sum()

    return stationary_distribution
Example #7
Source File: lbfgsb.py From GraphicDesignPatternByPython with MIT License

def todense(self):
    """Return a dense array representation of this operator.

    Returns
    -------
    arr : ndarray, shape=(n, n)
        An array with the same shape and containing
        the same data represented by this `LinearOperator`.

    """
    s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
    I = np.eye(*self.shape, dtype=self.dtype)
    Hk = I

    for i in range(n_corrs):
        A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
        A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]

        Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
                                           s[i][np.newaxis, :])
    return Hk
Example #8
Source File: common.py From GraphicDesignPatternByPython with MIT License

def left_multiply(J, d, copy=True):
    """Compute diag(d) J.

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = left_multiplied_operator(J, d)
    else:
        J *= d[:, np.newaxis]

    return J
Example #9
Source File: common.py From GraphicDesignPatternByPython with MIT License

def right_multiply(J, d, copy=True):
    """Compute J diag(d).

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = right_multiplied_operator(J, d)
    else:
        J *= d

    return J
Example #10
Source File: common.py From GraphicDesignPatternByPython with MIT License

def regularized_lsq_operator(J, diag):
    """Return a matrix arising in regularized least squares as LinearOperator.

    The matrix is
        [ J ]
        [ D ]
    where D is a diagonal matrix with elements from `diag`.
    """
    J = aslinearoperator(J)
    m, n = J.shape

    def matvec(x):
        return np.hstack((J.matvec(x), diag * x))

    def rmatvec(x):
        x1 = x[:m]
        x2 = x[m:]
        return J.rmatvec(x1) + diag * x2

    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
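A hypothetical usage sketch for the operator above, assuming regularized_lsq_operator and its imports (numpy as np, aslinearoperator, LinearOperator) are in scope. Stacking D under J turns the damped problem min ||J x - b||^2 + ||diag * x||^2 into an ordinary least-squares problem that LSMR can solve, provided zeros are appended to the right-hand side for the D rows.

import numpy as np
from scipy.sparse.linalg import lsmr

rng = np.random.default_rng(0)
J = rng.standard_normal((6, 3))
b = rng.standard_normal(6)
diag = 0.1 * np.ones(3)              # damping factors, one per column of J

op = regularized_lsq_operator(J, diag)
b_aug = np.hstack((b, np.zeros(3)))  # zeros correspond to the D rows
x = lsmr(op, b_aug)[0]               # damped least-squares solution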
Example #11
Source File: common.py From lambda-packs with MIT License

def regularized_lsq_operator(J, diag):
    """Return a matrix arising in regularized least squares as LinearOperator.

    The matrix is
        [ J ]
        [ D ]
    where D is a diagonal matrix with elements from `diag`.
    """
    J = aslinearoperator(J)
    m, n = J.shape

    def matvec(x):
        return np.hstack((J.matvec(x), diag * x))

    def rmatvec(x):
        x1 = x[:m]
        x2 = x[m:]
        return J.rmatvec(x1) + diag * x2

    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
Example #12
Source File: common.py From lambda-packs with MIT License

def right_multiply(J, d, copy=True):
    """Compute J diag(d).

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = right_multiplied_operator(J, d)
    else:
        J *= d

    return J
Example #13
Source File: dogbox.py From lambda-packs with MIT License

def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by the dogbox algorithm.

    The `active_set` mask is used to exclude active variables from
    computations of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        x_free = x.ravel().copy()
        x_free[active_set] = 0  # zero out active variables before applying J
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
Example #14
Source File: lobpcg.py From GraphicDesignPatternByPython with MIT License

def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    Examples
    --------
    >>> A = _makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )

    """
    if operatorInput is None:
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator
Example #15
Source File: test_arpack.py From GraphicDesignPatternByPython with MIT License

def test_eigsh_for_k_greater():
    # Test eigsh() for k beyond limits.
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    eig_tuple1 = eigh(A, b=M_dense)
    eig_tuple2 = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
        assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
        assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)

        # M as LinearOperator
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # Test 'A' for different types
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
Example #16
Source File: test_arpack.py From GraphicDesignPatternByPython with MIT License

def test_eigs_for_k_greater():
    # Test eigs() for k beyond limits.
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    eig_tuple1 = eig(A, b=M_dense)
    eig_tuple2 = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
        assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
        assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
        assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)

        # M as LinearOperator
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # Test 'A' for different types
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
Example #17
Source File: tr_interior_point.py From ip-nonlinear-solver with BSD 3-Clause "New" or "Revised" License

def lagrangian_hessian(self, z, v):
    """Returns the scaled Lagrangian Hessian."""
    # Compute Hessian with respect to x and s
    Hx = self.lagrangian_hessian_x(z, v)
    if self.n_ineq > 0:
        S_Hs_S = self.lagrangian_hessian_s(z, v)

    # The scaled Lagrangian Hessian is:
    #     [ Hx    0    ]
    #     [ 0   S Hs S ]
    def matvec(vec):
        vec_x = self.get_variables(vec)
        vec_s = self.get_slack(vec)
        if self.n_ineq > 0:
            return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
        else:
            return Hx.dot(vec_x)

    return LinearOperator((self.n_vars+self.n_ineq,
                           self.n_vars+self.n_ineq),
                          matvec)
Example #18
Source File: test_iterative.py From GraphicDesignPatternByPython with MIT License

def _check_reentrancy(solver, is_reentrant):
    def matvec(x):
        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
        y, info = solver(A, x)
        assert_equal(info, 0)
        return y
    b = np.array([1, 1./2, 1./3])
    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
                        dtype=b.dtype)

    if not is_reentrant:
        assert_raises(RuntimeError, solver, op, b)
    else:
        y, info = solver(op, b)
        assert_equal(info, 0)
        assert_allclose(y, [1, 1, 1])
Example #19
Source File: test_iterative.py From Computable with MIT License

def _check_reentrancy(solver, is_reentrant):
    def matvec(x):
        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
        y, info = solver(A, x)
        assert_equal(info, 0)
        return y
    b = np.array([1, 1./2, 1./3])
    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
                        dtype=b.dtype)

    if not is_reentrant:
        assert_raises(RuntimeError, solver, op, b)
    else:
        y, info = solver(op, b)
        assert_equal(info, 0)
        assert_allclose(y, [1, 1, 1])

#------------------------------------------------------------------------------
Example #20
Source File: linear_operators.py From edm2016 with Apache License 2.0

def rmatvec_nd(lin_op, x):
    """
    Project a 1D or 2D numpy or sparse array using rmatvec. This is different
    from rmatvec because it applies rmatvec to each row and column. If x is
    n x n and lin_op is n x k, the result will be k x k.

    :param LinearOperator lin_op: The linear operator to apply to x
    :param np.ndarray|sp.spmatrix x: array/matrix to be projected
    :return: the projected array
    :rtype: np.ndarray|sp.spmatrix
    """
    if x is None or lin_op is None:
        return x
    if isinstance(x, sp.spmatrix):
        y = x.toarray()
    elif np.isscalar(x):
        y = np.array(x, ndmin=1)
    else:
        y = np.copy(x)
    proj_func = lambda z: lin_op.rmatvec(z)
    for j in range(y.ndim):
        if y.shape[j] == lin_op.shape[0]:
            y = np.apply_along_axis(proj_func, j, y)
    return y
Example #21
Source File: common.py From lambda-packs with MIT License

def left_multiply(J, d, copy=True):
    """Compute diag(d) J.

    If `copy` is False, `J` is modified in place (unless it is a
    LinearOperator).
    """
    if copy and not isinstance(J, LinearOperator):
        J = J.copy()

    if issparse(J):
        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
    elif isinstance(J, LinearOperator):
        J = left_multiplied_operator(J, d)
    else:
        J *= d[:, np.newaxis]

    return J
Example #22
Source File: lobpcg.py From lambda-packs with MIT License

def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    Examples
    --------
    >>> A = _makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )

    """
    if operatorInput is None:
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator
Example #23
Source File: common.py From GraphicDesignPatternByPython with MIT License

def evaluate_quadratic(J, g, s, diag=None):
    """Compute values of a quadratic function arising in least squares.

    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (k, n) or (n,)
        Array containing steps as rows.
    diag : ndarray, shape (n,), optional
        Additional diagonal part, affects the quadratic term.
        If None, assumed to be 0.

    Returns
    -------
    values : ndarray with shape (k,) or float
        Values of the function. If `s` was 2-dimensional, then an ndarray is
        returned, otherwise a float is returned.
    """
    if s.ndim == 1:
        Js = J.dot(s)
        q = np.dot(Js, Js)
        if diag is not None:
            q += np.dot(s * diag, s)
    else:
        Js = J.dot(s.T)
        q = np.sum(Js**2, axis=0)
        if diag is not None:
            q += np.sum(diag * s**2, axis=1)

    l = np.dot(s, g)

    return 0.5 * q + l


# Utility functions to work with bound constraints.
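A small sanity check of evaluate_quadratic against the explicit formula from its docstring, using toy numbers of our own choosing (the function above and numpy are assumed to be in scope).

import numpy as np

J = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
g = np.array([0.5, -1.0])
s = np.array([1.0, 2.0])

val = evaluate_quadratic(J, g, s)
expected = 0.5 * s @ (J.T @ J) @ s + g @ s  # no diag term
assert np.isclose(val, expected)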
Example #24
Source File: parametrization.py From spins-b with GNU General Public License v3.0

def calculate_gradient(self) -> LinearOperator:
    """Calculates the gradient of the parametrization.

    Note that implementations should consider caching the gradient if the
    operation is expensive.

    Returns:
        A linear operator that represents the Jacobian of the
        parametrization.
    """
    raise NotImplementedError('calculate_gradient not defined')
Example #25
Source File: test_iterative.py From GraphicDesignPatternByPython with MIT License

def check_precond_dummy(solver, case):
    tol = 1e-8

    def identity(b, which=None):
        """trivial preconditioner"""
        return b

    A = case.A
    M, N = A.shape
    D = spdiags([1.0/A.diagonal()], [0], M, N)
    b = arange(A.shape[0], dtype=float)
    x0 = 0*b

    precond = LinearOperator(A.shape, identity, rmatvec=identity)

    if solver is qmr:
        x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
    else:
        x, info = solver(A, b, M=precond, x0=x0, tol=tol)
    assert_equal(info, 0)
    assert_normclose(A.dot(x), b, tol)

    A = aslinearoperator(A)
    A.psolve = identity
    A.rpsolve = identity

    x, info = solver(A, b, x0=x0, tol=tol)
    assert_equal(info, 0)
    assert_normclose(A*x, b, tol=tol)
Example #26
Source File: linalg.py From RBF with MIT License

def __init__(self, A, drop_tol=0.005, fill_factor=2.0, normalize_inplace=False):
    # the spilu and gmres functions are most efficient with csc sparse. If
    # the matrix is already csc then this will do nothing
    A = sp.csc_matrix(A)
    n = row_norms(A)
    if normalize_inplace:
        divide_rows(A, n, inplace=True)
    else:
        A = divide_rows(A, n, inplace=False).tocsc()

    LOGGER.debug(
        'computing the ILU decomposition of a %s by %s sparse matrix with %s '
        'nonzeros ' % (A.shape + (A.nnz,)))
    ilu = spla.spilu(A, drop_rule='basic', drop_tol=drop_tol,
                     fill_factor=fill_factor)
    LOGGER.debug('done')
    M = spla.LinearOperator(A.shape, ilu.solve)
    self.A = A
    self.M = M
    self.n = n
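The class above wraps an incomplete-LU factorization as a LinearOperator so it can serve as a preconditioner. A generic, self-contained sketch of the same idea (toy tridiagonal matrix of our own choosing, not the RBF project's API):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(100, 100), format='csc')
b = np.ones(100)

ilu = spla.spilu(A, drop_tol=0.005, fill_factor=2.0)
M = spla.LinearOperator(A.shape, ilu.solve)  # applies an approximate A^{-1}
x, info = spla.gmres(A, b, M=M)              # info == 0 signals convergence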
Example #27
Source File: optim.py From imitation with MIT License

def ngstep(x0, obj0, objgrad0, obj_and_kl_func, hvpx0_func, max_kl,
           damping, max_cg_iter, enable_bt):
    '''
    Natural gradient step using hessian-vector products

    Args:
        x0: current point
        obj0: objective value at x0
        objgrad0: grad of objective value at x0
        obj_and_kl_func: function mapping a point x to the objective and kl values
        hvpx0_func: function mapping a vector v to the KL Hessian-vector product H(x0)v
        max_kl: max kl divergence limit. Triggers a line search.
        damping: multiple of I to mix with Hessians for Hessian-vector products
        max_cg_iter: max conjugate gradient iterations for solving for natural gradient step
    '''
    assert x0.ndim == 1 and x0.shape == objgrad0.shape

    # Solve for step direction
    damped_hvp_func = lambda v: hvpx0_func(v) + damping*v
    hvpop = ssl.LinearOperator(shape=(x0.shape[0], x0.shape[0]),
                               matvec=damped_hvp_func)
    step, _ = ssl.cg(hvpop, -objgrad0, maxiter=max_cg_iter)
    fullstep = step / np.sqrt(.5 * step.dot(damped_hvp_func(step)) / max_kl + 1e-8)

    # Line search on objective with a hard KL wall
    if not enable_bt:
        return x0 + fullstep, 0

    def barrierobj(p):
        obj, kl = obj_and_kl_func(p)
        return np.inf if kl > 2*max_kl else obj

    xnew, num_bt_steps = btlinesearch(
        f=barrierobj, x0=x0, fx0=obj0, g=objgrad0, dx=fullstep,
        accept_ratio=.1, shrink_factor=.5, max_steps=10)
    return xnew, num_bt_steps
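A toy illustration of the Hessian-vector-product pattern ngstep relies on, with a stand-in diagonal Hessian of our own choosing: conjugate gradient solves H x = -g using only matvecs, never forming or inverting H.

import numpy as np
import scipy.sparse.linalg as ssl

H = np.diag([1.0, 10.0, 100.0])          # stand-in for the KL Hessian
g = np.array([1.0, 1.0, 1.0])            # stand-in gradient
damping = 1e-2

damped_hvp = lambda v: H @ v + damping * v
op = ssl.LinearOperator((3, 3), matvec=damped_hvp)
step, info = ssl.cg(op, -g, maxiter=50)  # natural-gradient step direction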
Example #28
Source File: spherically_project.py From FastSurfer with Apache License 2.0

def laplaceTria(v, t, k=10):
    """
    Compute linear finite-element method Laplace-Beltrami spectrum
    """
    from scipy.sparse.linalg import LinearOperator, eigsh, splu

    useCholmod = True
    try:
        from sksparse.cholmod import cholesky
    except ImportError:
        useCholmod = False

    if useCholmod:
        print("Solver: cholesky decomp - performance optimal ...")
    else:
        print("Package scikit-sparse not found (Cholesky decomp)")
        print("Solver: spsolve (LU decomp) - performance not optimal ...")

    # import numpy as np
    # from shapeDNA import computeABtria
    A, M = computeABtria(v, t, lump=True)

    # turns out it is much faster to use cholesky and pass operator
    sigma = -0.01
    if useCholmod:
        chol = cholesky(A - sigma * M)
        OPinv = LinearOperator(matvec=chol, shape=A.shape, dtype=A.dtype)
    else:
        lu = splu(A - sigma * M)
        OPinv = LinearOperator(matvec=lu.solve, shape=A.shape, dtype=A.dtype)

    eigenvalues, eigenvectors = eigsh(A, k, M, sigma=sigma, OPinv=OPinv)
    return eigenvalues, eigenvectors
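A generic, self-contained sketch of the shift-invert setup used above, with toy matrices of our own choosing: factor A - sigma*M once with splu and hand its solve to eigsh through OPinv.

import scipy.sparse as sp
from scipy.sparse.linalg import LinearOperator, eigsh, splu

n = 50
A = sp.diags([2.0, -1.0, -1.0], [0, -1, 1], shape=(n, n), format='csc')  # stiffness stand-in
M = sp.identity(n, format='csc')                                         # mass matrix stand-in
sigma = -0.01

lu = splu((A - sigma * M).tocsc())
OPinv = LinearOperator(matvec=lu.solve, shape=A.shape, dtype=A.dtype)
vals, vecs = eigsh(A, k=10, M=M, sigma=sigma, OPinv=OPinv)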
Example #29
Source File: common.py From Splunking-Crime with GNU Affero General Public License v3.0

def evaluate_quadratic(J, g, s, diag=None):
    """Compute values of a quadratic function arising in least squares.

    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (k, n) or (n,)
        Array containing steps as rows.
    diag : ndarray, shape (n,), optional
        Additional diagonal part, affects the quadratic term.
        If None, assumed to be 0.

    Returns
    -------
    values : ndarray with shape (k,) or float
        Values of the function. If `s` was 2-dimensional, then an ndarray is
        returned, otherwise a float is returned.
    """
    if s.ndim == 1:
        Js = J.dot(s)
        q = np.dot(Js, Js)
        if diag is not None:
            q += np.dot(s * diag, s)
    else:
        Js = J.dot(s.T)
        q = np.sum(Js**2, axis=0)
        if diag is not None:
            q += np.sum(diag * s**2, axis=1)

    l = np.dot(s, g)

    return 0.5 * q + l


# Utility functions to work with bound constraints.
Example #30
Source File: test_iterative.py From GraphicDesignPatternByPython with MIT License

def test_leftright_precond(self):
    """Check that QMR works with left and right preconditioners"""
    from scipy.sparse.linalg.dsolve import splu
    from scipy.sparse.linalg.interface import LinearOperator

    n = 100

    dat = ones(n)
    A = spdiags([-2*dat, 4*dat, -dat], [-1, 0, 1], n, n)
    b = arange(n, dtype='d')

    L = spdiags([-dat/2, dat], [-1, 0], n, n)
    U = spdiags([4*dat, -dat], [0, 1], n, n)
    with suppress_warnings() as sup:
        sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
        L_solver = splu(L)
        U_solver = splu(U)

    def L_solve(b):
        return L_solver.solve(b)

    def U_solve(b):
        return U_solver.solve(b)

    def LT_solve(b):
        return L_solver.solve(b, 'T')

    def UT_solve(b):
        return U_solver.solve(b, 'T')

    M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
    M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

    assert_equal(info, 0)
    assert_normclose(A*x, b, tol=1e-8)