Python scipy.sparse.SparseEfficiencyWarning() Examples
The following are 21 code examples of scipy.sparse.SparseEfficiencyWarning(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.sparse, or try the search function.
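Before the examples, here is a minimal sketch (not taken from any of the projects below) of what typically raises the warning: assigning to an element that is not already stored in a CSR/CSC matrix changes its sparsity structure, and SciPy emits a SparseEfficiencyWarning. The standard way to silence it is warnings.catch_warnings with simplefilter, the same pattern most of the examples use.

import warnings

import numpy as np
from scipy.sparse import SparseEfficiencyWarning, csr_matrix

A = csr_matrix(np.eye(3))
with warnings.catch_warnings():
    # Writing into a position that is currently zero changes the sparsity
    # structure and would warn outside this block.
    warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
    A[1, 2] = 5.0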
Example #1
Source File: test_linsolve.py From Computable with MIT License | 6 votes |
def test_example_comparison(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        row = array([0, 0, 1, 2, 2, 2])
        col = array([0, 2, 2, 0, 1, 2])
        data = array([1, 2, 3, -4, 5, 6])
        sM = csr_matrix((data, (row, col)), shape=(3, 3), dtype=float)
        M = sM.todense()

        row = array([0, 0, 1, 1, 0, 0])
        col = array([0, 2, 1, 1, 0, 0])
        data = array([1, 1, 1, 1, 1, 1])
        sN = csr_matrix((data, (row, col)), shape=(3, 3), dtype=float)
        N = sN.todense()

        sX = spsolve(sM, sN)
        X = scipy.linalg.solve(M, N)

        assert_array_almost_equal(X, sX.todense())
Example #2
Source File: test_expm_multiply.py From Computable with MIT License | 6 votes |
def test_sparse_expm_multiply_interval(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        n = 40
        k = 3
        endpoint = True
        for num in (14, 13, 2):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            v = np.random.randn(n)
            for target in (B, v):
                X = expm_multiply(A, target, start=start, stop=stop,
                                  num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop,
                                      num=num, endpoint=endpoint)
                for solution, t in zip(X, samples):
                    assert_allclose(solution,
                                    scipy.linalg.expm(t*A).dot(target))
Example #3
Source File: test_expm_multiply.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_sparse_expm_multiply_interval(self):
    np.random.seed(1234)
    start = 0.1
    stop = 3.2
    n = 40
    k = 3
    endpoint = True
    for num in (14, 13, 2):
        A = scipy.sparse.rand(n, n, density=0.05)
        B = np.random.randn(n, k)
        v = np.random.randn(n)
        for target in (B, v):
            X = expm_multiply(A, target, start=start, stop=stop,
                              num=num, endpoint=endpoint)
            samples = np.linspace(start=start, stop=stop,
                                  num=num, endpoint=endpoint)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "splu requires CSC matrix format")
                sup.filter(SparseEfficiencyWarning,
                           "spsolve is more efficient when sparse b is in the CSC matrix format")
                for solution, t in zip(X, samples):
                    assert_allclose(solution,
                                    scipy.linalg.expm(t*A).dot(target))
Example #4
Source File: test_expm_multiply.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_sparse_expm_multiply(self):
    np.random.seed(1234)
    n = 40
    k = 3
    nsamples = 10
    for i in range(nsamples):
        A = scipy.sparse.rand(n, n, density=0.05)
        B = np.random.randn(n, k)
        observed = expm_multiply(A, B)
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "splu requires CSC matrix format")
            sup.filter(SparseEfficiencyWarning,
                       "spsolve is more efficient when sparse b is in the CSC matrix format")
            expected = scipy.linalg.expm(A).dot(B)
        assert_allclose(observed, expected)
Example #5
Source File: test_matfuncs.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_padecases_dtype_sparse_float(self):
    # float32 and complex64 lead to errors in spsolve/UMFpack
    dtype = np.float64
    for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
        a = scale * speye(3, 3, dtype=dtype, format='csc')
        e = exp(scale) * eye(3, dtype=dtype)
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a csc_matrix is expensive.")
            exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
            inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
        assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
        assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
Example #6
Source File: similarity.py From polara with MIT License | 5 votes |
def set_diagonal_values(mat, val=1):
    # disable warning when setting diagonal elements of sparse similarity matrix
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=SparseEfficiencyWarning)
        mat.setdiag(val)
Example #7
Source File: test_input_b.py From PyPardisoProject with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_input_b_sparse():
    A, b = create_test_A_b_rand()
    bsparse = sp.csr_matrix(b)
    with pytest.warns(SparseEfficiencyWarning):
        x = ps.solve(A, bsparse)
        np.testing.assert_array_almost_equal(A*x, b)
Example #8
Source File: matrix.py From scprep with GNU General Public License v3.0 | 5 votes |
def _no_warning_dia_matrix(*args, **kwargs):
    """Helper function to silently create diagonal matrix"""
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            category=sparse.SparseEfficiencyWarning,
            message="Constructing a DIA matrix with [0-9]*"
            " diagonals is inefficient",
        )
        return sparse.dia_matrix(*args, **kwargs)
Example #9
Source File: test_umfpack.py From scikit-umfpack with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setUp(self):
    self.mgr = warnings.catch_warnings()
    self.mgr.__enter__()
    warnings.simplefilter('ignore', SparseEfficiencyWarning)
Example #10
Source File: test_interface.py From scikit-umfpack with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setUp(self):
    self.a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
    self.b = np.array([1, 2, 3, 4, 5], dtype=np.float64)
    self.b2 = np.array([5, 4, 3, 2, 1], dtype=np.float64)

    self.mgr = warnings.catch_warnings()
    self.mgr.__enter__()
    warnings.simplefilter('ignore', SparseEfficiencyWarning)
Example #11
Source File: test_m_phate.py From m-phate with GNU General Public License v3.0 | 5 votes |
def test_diagonalize_interslice_kernels():
    n = 15
    m = 8
    kernels = [np.arange(n**2).reshape(n, n) + i * n**2 for i in range(m)]
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=sparse.SparseEfficiencyWarning)
        K = m_phate.kernel._diagonalize_interslice_kernels(kernels, method='csr')
        D = m_phate.kernel._diagonalize_interslice_kernels(kernels, method='dia')
        assert (D.tocsr() - K).nnz == 0
Example #12
Source File: test_matfuncs.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_padecases_dtype_sparse_complex(self):
    # float32 and complex64 lead to errors in spsolve/UMFpack
    dtype = np.complex128
    for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
        a = scale * speye(3, 3, dtype=dtype, format='csc')
        e = exp(scale) * eye(3, dtype=dtype)
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a csc_matrix is expensive.")
            assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
Example #13
Source File: test_linsolve.py From Computable with MIT License | 5 votes |
def test_non_square(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        # A is not square.
        A = ones((3, 4))
        b = ones((4, 1))
        assert_raises(ValueError, spsolve, A, b)
        # A2 and b2 have incompatible shapes.
        A2 = csc_matrix(eye(3))
        b2 = array([1.0, 2.0])
        assert_raises(ValueError, spsolve, A2, b2)
Example #14
Source File: test_iterative.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_leftright_precond(self):
    """Check that QMR works with left and right preconditioners"""
    from scipy.sparse.linalg.dsolve import splu
    from scipy.sparse.linalg.interface import LinearOperator

    n = 100
    dat = ones(n)
    A = spdiags([-2*dat, 4*dat, -dat], [-1, 0, 1], n, n)
    b = arange(n, dtype='d')

    L = spdiags([-dat/2, dat], [-1, 0], n, n)
    U = spdiags([4*dat, -dat], [0, 1], n, n)
    with suppress_warnings() as sup:
        sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
        L_solver = splu(L)
        U_solver = splu(U)

    def L_solve(b):
        return L_solver.solve(b)

    def U_solve(b):
        return U_solver.solve(b)

    def LT_solve(b):
        return L_solver.solve(b, 'T')

    def UT_solve(b):
        return U_solver.solve(b, 'T')

    M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
    M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

    assert_equal(info, 0)
    assert_normclose(A*x, b, tol=1e-8)
Example #15
Source File: test_extmath.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_randomized_svd_sparse_warnings():
    # randomized_svd throws a warning for lil and dok matrix
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
    n_components = 5
    for cls in (sparse.lil_matrix, sparse.dok_matrix):
        X = cls(X)
        assert_warns_message(
            sparse.SparseEfficiencyWarning,
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(cls.__name__),
            randomized_svd, X, n_components, n_iter=1,
            power_iteration_normalizer='none')
Example #16
Source File: test_expm_multiply.py From Computable with MIT License | 5 votes |
def test_sparse_expm_multiply(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            expected = scipy.linalg.expm(A).dot(B)
            assert_allclose(observed, expected)
Example #17
Source File: test_matfuncs.py From Computable with MIT License | 5 votes |
def test_padecases_dtype_sparse_complex(self):
    # float32 and complex64 lead to errors in spsolve/UMFpack
    dtype = np.complex128
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
            a = scale * speye(3, 3, dtype=dtype, format='csc')
            e = exp(scale) * eye(3, dtype=dtype)
            assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
Example #18
Source File: test_iterative.py From Computable with MIT License | 5 votes |
def test_leftright_precond(self):
    """Check that QMR works with left and right preconditioners"""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100
        dat = ones(n)
        A = spdiags([-2*dat, 4*dat, -dat], [-1, 0, 1], n, n)
        b = arange(n, dtype='d')

        L = spdiags([-dat/2, dat], [-1, 0], n, n)
        U = spdiags([4*dat, -dat], [0, 1], n, n)

        L_solver = splu(L)
        U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)

        def U_solve(b):
            return U_solver.solve(b)

        def LT_solve(b):
            return L_solver.solve(b, 'T')

        def UT_solve(b):
            return U_solver.solve(b, 'T')

        M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
        M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

        assert_equal(info, 0)
        assert_normclose(A*x, b, tol=1e-8)
Example #19
Source File: test_linsolve.py From Computable with MIT License | 5 votes |
def test_spilu_smoketest(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        # Check that spilu works at all
        x = random.rand(self.n)
        lu = spilu(self.A, drop_tol=1e-2, fill_factor=5)

        r = self.A * lu.solve(x)
        assert_(abs(x - r).max() < 1e-2)
        assert_(abs(x - r).max() > 1e-5)
Example #20
Source File: test_linsolve.py From Computable with MIT License | 5 votes |
def test_splu_smoketest(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        # Check that splu works at all
        x = random.rand(self.n)
        lu = splu(self.A)
        r = self.A * lu.solve(x)
        assert_(abs(x - r).max() < 1e-13)
Example #21
Source File: permutation_importance.py From interpret-community with MIT License | 4 votes |
def _compute_sparse_metric(self, dataset, col_idx, subset_idx, random_indexes,
                           shuffled_dataset, predict_function, true_labels,
                           base_metric, global_importance_values):
    """Shuffle a sparse dataset column and compute the feature importance metric.

    :param dataset: Dataset used as a reference point for getting column indexes per row.
    :type dataset: scipy.csc
    :param col_idx: The column index.
    :type col_idx: int
    :param subset_idx: The subset index.
    :type subset_idx: int
    :param random_indexes: Generated random indexes.
    :type random_indexes: numpy.ndarray
    :param shuffled_dataset: The dataset to shuffle.
    :type shuffled_dataset: scipy.csr
    :param predict_function: The prediction function.
    :type predict_function: function
    :param true_labels: The true labels.
    :type true_labels: numpy.ndarray
    :param base_metric: Base metric for unshuffled dataset.
    :type base_metric: float
    :param global_importance_values: Pre-allocated array of global importance values.
    :type global_importance_values: numpy.ndarray
    """
    # Get non zero column indexes
    indptr = dataset.indptr
    indices = dataset.indices
    col_nz_indices = indices[indptr[col_idx]:indptr[col_idx + 1]]
    # Sparse optimization: If all zeros, skip the column! Shuffling won't make a difference to metric.
    if col_nz_indices.size == 0:
        return
    data = dataset.data
    # Replace non-zero indexes with shuffled indexes
    col_random_indexes = random_indexes[0:len(col_nz_indices)]
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', SparseEfficiencyWarning)
        # Shuffle the sparse column indexes
        shuffled_dataset[col_random_indexes, col_idx] = shuffled_dataset[col_nz_indices, col_idx].T
        # Get set difference and zero-out indexes that had a value but now should be zero
        difference_nz_random = list(set(col_nz_indices).difference(set(col_random_indexes)))
        difference_random_nz = list(set(col_random_indexes).difference(set(col_nz_indices)))
        # Set values that should not be sparse explicitly to zeros
        shuffled_dataset[difference_nz_random, col_idx] = np.zeros((len(difference_nz_random)),
                                                                   dtype=data.dtype)
        if self.explain_subset:
            idx = subset_idx
        else:
            idx = col_idx
        self._add_metric(predict_function, shuffled_dataset, true_labels,
                         base_metric, global_importance_values, idx)
        # Restore column back to previous state by undoing shuffle
        shuffled_dataset[col_nz_indices, col_idx] = shuffled_dataset[col_random_indexes, col_idx].T
        shuffled_dataset[difference_random_nz, col_idx] = np.zeros((len(difference_random_nz)),
                                                                   dtype=data.dtype)