Python scipy.sparse.dia_matrix() Examples
The following are 30 code examples of scipy.sparse.dia_matrix(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.sparse, or try the search function.
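Before the project examples, here is a minimal, self-contained sketch of the dia_matrix constructor itself (variable names chosen purely for illustration): the data array stores the diagonals row by row, and offsets says where each one sits relative to the main diagonal.

import numpy as np
from scipy.sparse import dia_matrix

# Each row of `data` is one diagonal; offsets[k] places data[k]
# (0 = main diagonal, negative = below it, positive = above it).
data = np.array([[1, 2, 3, 4],
                 [1, 2, 3, 4],
                 [1, 2, 3, 4]])
offsets = np.array([0, -1, 2])
A = dia_matrix((data, offsets), shape=(4, 4))
print(A.toarray())
A_csr = A.tocsr()  # convert when row slicing or general arithmetic is needed

Most of the examples below follow this pattern, typically passing a single diagonal with offset 0 to build a sparse diagonal matrix.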
Example #1
Source File: bicluster.py From twitter-stock-recommendation with MIT License | 6 votes |
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling
    factors.
    """
    X = make_nonnegative(X)
    row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
    col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
    row_diag = np.where(np.isnan(row_diag), 0, row_diag)
    col_diag = np.where(np.isnan(col_diag), 0, col_diag)
    if issparse(X):
        n_rows, n_cols = X.shape
        r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        an = r * X * c
    else:
        an = row_diag[:, np.newaxis] * X * col_diag
    return an, row_diag, col_diag
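The two DIA matrices above act as row and column scalers: left-multiplying by a diagonal matrix scales rows, right-multiplying scales columns. A tiny illustration of that identity (not part of the original project; values chosen arbitrarily):

import numpy as np
from scipy.sparse import csr_matrix, dia_matrix

X = csr_matrix(np.array([[1.0, 2.0, 3.0],
                         [4.0, 5.0, 6.0]]))
r = dia_matrix((np.array([10.0, 100.0]), [0]), shape=(2, 2))
c = dia_matrix((np.array([1.0, 2.0, 3.0]), [0]), shape=(3, 3))
print((r * X * c).toarray())  # row i scaled by r[i, i], column j by c[j, j]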
Example #2
Source File: reranking.py From Landmark2019-1st-and-3rd-Place-Solution with Apache License 2.0 | 6 votes |
def get_laplacian(self, sims, ids, alpha=0.99):
    """Get Laplacian_alpha matrix
    """
    logger.info('get_affinity')
    affinity = self.get_affinity(sims, ids)
    logger.info('get_affinity ... done')
    num = affinity.shape[0]
    degrees = affinity @ np.ones(num) + 1e-12
    # mat: degree matrix ^ (-1/2)
    mat = sparse.dia_matrix(
        (degrees ** (-0.5), [0]),
        shape=(num, num), dtype=np.float32)
    logger.info('calc stochastic = mat @ affinity @ mat')
    stochastic = mat @ affinity @ mat
    sparse_eye = sparse.dia_matrix(
        (np.ones(num), [0]),
        shape=(num, num), dtype=np.float32)
    lap_alpha = sparse_eye - alpha * stochastic
    return lap_alpha

    # @cache('affinity.jbl')
Example #3
Source File: kernel.py From m-phate with GNU General Public License v3.0 | 6 votes |
def _diagonalize_interslice_kernels(interslice_kernels, method='dia'):
    n = interslice_kernels[0].shape[0]
    m = len(interslice_kernels)
    if method == 'dia':
        diags = np.zeros((2*n-1, n*m))
        for vertex, A in enumerate(interslice_kernels):
            diags[(np.tile(np.arange(n, 2*n), n) - np.repeat(np.arange(1, n+1), n),
                   np.tile(np.arange(0, n*m, m), n) + vertex)] = A.flatten()
        offsets = np.arange(-n*m + m, n*m, m)
        # set main diagonal to 0
        diags[offsets == 0] = 0
        K = sparse.dia_matrix((diags, offsets), shape=(n*m, n*m))
    else:
        assert method == 'csr'
        # this is inefficient
        K = sparse.csr_matrix((n*m, n*m))
        for vertex, A in enumerate(interslice_kernels):
            K = graphtools.matrix.set_submatrix(
                K, np.arange(n) * m + vertex, np.arange(n) * m + vertex, A)
        # set main diagonal to 0
        K = graphtools.matrix.set_diagonal(K, 0)
    return K
Example #4
Source File: channel.py From Pruned-DFT-s-FBMC_Python with GNU General Public License v3.0 | 6 votes |
def new_realization(self):
    """
    Generate a new channel realization and save the time-variant convolution matrix
    in "self.imp_convolution_matrix"
    """
    if self.imp_pdp_string == 'AWGN':
        impulse_response_squeezed = np.ones((self.nr_samples, 1))
    elif self.imp_pdp_string == 'Flat':
        h = np.random.randn(1) + 1j * np.random.randn(1)
        impulse_response_squeezed = h * np.ones((self.nr_samples, 1))
    else:
        impulse_response_squeezed = np.zeros((self.nr_samples, self.imp_pdp_index.size), dtype=np.complex)
        t = np.arange(self.nr_samples).reshape(self.nr_samples, 1) * self.phy_dt
        # Use a for loop because it is more memory efficient (but takes longer)
        for i in range(self.imp_nr_paths_wssus):
            doppler_shifts = np.cos(np.random.random((1, self.imp_pdp_index.size)) * 2 * np.pi) * self.phy_max_doppler_shift
            random_phase = np.random.random((1, self.imp_pdp_index.size))
            argument = 2 * np.pi * (random_phase + doppler_shifts * t)
            impulse_response_squeezed += np.cos(argument) + 1j * np.sin(argument)
        impulse_response_squeezed *= np.sqrt(self.imp_pdp[self.imp_pdp_index] / self.imp_nr_paths_wssus)
    self.imp_convolution_matrix = sparse.dia_matrix(
        (np.transpose(impulse_response_squeezed), -self.imp_pdp_index),
        shape=(self.nr_samples, self.nr_samples)).tocsr()
Example #5
Source File: bicluster.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling
    factors.
    """
    X = make_nonnegative(X)
    row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
    col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
    row_diag = np.where(np.isnan(row_diag), 0, row_diag)
    col_diag = np.where(np.isnan(col_diag), 0, col_diag)
    if issparse(X):
        n_rows, n_cols = X.shape
        r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        an = r * X * c
    else:
        an = row_diag[:, np.newaxis] * X * col_diag
    return an, row_diag, col_diag
Example #6
Source File: zeropi.py From scqubits with BSD 3-Clause "New" or "Revised" License | 6 votes |
def sparse_kinetic_mat(self):
    """
    Kinetic energy portion of the Hamiltonian.
    TODO: update this method to use single-variable operator methods

    Returns
    -------
    scipy.sparse.csc_matrix
        matrix representing the kinetic energy operator
    """
    pt_count = self.grid.pt_count
    dim_theta = 2 * self.ncut + 1
    identity_phi = sparse.identity(pt_count, format='csc', dtype=np.complex_)
    identity_theta = sparse.identity(dim_theta, format='csc', dtype=np.complex_)

    kinetic_matrix_phi = self.grid.second_derivative_matrix(prefactor=-2.0 * self.ECJ)

    diag_elements = 2.0 * self.ECS * np.square(np.arange(-self.ncut + self.ng, self.ncut + 1 + self.ng))
    kinetic_matrix_theta = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()

    kinetic_matrix = (sparse.kron(kinetic_matrix_phi, identity_theta, format='csc')
                      + sparse.kron(identity_phi, kinetic_matrix_theta, format='csc'))

    kinetic_matrix -= 2.0 * self.ECS * self.dCJ * self.i_d_dphi_operator() * self.n_theta_operator()
    return kinetic_matrix
Example #7
Source File: zeropi_full.py From scqubits with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _zeropi_operator_in_product_basis(self, zeropi_operator, zeropi_evecs=None):
    """Helper method that converts a zeropi operator into one in the product basis.

    Returns
    -------
    scipy.sparse.csc_matrix
        operator written in the product basis
    """
    zeropi_dim = self.zeropi_cutoff
    zeta_dim = self.zeta_cutoff

    if zeropi_evecs is None:
        _, zeropi_evecs = self._zeropi.eigensys(evals_count=zeropi_dim)

    op_eigen_basis = sparse.dia_matrix((zeropi_dim, zeropi_dim),
                                       dtype=np.complex_)  # is this guaranteed to be zero?

    op_zeropi = spec_utils.get_matrixelement_table(zeropi_operator, zeropi_evecs)
    for n in range(zeropi_dim):
        for m in range(zeropi_dim):
            op_eigen_basis += op_zeropi[n, m] * op.hubbard_sparse(n, m, zeropi_dim)

    return sparse.kron(op_eigen_basis,
                       sparse.identity(zeta_dim, format='csc', dtype=np.complex_),
                       format='csc')
Example #8
Source File: bicluster.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling
    factors.
    """
    X = make_nonnegative(X)
    row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
    col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
    row_diag = np.where(np.isnan(row_diag), 0, row_diag)
    col_diag = np.where(np.isnan(col_diag), 0, col_diag)
    if issparse(X):
        n_rows, n_cols = X.shape
        r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        an = r * X * c
    else:
        an = row_diag[:, np.newaxis] * X * col_diag
    return an, row_diag, col_diag
Example #9
Source File: diffusion.py From diffusion with MIT License | 6 votes |
def get_laplacian(self, sims, ids, alpha=0.99):
    """Get Laplacian_alpha matrix
    """
    affinity = self.get_affinity(sims, ids)
    num = affinity.shape[0]
    degrees = affinity @ np.ones(num) + 1e-12
    # mat: degree matrix ^ (-1/2)
    mat = sparse.dia_matrix(
        (degrees ** (-0.5), [0]),
        shape=(num, num), dtype=np.float32)
    stochastic = mat @ affinity @ mat
    sparse_eye = sparse.dia_matrix(
        (np.ones(num), [0]),
        shape=(num, num), dtype=np.float32)
    lap_alpha = sparse_eye - alpha * stochastic
    return lap_alpha

    # @cache('affinity.jbl')
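The dia_matrix calls above build D^(-1/2), the inverse square root of the degree matrix, and an identity matrix, giving lap_alpha = I - alpha * D^(-1/2) A D^(-1/2). A tiny self-contained sketch of the same construction on a 3x3 affinity matrix (illustrative only, not taken from the project):

import numpy as np
from scipy import sparse

affinity = sparse.csr_matrix(np.array([[0.0, 1.0, 1.0],
                                       [1.0, 0.0, 0.0],
                                       [1.0, 0.0, 0.0]], dtype=np.float32))
num = affinity.shape[0]
degrees = affinity @ np.ones(num) + 1e-12
d_inv_sqrt = sparse.dia_matrix((degrees ** (-0.5), [0]),
                               shape=(num, num), dtype=np.float32)
lap_alpha = sparse.eye(num, dtype=np.float32) - 0.99 * (d_inv_sqrt @ affinity @ d_inv_sqrt)
print(lap_alpha.toarray())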
Example #10
Source File: laplacian.py From reveal-graph-embedding with Apache License 2.0 | 5 votes |
def get_normalized_laplacian(adjacency_matrix):
    # Calculate diagonal matrix of node degrees.
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
    degree = degree.tocsr()

    # Calculate sparse graph Laplacian.
    adjacency_matrix = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)

    # Calculate inverse square root of diagonal matrix of node degrees.
    degree.data = np.real(1/np.sqrt(degree.data))

    # Calculate sparse normalized graph Laplacian.
    normalized_laplacian = degree*adjacency_matrix*degree

    return normalized_laplacian
Example #11
Source File: randomized_lasso.py From stability-selection with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _rescale_data(X, weights):
    if issparse(X):
        size = weights.shape[0]
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X_rescaled = X * weight_dia
    else:
        X_rescaled = X * (1 - weights)

    return X_rescaled
Example #12
Source File: test_composite_parametrization.py From spins-b with GNU General Public License v3.0 | 5 votes |
def calculate_gradient(self) -> sparse.dia_matrix:
    return 2 * self._scale * sparse.diags(
        self._vector[:-1],
        shape=(self._vector.size - 1, self._vector.size))
Example #13
Source File: utils.py From scprep with GNU General Public License v3.0 | 5 votes |
def matrix_min(data):
    """Get the minimum value from a data matrix.

    Pandas SparseDataFrame does not handle np.min.

    Parameters
    ----------
    data : array-like, shape=[n_samples, n_features]
        Input data

    Returns
    -------
    minimum : float
        Minimum entry in `data`.
    """
    if is_SparseDataFrame(data):
        data = [np.min(data[col]) for col in data.columns]
    elif isinstance(data, pd.DataFrame):
        data = np.min(data)
    elif isinstance(data, sparse.lil_matrix):
        data = [np.min(d) for d in data.data] + [0]
    elif isinstance(data, sparse.dok_matrix):
        data = list(data.values()) + [0]
    elif isinstance(data, sparse.dia_matrix):
        data = [np.min(data.data), 0]
    return np.min(data)
Example #14
Source File: matrix.py From scprep with GNU General Public License v3.0 | 5 votes |
def _no_warning_dia_matrix(*args, **kwargs):
    """Helper function to silently create diagonal matrix"""
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            category=sparse.SparseEfficiencyWarning,
            message="Constructing a DIA matrix with [0-9]*"
            " diagonals is inefficient",
        )
        return sparse.dia_matrix(*args, **kwargs)
Example #15
Source File: matutils.py From discretize with MIT License | 5 votes |
def spzeros(n1, n2):
    """a sparse matrix of zeros"""
    return sp.dia_matrix((n1, n2))
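Passing a plain (n1, n2) tuple to dia_matrix creates an empty, all-zero sparse matrix with no stored diagonals, which is why this one-liner works. For example:

import scipy.sparse as sp

Z = sp.dia_matrix((3, 4))
print(Z.nnz)        # 0 stored entries
print(Z.toarray())  # a 3x4 array of zeros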
Example #16
Source File: base.py From twitter-stock-recommendation with MIT License | 5 votes |
def _rescale_data(X, y, sample_weight):
    """Rescale data so as to support sample_weight"""
    n_samples = X.shape[0]
    sample_weight = sample_weight * np.ones(n_samples)
    sample_weight = np.sqrt(sample_weight)
    sw_matrix = sparse.dia_matrix((sample_weight, 0),
                                  shape=(n_samples, n_samples))
    X = safe_sparse_dot(sw_matrix, X)
    y = safe_sparse_dot(sw_matrix, y)
    return X, y
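The trick here is that multiplying both X and y by a diagonal matrix of sqrt(sample_weight) values turns an ordinary least-squares fit into a weighted one. A hypothetical usage sketch with made-up data (not part of the original project):

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[1.0, 2.0],
                                [3.0, 4.0],
                                [5.0, 6.0]]))
y = np.array([1.0, 2.0, 3.0])
sample_weight = np.array([1.0, 4.0, 9.0])

sw = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sw, 0), shape=(3, 3))
X_w = sw_matrix @ X  # row i of X scaled by sqrt(sample_weight[i])
y_w = sw_matrix @ y  # targets scaled the same way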
Example #17
Source File: laplacian.py From reveal-graph-embedding with Apache License 2.0 | 5 votes |
def get_unnormalized_laplacian(adjacency_matrix):
    # Calculate diagonal matrix of node degrees.
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
    degree = degree.tocsr()

    # Calculate sparse graph Laplacian.
    laplacian = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)

    return laplacian
Example #18
Source File: randomized_l1.py From twitter-stock-recommendation with MIT License | 5 votes |
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)

    C = np.atleast_1d(np.asarray(C, dtype=np.float64))
    if C.ndim > 1:
        raise ValueError("C should be 1-dimensional array-like, "
                         "but got a {}-dimensional array-like instead: {}."
                         .format(C.ndim, C))

    scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)

    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
    return scores
Example #19
Source File: large_sparse_functions.py From megaman with BSD 2-Clause "Simplified" License | 5 votes |
def set_sparse_diag_to_one(mat):
    # appears to implicitly convert to csr which might be a problem
    (n, n) = mat.shape
    # copy the matrix, subtract the diagonal values, add identity matrix
    # see http://nbviewer.jupyter.org/gist/Midnighter/9992103 for speed testing
    cpy = mat - dia_matrix((mat.diagonal()[sp.newaxis, :], [0]), shape=(n, n)) + identity(n)
    return(cpy)
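A quick illustrative call (not from megaman; it assumes the module's own imports are in scope): apply the helper to a small symmetric similarity matrix, and the off-diagonal entries stay put while the diagonal becomes all ones.

import numpy as np
from scipy.sparse import csr_matrix

S = csr_matrix(np.array([[0.5, 0.2, 0.0],
                         [0.2, 0.7, 0.1],
                         [0.0, 0.1, 0.9]]))
S_one_diag = set_sparse_diag_to_one(S)
print(S_one_diag.toarray())  # same off-diagonals, diagonal replaced by 1.0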
Example #20
Source File: laplacian.py From reveal-graph-embedding with Apache License 2.0 | 5 votes |
def get_random_walk_laplacian(adjacency_matrix):
    # Calculate diagonal matrix of node degrees.
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
    degree = degree.tocsr()

    # Calculate sparse graph Laplacian.
    adjacency_matrix = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)

    # Calculate inverse of diagonal matrix of node degrees.
    degree.data = np.real(1/degree.data)

    # Calculate sparse normalized graph Laplacian.
    random_walk_laplacian = degree*adjacency_matrix

    return random_walk_laplacian
Example #21
Source File: transition.py From reveal-graph-embedding with Apache License 2.0 | 5 votes |
def get_label_based_random_walk_matrix(adjacency_matrix, labelled_nodes, label_absorption_probability):
    """
    Returns the label-absorbing random walk transition probability matrix.

    Input:  - A: A sparse matrix that contains the adjacency matrix of the graph.

    Output: - W: A sparse matrix that contains the natural random walk transition probability matrix.
    """
    # Turn to sparse.csr_matrix format for faster row access.
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64)

    # Sum along the two axes to get out-degree and in-degree, respectively
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)

    # Form the inverse of the diagonal matrix containing the out-degree
    for i in np.arange(rw_transition.shape[0]):
        rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]] =\
            rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]]/out_degree[i]

    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)

    # When the random walk agent encounters a labelled node, there is a probability that it will be absorbed.
    diag = np.zeros_like(out_degree)
    diag[labelled_nodes] = 1.0
    diag = sparse.dia_matrix((diag, [0]), shape=(in_degree.size, in_degree.size))
    diag = sparse.csr_matrix(diag)

    rw_transition[labelled_nodes, :] = (1-label_absorption_probability)*rw_transition[labelled_nodes, :] + label_absorption_probability*diag[labelled_nodes, :]

    return rw_transition, out_degree, in_degree
Example #22
Source File: signal_base.py From enterprise with MIT License | 5 votes |
def _add_diag(self, other):
    other_diag = sps.dia_matrix(
        (other, np.array([0])),
        shape=(other.shape[0], other.shape[0]))
    return self._binopt(other_diag, "_plus_")
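The same idea works outside the class: to add a vector to the diagonal of a sparse matrix, wrap it in a single-diagonal dia_matrix and add. A small illustrative sketch (not from the enterprise codebase):

import numpy as np
from scipy import sparse as sps

M = sps.random(5, 5, density=0.4, format='csr', random_state=0)
d = np.arange(1.0, 6.0)
d_dia = sps.dia_matrix((d, np.array([0])), shape=(5, 5))
M_plus_d = M + d_dia  # M with d added along its main diagonal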
Example #23
Source File: randomized_l1.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)

    C = np.atleast_1d(np.asarray(C, dtype=np.float64))
    if C.ndim > 1:
        raise ValueError("C should be 1-dimensional array-like, "
                         "but got a {}-dimensional array-like instead: {}."
                         .format(C.ndim, C))

    scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)

    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
    return scores
Example #24
Source File: base.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _rescale_data(X, y, sample_weight):
    """Rescale data so as to support sample_weight"""
    n_samples = X.shape[0]
    sample_weight = sample_weight * np.ones(n_samples)
    sample_weight = np.sqrt(sample_weight)
    sw_matrix = sparse.dia_matrix((sample_weight, 0),
                                  shape=(n_samples, n_samples))
    X = safe_sparse_dot(sw_matrix, X)
    y = safe_sparse_dot(sw_matrix, y)
    return X, y
Example #25
Source File: Recommender_utils.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def check_matrix(X, format='csc', dtype=np.float32):
    """
    This function takes a matrix as input and transforms it into the specified format.
    The matrix in input can be either sparse or ndarray.
    If the matrix in input has already the desired format, it is returned as-is
    the dtype parameter is always applied and the default is np.float32
    :param X:
    :param format:
    :param dtype:
    :return:
    """

    if format == 'csc' and not isinstance(X, sps.csc_matrix):
        return X.tocsc().astype(dtype)
    elif format == 'csr' and not isinstance(X, sps.csr_matrix):
        return X.tocsr().astype(dtype)
    elif format == 'coo' and not isinstance(X, sps.coo_matrix):
        return X.tocoo().astype(dtype)
    elif format == 'dok' and not isinstance(X, sps.dok_matrix):
        return X.todok().astype(dtype)
    elif format == 'bsr' and not isinstance(X, sps.bsr_matrix):
        return X.tobsr().astype(dtype)
    elif format == 'dia' and not isinstance(X, sps.dia_matrix):
        return X.todia().astype(dtype)
    elif format == 'lil' and not isinstance(X, sps.lil_matrix):
        return X.tolil().astype(dtype)
    elif format == 'npy':
        if sps.issparse(X):
            return X.toarray().astype(dtype)
        else:
            return np.array(X)
    elif isinstance(X, np.ndarray):
        X = sps.csr_matrix(X, dtype=dtype)
        X.eliminate_zeros()
        return check_matrix(X, format=format, dtype=dtype)
    else:
        return X.astype(dtype)
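An illustrative call (input chosen for demonstration, assuming the module's sps/np aliases): converting a CSR matrix to DIA format with float32 entries goes through the 'dia' branch and its .todia() call.

import numpy as np
import scipy.sparse as sps

X = sps.csr_matrix(np.diag([1.0, 2.0, 3.0]))
X_dia = check_matrix(X, format='dia', dtype=np.float32)
print(type(X_dia), X_dia.dtype)  # a scipy.sparse DIA matrix with float32 data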
Example #26
Source File: empca.py From aitom with GNU General Public License v3.0 | 5 votes |
def _solve(A, b, w):
    """
    Solve Ax = b with weights w; return x

    A : 2D array
    b : 1D array length A.shape[0]
    w : 1D array same length as b
    """

    # - Apply weights
    # nvar = len(w)
    # W = dia_matrix((w, 0), shape=(nvar, nvar))
    # bx = A.T.dot( W.dot(b) )
    # Ax = A.T.dot( W.dot(A) )
    b = A.T.dot(w*b)
    A = A.T.dot((A.T * w).T)

    if isinstance(A, scipy.sparse.spmatrix):
        x = scipy.sparse.linalg.spsolve(A, b)
    else:
        # x = N.linalg.solve(A, b)
        x = N.linalg.lstsq(A, b, rcond=-1)[0]

    return x

#--------------------------------------------------------------------------
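A hypothetical call to the helper above (not part of the original file; it assumes the module's own numpy alias N is available), solving a small weighted least-squares problem with exactly consistent data:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 3))
x_true = np.array([1.0, -2.0, 0.5])
b = A @ x_true
w = np.ones(50)      # uniform weights
x = _solve(A, b, w)  # x should match x_true up to numerical precision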
Example #27
Source File: nao.py From pyscf with Apache License 2.0 | 5 votes |
def vnl_coo(self):
    """ Non-local part of the Hamiltonian due to Kleinman-Bylander projectors """
    from pyscf.nao.m_overlap_am import overlap_am
    from scipy.sparse import dia_matrix
    sop = self.overlap_coo(ao_log=self.ao_log, ao_log2=self.kb_log, funct=overlap_am).tocsr()
    nkb = sop.shape[1]
    vkb_dia = dia_matrix((self.get_vkb(), [0]), shape=(nkb, nkb))
    return ((sop*vkb_dia)*sop.T).tocoo()
Example #28
Source File: base.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def _rescale_data(X, y, sample_weight):
    """Rescale data so as to support sample_weight"""
    n_samples = X.shape[0]
    sample_weight = np.full(n_samples, sample_weight,
                            dtype=np.array(sample_weight).dtype)
    sample_weight = np.sqrt(sample_weight)
    sw_matrix = sparse.dia_matrix((sample_weight, 0),
                                  shape=(n_samples, n_samples))
    X = safe_sparse_dot(sw_matrix, X)
    y = safe_sparse_dot(sw_matrix, y)
    return X, y
Example #29
Source File: gaussian.py From edm2016 with Apache License 2.0 | 5 votes |
def get_subset_cpd(self, sub_idx):
    """ Get the cpd over a subset of the variables.

    :param np.ndarray[int]|np.ndarray[bool] sub_idx: indices of variables to keep
    :return: a new Gaussian CPD
    :rtype: GaussianCPD
    """
    if len(sub_idx) == 0 or (sub_idx.dtype == bool and not np.sum(sub_idx)):
        raise ValueError("sub_idx must not be empty")
    sub_mean = self.mean[sub_idx]
    sub_dim = len(sub_mean)
    if isinstance(self.precision, sp.dia_matrix):
        sub_precision = sp.dia_matrix((self.precision.diagonal()[sub_idx], np.zeros(1)),
                                      shape=(sub_dim, sub_dim))
    elif np.isscalar(self.precision):
        sub_precision = self.precision
    elif isinstance(self.precision, np.ndarray):
        if np.prod(self.precision.shape) == self.dim:
            sub_precision = self.precision[sub_idx]
        else:
            # We do the indexing this way for performance reasons.
            sub_precision = self.precision[sub_idx, :][:, sub_idx]
    else:
        # We do the indexing this way for performance reasons.
        sub_precision = self.precision.tocsr()[sub_idx, :][:, sub_idx]
    return GaussianCPD(dim=sub_dim, mean=sub_mean, precision=sub_precision,
                       mean_lin_op=get_subset_lin_op(self.mean_lin_op, sub_idx))
Example #30
Source File: zeropi_full.py From scqubits with BSD 3-Clause "New" or "Revised" License | 5 votes |
def hamiltonian(self, return_parts=False):
    """Returns Hamiltonian in basis obtained by discretizing phi, employing charge basis for theta,
    and Fock basis for zeta.

    Parameters
    ----------
    return_parts: bool, optional
        If set to true, `hamiltonian` returns [hamiltonian, evals, evecs, g_coupling_matrix]

    Returns
    -------
    scipy.sparse.csc_matrix or list
    """
    zeropi_dim = self.zeropi_cutoff
    zeropi_evals, zeropi_evecs = self._zeropi.eigensys(evals_count=zeropi_dim)
    zeropi_diag_hamiltonian = sparse.dia_matrix((zeropi_dim, zeropi_dim), dtype=np.complex_)
    zeropi_diag_hamiltonian.setdiag(zeropi_evals)

    zeta_dim = self.zeta_cutoff
    prefactor = self.E_zeta
    zeta_diag_hamiltonian = op.number_sparse(zeta_dim, prefactor)

    hamiltonian_mat = sparse.kron(zeropi_diag_hamiltonian,
                                  sparse.identity(zeta_dim, format='dia', dtype=np.complex_))
    hamiltonian_mat += sparse.kron(sparse.identity(zeropi_dim, format='dia', dtype=np.complex_),
                                   zeta_diag_hamiltonian)

    gmat = self.g_coupling_matrix(zeropi_evecs)
    zeropi_coupling = sparse.dia_matrix((zeropi_dim, zeropi_dim), dtype=np.complex_)
    for l1 in range(zeropi_dim):
        for l2 in range(zeropi_dim):
            zeropi_coupling += gmat[l1, l2] * op.hubbard_sparse(l1, l2, zeropi_dim)
    hamiltonian_mat += (sparse.kron(zeropi_coupling, op.annihilation_sparse(zeta_dim))
                        + sparse.kron(zeropi_coupling.conjugate().T, op.creation_sparse(zeta_dim)))

    if return_parts:
        return [hamiltonian_mat.tocsc(), zeropi_evals, zeropi_evecs, gmat]

    return hamiltonian_mat.tocsc()