Python numpy.diagflat() Examples
The following are 30 code examples of numpy.diagflat(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
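Before the project examples, here is a minimal standalone sketch of what numpy.diagflat() does: it flattens whatever input it receives and places the values on a diagonal, with an optional offset k. The input values below are illustrative only and not taken from any of the projects.

import numpy as np

# diagflat flattens its input first, so a nested list behaves like a flat one
print(np.diagflat([[1, 2], [3, 4]]))
# [[1 0 0 0]
#  [0 2 0 0]
#  [0 0 3 0]
#  [0 0 0 4]]

# the optional k shifts the diagonal (here: one above the main diagonal)
print(np.diagflat([1, 2], k=1))
# [[0 1 0]
#  [0 0 2]
#  [0 0 0]]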
Example #1
Source File: lap.py From BioNEV with MIT License | 6 votes |
def getLap(self):
    # degree_mat = np.diagflat(np.sum(self.adj_mat, axis=1))
    # print('np.diagflat(np.sum(self.adj_mat, axis=1))')
    # deg_trans = np.diagflat(np.reciprocal(np.sqrt(np.sum(self.adj_mat, axis=1))))
    # print('np.diagflat(np.reciprocal(np.sqrt(np.sum(self.adj_mat, axis=1))))')
    # deg_trans = np.nan_to_num(deg_trans)
    # L = degree_mat - self.adj_mat
    # print('begin norm_lap_mat')
    # # eye = np.eye(self.node_size)
    # # norm_lap_mat = np.matmul(np.matmul(deg_trans, L), deg_trans)
    G = self.g.G.to_undirected()
    print('begin norm_lap_mat')
    norm_lap_mat = nx.normalized_laplacian_matrix(G)
    print('finish norm_lap_mat')
    return norm_lap_mat
Example #2
Source File: hope.py From OpenNE with MIT License | 6 votes |
def learn_embedding(self):
    graph = self.g.G
    A = nx.to_numpy_matrix(graph)
    # self._beta = 0.0728
    # M_g = np.eye(graph.number_of_nodes()) - self._beta * A
    # M_l = self._beta * A
    M_g = np.eye(graph.number_of_nodes())
    M_l = np.dot(A, A)
    S = np.dot(np.linalg.inv(M_g), M_l)
    # s: \sigma_k
    u, s, vt = lg.svds(S, k=self._d // 2)
    sigma = np.diagflat(np.sqrt(s))
    X1 = np.dot(u, sigma)
    X2 = np.dot(vt.T, sigma)
    # self._X = X2
    self._X = np.concatenate((X1, X2), axis=1)
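The scaling by np.diagflat(np.sqrt(s)) splits the truncated SVD symmetrically between the two embedding halves, so their product recovers the low-rank part of S. A rough standalone sketch of the same idea on a random matrix; S and d here are placeholders, not the OpenNE class attributes.

import numpy as np
import scipy.sparse.linalg as lg

S = np.random.rand(20, 20)           # stand-in for the similarity matrix
d = 8                                # stand-in for the embedding dimension
u, s, vt = lg.svds(S, k=d // 2)
sigma = np.diagflat(np.sqrt(s))      # diagonal matrix of sqrt singular values
X1, X2 = u @ sigma, vt.T @ sigma     # "source" and "target" embedding halves
# X1 @ X2.T equals the rank-(d//2) reconstruction u @ diag(s) @ vt
print(np.allclose(X1 @ X2.T, (u * s) @ vt))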
Example #3
Source File: transform.py From K3D-jupyter with MIT License | 6 votes |
def get_bounds_fit_matrix(xmin, xmax, ymin, ymax, zmin, zmax):
    """Create a 4x4 transform matrix which maps the default bounding box ([-0.5, 0.5] in all
    dimensions) into a custom bounding box ([xmin, xmax, ymin, ymax, zmin, zmax]).

    It is used for fitting fields (VectorField, Surface, MarchingCubes) into custom domains."""
    for name, value in locals().items():
        try:
            float(value)
        except (TypeError, ValueError):
            raise TypeError('%s: expected float, %s given' % (name, type(value).__name__))

    matrix = np.diagflat(np.array((xmax - xmin, ymax - ymin, zmax - zmin, 1.0), np.float32, order='C'))
    matrix[0:3, 3] = ((xmax + xmin) / 2.0, (ymax + ymin) / 2.0, (zmax + zmin) / 2.0)

    return matrix
Example #4
Source File: qchem_inter_rf.py From pyscf with Apache License 2.0 | 6 votes |
def kernel_qchem_inter_rf_pos_neg(self, **kw):
    """ This is constructing the E_m-E_n and E_n-E_m matrices """
    h_rpa = diagflat(concatenate((ravel(self.FmE), -ravel(self.FmE))))
    print(h_rpa.shape)

    nf = self.nfermi[0]
    nv = self.norbs - self.vstart[0]
    vs = self.vstart[0]
    neh = nf * nv

    x = self.mo_coeff[0, 0, :, :, 0]
    pab2v = self.pb.get_ac_vertex_array()
    self.pmn2v = pmn2v = einsum('nb,pmb->pmn', x[:nf, :], einsum('ma,pab->pmb', x[vs:, :], pab2v))
    pmn2c = einsum('qp,pmn->qmn', self.hkernel_den, pmn2v)
    meri = einsum('pmn,pik->mnik', pmn2c, pmn2v).reshape((nf*nv, nf*nv))
    # print(meri.shape)
    # meri.fill(0.0)
    h_rpa[:neh, :neh] = h_rpa[:neh, :neh] + meri
    h_rpa[:neh, neh:] = h_rpa[:neh, neh:] + meri
    h_rpa[neh:, :neh] = h_rpa[neh:, :neh] - meri
    h_rpa[neh:, neh:] = h_rpa[neh:, neh:] - meri

    edif, s2z = np.linalg.eig(h_rpa)
    print(abs(h_rpa - h_rpa.transpose()).sum())
    print('edif', edif.real*27.2114)
    return
Example #5
Source File: commonrandom.py From systematictradingexamples with GNU General Public License v2.0 | 6 votes |
def threeassetportfolio(plength=5000, SRlist=[1.0, 1.0, 1.0], annual_vol=.15,
                        clist=[.0, .0, .0], index_start=pd.datetime(2000, 1, 1)):
    (c1, c2, c3) = clist
    dindex = arbitrary_timeindex(plength, index_start)

    daily_vol = annual_vol/16.0

    means = [x*annual_vol/250.0 for x in SRlist]
    stds = np.diagflat([daily_vol]*3)
    corr = np.array([[1.0, c1, c2], [c1, 1.0, c3], [c2, c3, 1.0]])

    covs = np.dot(stds, np.dot(corr, stds))
    plength = len(dindex)

    m = np.random.multivariate_normal(means, covs, plength).T

    portreturns = pd.DataFrame(dict(one=m[0], two=m[1], three=m[2]), dindex)
    portreturns = portreturns[['one', 'two', 'three']]

    return portreturns
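The np.diagflat([daily_vol]*3) call builds the diagonal standard-deviation matrix D so that the covariance matrix is D · corr · D. A small check of that identity with made-up volatilities and correlations (not the function's defaults):

import numpy as np

daily_vols = [0.01, 0.02, 0.015]               # illustrative volatilities
corr = np.array([[1.0, 0.3, 0.1],
                 [0.3, 1.0, 0.5],
                 [0.1, 0.5, 1.0]])
D = np.diagflat(daily_vols)
covs = D @ corr @ D
# off-diagonal entries are rho_ij * sigma_i * sigma_j; the diagonal is sigma_i**2
print(np.allclose(np.diag(covs), np.array(daily_vols) ** 2))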
Example #6
Source File: hilbert_space.py From scqubits with BSD 3-Clause "New" or "Revised" License | 6 votes |
def diag_hamiltonian(self, subsystem, evals=None):
    """Returns a `qutip.Qobj` which has the eigenenergies of the object `subsystem` on the diagonal.

    Parameters
    ----------
    subsystem: object derived from `QuantumSystem`
        Subsystem for which the Hamiltonian is to be provided.
    evals: ndarray, optional
        Eigenenergies can be provided as `evals`; otherwise, they are calculated.

    Returns
    -------
    qutip.Qobj operator
    """
    evals_count = subsystem.truncated_dim
    if evals is None:
        evals = subsystem.eigenvals(evals_count=evals_count)
    diag_qt_op = qt.Qobj(inpt=np.diagflat(evals[0:evals_count]))
    return self.identity_wrap(diag_qt_op, subsystem)
Example #7
Source File: operators.py From scqubits with BSD 3-Clause "New" or "Revised" License | 6 votes |
def number(dimension, prefactor=None):
    """Number operator matrix of size dimension x dimension in sparse matrix representation.
    An additional prefactor can be directly included in the generation of the matrix by
    supplying 'prefactor'.

    Parameters
    ----------
    dimension: int
    prefactor: float or complex, optional
        prefactor multiplying the number operator matrix

    Returns
    -------
    ndarray
        number operator matrix, size dimension x dimension
    """
    diag_elements = np.arange(dimension)
    if prefactor:
        diag_elements *= prefactor
    return np.diagflat(diag_elements)
Example #8
Source File: layers.py From numpy-ml with GNU General Public License v3.0 | 6 votes |
def _bwd(self, dLdy, X):
    """
    Actual computation of the gradient of the loss wrt. the input X.

    The Jacobian, J, of the softmax for input x = [x1, ..., xn] is:
        J[i, j] =
            softmax(x_i)  * (1 - softmax(x_j))  if i = j
            -softmax(x_i) * softmax(x_j)        if i != j
    where x_n is input example n (ie., the n'th row in X)
    """
    dX = []
    for dy, x in zip(dLdy, X):
        dxi = []
        for dyi, xi in zip(*np.atleast_2d(dy, x)):
            yi = self._fwd(xi.reshape(1, -1)).reshape(-1, 1)
            dyidxi = np.diagflat(yi) - yi @ yi.T  # jacobian wrt. input sample xi
            dxi.append(dyi @ dyidxi)
        dX.append(dxi)
    return np.array(dX).reshape(*X.shape)
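The line np.diagflat(yi) - yi @ yi.T is the standard softmax Jacobian, diag(y) − y yᵀ. As a quick sanity sketch (the softmax is written inline here and the input values are arbitrary), it can be compared against a finite-difference estimate:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

x = np.array([0.2, -1.0, 0.5])
y = softmax(x).reshape(-1, 1)
J = np.diagflat(y) - y @ y.T                 # analytic Jacobian, diag(y) - y y^T

# central finite-difference approximation of the same Jacobian, column by column
eps = 1e-6
J_fd = np.column_stack([
    (softmax(x + eps * np.eye(3)[i]) - softmax(x - eps * np.eye(3)[i])) / (2 * eps)
    for i in range(3)
])
print(np.allclose(J, J_fd, atol=1e-6))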
Example #9
Source File: hope.py From GPF with MIT License | 6 votes |
def learn_embedding(self):
    graph = self.g.G
    A = nx.to_numpy_matrix(graph)
    # self._beta = 0.0728
    # M_g = np.eye(graph.number_of_nodes()) - self._beta * A
    # M_l = self._beta * A
    M_g = np.eye(graph.number_of_nodes())
    M_l = np.dot(A, A)
    S = np.dot(np.linalg.inv(M_g), M_l)
    # s: \sigma_k
    u, s, vt = lg.svds(S, k=self._d // 2)
    sigma = np.diagflat(np.sqrt(s))
    X1 = np.dot(u, sigma)
    X2 = np.dot(vt.T, sigma)
    # self._X = X2
    self._X = np.concatenate((X1, X2), axis=1)
Example #10
Source File: test_transforms.py From skl-groups with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_flip():
    X = np.diagflat([-2, -1, 0, 1, 2])
    # eigenvalues -2, -1, 0, 1, 2; eigenvectors are I

    Xflip = FlipPSD().fit_transform(X)
    assert np.allclose(Xflip, np.diagflat([2, 1, 0, 1, 2]))

    Xflip2 = FlipPSD().fit(X).transform(X)
    assert np.allclose(Xflip2, np.diagflat([2, 1, 0, 1, 2]))

    Xflip3 = FlipPSD().fit(X).transform(X[:3, :])
    assert np.allclose(Xflip3, [[2, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]])

    assert_raises(TypeError, lambda: FlipPSD().fit(X[:2, :]))
    assert_raises(TypeError, lambda: FlipPSD().fit_transform(X[:2, :]))
    assert_raises(TypeError, lambda: FlipPSD().fit(X).transform(X[:, :2]))
Example #11
Source File: test_transforms.py From skl-groups with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_shift():
    X = np.diagflat([-2., -1, 0, 1, 2])
    # eigenvalues -2, -1, 0, 1, 2; eigenvectors are I

    Xshift = ShiftPSD().fit_transform(X)
    assert np.allclose(Xshift, np.diagflat([0, 1, 2, 3, 4]))

    Xshift2 = ShiftPSD().fit(X).transform(X)
    assert np.allclose(Xshift2, np.diagflat([0, 1, 2, 3, 4]))

    Xshift3 = ShiftPSD().fit(X).transform(X[:3, :])
    assert np.allclose(Xshift3, X[:3, :])

    Xshift4 = ShiftPSD(min_eig=2).fit_transform(X)
    assert np.allclose(Xshift4, np.diagflat([2, 3, 4, 5, 6]))

    assert_raises(TypeError, lambda: ShiftPSD().fit(X[:2, :]))
    assert_raises(TypeError, lambda: ShiftPSD().fit_transform(X[:2, :]))
    assert_raises(TypeError, lambda: ShiftPSD().fit(X).transform(X[:, :2]))
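Both tests lean on the fact that a matrix built with np.diagflat has its diagonal entries as eigenvalues (with the identity as eigenvectors), which makes the expected outputs easy to write down. A brief illustration, under the assumption that shifting by the most negative eigenvalue is what produces the expected np.diagflat([0, 1, 2, 3, 4]) above:

import numpy as np

X = np.diagflat([-2., -1, 0, 1, 2])
eigvals = np.linalg.eigvalsh(X)
print(eigvals)                                   # [-2. -1.  0.  1.  2.]

# shifting by the most negative eigenvalue makes the matrix positive semidefinite,
# which matches the output the ShiftPSD test above expects
shifted = X - min(eigvals.min(), 0) * np.eye(5)
print(np.allclose(shifted, np.diagflat([0., 1, 2, 3, 4])))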
Example #12
Source File: market.py From pyblp with MIT License | 6 votes |
def compute_shares_by_xi_jacobian(self, probabilities: Array, conditionals: Optional[Array]) -> Array:
    """Compute the Jacobian of shares with respect to xi (equivalently, to delta)."""
    diagonal_shares = np.diagflat(self.products.shares)
    weighted_probabilities = self.agents.weights * probabilities.T
    jacobian = diagonal_shares - probabilities @ weighted_probabilities
    if self.epsilon_scale != 1:
        jacobian /= self.epsilon_scale
    if self.H > 0:
        membership = self.get_membership_matrix()
        jacobian += self.rho / (1 - self.rho) * (
            diagonal_shares - membership * (conditionals @ weighted_probabilities)
        )
    return jacobian
Example #13
Source File: test_response.py From threeML with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_matrix_elements():
    # In[5]: np.diagflat([1, 2, 3, 4])[:3, :]
    matrix = np.diagflat([1.0, 2.0, 3.0, 4.0])[:3, :]

    # Now matrix is:
    # array([[1, 0, 0, 0],
    #        [0, 2, 0, 0],
    #        [0, 0, 3, 0]])

    mc_energies = [1.0, 2.0, 3.0, 4.0, 5.0]
    ebounds = [1.0, 2.5, 4.5, 5.0]

    return matrix, mc_energies, ebounds
Example #14
Source File: test_AAA_against_xspec.py From threeML with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_matrix_elements():
    # In[5]: np.diagflat([1, 2, 3, 4])[:3, :]
    matrix = np.diagflat([1.0, 2.0, 3.0, 4.0])[:3, :]

    # Now matrix is:
    # array([[1, 0, 0, 0],
    #        [0, 2, 0, 0],
    #        [0, 0, 3, 0]])

    mc_energies = [1.0, 2.0, 3.0, 4.0, 5.0]
    ebounds = [1.0, 2.5, 4.5, 5.0]

    return matrix, mc_energies, ebounds
Example #15
Source File: hope.py From BioNEV with MIT License | 6 votes |
def learn_embedding(self):
    graph = self.g.G
    A = nx.to_numpy_matrix(graph)
    # self._beta = 0.0728
    # M_g = np.eye(graph.number_of_nodes()) - self._beta * A
    # M_l = self._beta * A
    M_g = np.eye(graph.number_of_nodes())
    M_l = np.dot(A, A)
    S = np.dot(np.linalg.inv(M_g), M_l)
    # s: \sigma_k
    u, s, vt = lg.svds(S, k=self._d // 2)
    sigma = np.diagflat(np.sqrt(s))
    X1 = np.dot(u, sigma)
    X2 = np.dot(vt.T, sigma)
    # self._X = X2
    self._X = np.concatenate((X1, X2), axis=1)
Example #16
Source File: test_umath.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def test_reduce_warns(self):
    # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
    # and put it before the call to an intrisic function that causes
    # invalid status to be set. Also make sure warnings are emitted
    for n in (2, 4, 8, 16, 32):
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning)
            for r in np.diagflat([np.nan] * n):
                assert_equal(np.min(r), np.nan)
            assert_equal(len(sup.log), n)
Example #17
Source File: test_quantity_non_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_diagflat(self):
    self.check(np.diagflat)
Example #18
Source File: models.py From GAMENet with MIT License | 5 votes |
def normalize(self, mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = np.diagflat(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
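Left-multiplying by np.diagflat of the reciprocal row sums, as above, rescales each row so it sums to one. A small standalone sketch of the same normalization on an invented dense matrix:

import numpy as np

mx = np.array([[1., 1., 2.],
               [0., 0., 0.],      # an all-zero row gives inf, hence the isinf guard above
               [3., 1., 0.]])
rowsum = np.array(mx.sum(1))
with np.errstate(divide='ignore'):
    r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = np.diagflat(r_inv)
normalized = r_mat_inv.dot(mx)
print(normalized.sum(1))           # [1. 0. 1.] - each nonzero row now sums to one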
Example #19
Source File: reordering.py From chumpy with MIT License | 5 votes |
def diagflat(v, k=0):
    return DiagFlat(a=v, k=k)
Example #20
Source File: reordering.py From chumpy with MIT License | 5 votes |
def reorder(self, a):
    return np.diagflat(a, self.k)
Example #21
Source File: gaussian.py From MTSAnomalyDetection with Apache License 2.0 | 5 votes |
def multivariateGaussianModel(X):
    """Multivariate Gaussian model

    Args:
        X: sample set
    Returns:
        p: the model
    """
    # Parameter estimation
    m, n = X.shape
    mu = np.mean(X.T, axis=1)
    Sigma = np.var(X, axis=0)
    Sigma = np.diagflat(Sigma)
    # Sigma = np.mat(np.cov(X.T))
    detSigma = np.linalg.det(Sigma)

    def p(x):
        """p(x)

        Args:
            x: x
            mu: mu
            delta2: delta2
        Returns:
            p
        """
        x = x - mu
        return np.exp(-x.T * np.linalg.pinv(Sigma) * x / 2).A[0] * \
            ((2 * np.pi) ** (-n / 2) * (detSigma ** (-0.5)))

    return p
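Here np.diagflat turns the vector of per-feature variances into a diagonal covariance matrix, i.e. the features are modeled as independent Gaussians. A brief hedged check, on toy data of my own, that such a density matches scipy's multivariate normal:

import numpy as np
from scipy.stats import multivariate_normal

X = np.random.randn(200, 3) * [1.0, 2.0, 0.5]   # toy samples
mu = np.mean(X, axis=0)
Sigma = np.diagflat(np.var(X, axis=0))          # diagonal covariance from per-feature variances

x = np.array([0.1, -0.3, 0.2])
ref = multivariate_normal(mean=mu, cov=Sigma).pdf(x)
manual = np.exp(-0.5 * (x - mu) @ np.linalg.inv(Sigma) @ (x - mu)) / \
    np.sqrt((2 * np.pi) ** 3 * np.linalg.det(Sigma))
print(np.allclose(ref, manual))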
Example #22
Source File: dngr.py From cogdl with MIT License | 5 votes |
def scale_matrix(self, mat):
    mat = mat - np.diag(np.diag(mat))
    D_inv = np.diagflat(np.reciprocal(np.sum(mat, axis=0)))
    mat = np.dot(D_inv, mat)
    return mat
Example #23
Source File: market.py From pyblp with MIT License | 5 votes |
def compute_capital_lamda_by_parameter_tangent(
        self, parameter: Parameter, value_derivatives: Array,
        value_derivatives_tangent: Array) -> Array:
    """Compute the tangent of the diagonal capital lambda matrix with respect to a parameter."""
    diagonal = value_derivatives_tangent @ self.agents.weights
    if self.H > 0:
        diagonal /= 1 - self.rho
        if isinstance(parameter, RhoParameter):
            associations = self.groups.expand(parameter.get_group_associations(self.groups))
            diagonal += associations / (1 - self.rho)**2 * (value_derivatives @ self.agents.weights)
    return np.diagflat(diagonal)
Example #24
Source File: test_umath.py From recruit with Apache License 2.0 | 5 votes |
def test_reduce_reorder(self):
    # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
    # and put it before the call to an intrisic function that causes
    # invalid status to be set. Also make sure warnings are not emitted
    for n in (2, 4, 8, 16, 32):
        for dt in (np.float32, np.float16, np.complex64):
            for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
                assert_equal(np.min(r), np.nan)
Example #25
Source File: market.py From pyblp with MIT License | 5 votes |
def compute_capital_lamda(self, value_derivatives: Array) -> Array:
    """Compute the diagonal capital lambda matrix used to decompose markups."""
    diagonal = value_derivatives @ self.agents.weights
    if self.H > 0:
        diagonal /= 1 - self.rho
    return np.diagflat(diagonal)
Example #26
Source File: ridgeregression.py From mpyc with MIT License | 5 votes |
def random_matrix_determinant(secfld, d):
    d_2 = d * (d-1) // 2
    L = np.diagflat([secfld(1)] * d)
    L[np.tril_indices(d, -1)] = mpc._randoms(secfld, d_2)
    L[np.triu_indices(d, 1)] = [secfld(0)] * d_2
    diag = mpc._randoms(secfld, d)
    U = np.diagflat(diag)
    U[np.tril_indices(d, -1)] = [secfld(0)] * d_2
    U[np.triu_indices(d, 1)] = mpc._randoms(secfld, d_2)
    R = mpc.matrix_prod(L.tolist(), U.tolist())
    detR = mpc.prod(diag)  # detR != 0 with overwhelming probability
    return R, detR
Example #27
Source File: instrument.py From isofit with Apache License 2.0 | 5 votes |
def dmeas_dinstrumentb(self, x_instrument, wl_hi, rdn_hi):
    """Jacobian of radiance with respect to the instrument parameters
    that are unknown and not retrieved, i.e., the inevitable persisting
    uncertainties in instrument spectral and radiometric calibration.

    Input: meas, a vector of size n_chan
    Returns: Kb_instrument, a matrix of size [n_measurements x nb_instrument]
    """
    # Uncertainty due to radiometric calibration
    meas = self.sample(x_instrument, wl_hi, rdn_hi)
    dmeas_dinstrument = np.hstack(
        (np.diagflat(meas), np.zeros((self.n_chan, 2))))

    # Uncertainty due to spectral calibration
    if self.bval[-2] > 1e-6:
        dmeas_dinstrument[:, -2] = self.sample(x_instrument, wl_hi,
                                               np.hstack((np.diff(rdn_hi), np.array([0]))))

    # Uncertainty due to spectral stray light
    if self.bval[-1] > 1e-6:
        ssrf = spectral_response_function(np.arange(-10, 11), 0, 4)
        blur = convolve(meas, ssrf, mode='same')
        dmeas_dinstrument[:, -1] = blur - meas

    return dmeas_dinstrument
Example #28
Source File: instrument.py From isofit with Apache License 2.0 | 5 votes |
def Sy(self, meas, geom):
    """Calculate measurement error covariance.

    Input: meas, the instrument measurement
    Returns: Sy, the measurement error covariance due to instrument noise
    """
    if self.model_type == 'SNR':
        bad = meas < 1e-5
        if np.any(bad):
            meas[bad] = 1e-5
            logging.debug('SNR noise model found noise <= 0 - adjusting to slightly positive to avoid /0.')
        nedl = (1.0 / self.snr) * meas
        return np.diagflat(np.power(nedl, 2))

    elif self.model_type == 'parametric':
        noise_plus_meas = self.noise[:, 1] + meas
        if np.any(noise_plus_meas <= 0):
            noise_plus_meas[noise_plus_meas <= 0] = 1e-5
            logging.debug('Parametric noise model found noise <= 0 - adjusting to slightly positive to avoid /0.')
        nedl = np.abs(self.noise[:, 0]*np.sqrt(noise_plus_meas) + self.noise[:, 2])
        nedl = nedl/np.sqrt(self.integrations)
        return np.diagflat(np.power(nedl, 2))

    elif self.model_type == 'pushbroom':
        if geom.pushbroom_column is None:
            C = np.squeeze(self.covs.mean(axis=0))
        else:
            C = self.covs[geom.pushbroom_column, :, :]
        return C / np.sqrt(self.integrations)

    elif self.model_type == 'NEDT':
        return np.diagflat(np.power(self.noise_NESR, 2))
Example #29
Source File: instrument.py From isofit with Apache License 2.0 | 5 votes |
def Sa(self):
    """Covariance of prior distribution (diagonal)."""
    if self.n_state == 0:
        return np.zeros((0, 0), dtype=float)
    return np.diagflat(np.power(self.prior_sigma, 2))
Example #30
Source File: iomath.py From pymrio with GNU General Public License v3.0 | 5 votes |
def calc_A(Z, x):
    """ Calculate the A matrix (coefficients) from Z and x

    Parameters
    ----------
    Z : pandas.DataFrame or numpy.array
        Symmetric input output table (flows)
    x : pandas.DataFrame or numpy.array
        Industry output column vector

    Returns
    -------
    pandas.DataFrame or numpy.array
        Symmetric input output table (coefficients) A
        The type is determined by the type of Z.
        If DataFrame index/columns as Z
    """
    if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
        x = x.values
    if (type(x) is not np.ndarray) and (x == 0):
        recix = 0
    else:
        with warnings.catch_warnings():
            # catch the divide by zero warning
            # we deal with that by setting to 0 afterwards
            warnings.simplefilter('ignore')
            recix = 1/x
        recix[recix == np.inf] = 0
        recix = recix.reshape((1, -1))
    # use numpy broadcasting - factor ten faster
    # Mathematical form - slow
    # return Z.dot(np.diagflat(recix))
    if type(Z) is pd.DataFrame:
        return pd.DataFrame(Z.values * recix, index=Z.index, columns=Z.columns)
    else:
        return Z*recix
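The commented-out "mathematical form" is the textbook definition A = Z · diag(1/x); the broadcasting used in the function is simply a faster way to compute the same product. A rough numeric check of that equivalence, with invented Z and x values:

import numpy as np

Z = np.array([[10., 20.],
              [30., 40.]])
x = np.array([100., 200.])

recix = (1 / x).reshape((1, -1))
A_broadcast = Z * recix                      # column-wise scaling via broadcasting
A_diagflat = Z.dot(np.diagflat(recix))       # the slower "mathematical form"
print(np.allclose(A_broadcast, A_diagflat))  # True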