Python numpy.diag_indices_from() Examples
The following are 30 code examples of numpy.diag_indices_from(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
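Before the project examples, here is a minimal, self-contained sketch of what numpy.diag_indices_from() returns and the pattern that recurs below (reading or modifying the main diagonal of a square array in place). The array values are illustrative only, and the exact repr formatting may differ slightly across NumPy versions.

>>> import numpy as np
>>> a = np.arange(16).reshape(4, 4).astype(float)
>>> rows, cols = np.diag_indices_from(a)     # tuple of index arrays addressing the main diagonal
>>> rows, cols
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[np.diag_indices_from(a)]               # read the diagonal
array([ 0.,  5., 10., 15.])
>>> a[np.diag_indices_from(a)] += 100.0      # shift the diagonal in place
>>> a[np.diag_indices_from(a)]
array([100., 105., 110., 115.])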
Example #1
Source File: gradient.py From q2mm with MIT License | 6 votes |
def do_levenberg(ma, vb, factor):
    """
    Levenberg-Marquardt.

    Parameters
    ----------
    ma : NumPy matrix
    vb : NumPy vector
    factor : float
    """
    mac = copy.deepcopy(ma)
    ind = np.diag_indices_from(mac)
    mac[ind] = mac[ind] + factor
    logger.log(5, 'A:\n{}'.format(mac))
    changes = solver(mac, vb)
    return [('LM {}'.format(factor), changes)]
Example #2
Source File: gradient.py From q2mm with MIT License | 6 votes |
def do_lagrange(ma, vb, factor):
    """
    Lagrange multipliers.

    Parameters
    ----------
    ma : NumPy matrix
    vb : NumPy vector
    factor : float
    """
    mac = copy.deepcopy(ma)
    ind = np.diag_indices_from(mac)
    logger.log(5, 'A:\n{}'.format(mac))
    mac[ind] = mac[ind] + factor
    logger.log(5, 'A:\n{}'.format(mac))
    changes = solver(mac, vb)
    return [('LAGRANGE F{}'.format(factor), changes)]
Example #3
Source File: bayesian.py From nni with MIT License | 6 votes |
def first_fit(self, train_x, train_y):
    """ Fit the regressor for the first time. """
    train_x, train_y = np.array(train_x), np.array(train_y)

    self._x = np.copy(train_x)
    self._y = np.copy(train_y)

    self._distance_matrix = edit_distance_matrix(self._x)
    k_matrix = bourgain_embedding_matrix(self._distance_matrix)
    k_matrix[np.diag_indices_from(k_matrix)] += self.alpha

    self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2

    self._alpha_vector = cho_solve(
        (self._l_matrix, True), self._y)  # Line 3

    self._first_fitted = True
    return self
Example #4
Source File: correlation_structures.py From vnpy_crypto with MIT License | 6 votes |
def corr_equi(k_vars, rho):
    '''create equicorrelated correlation matrix with rho on off diagonal

    Parameters
    ----------
    k_vars : int
        number of variables, correlation matrix will be (k_vars, k_vars)
    rho : float
        correlation between any two random variables

    Returns
    -------
    corr : ndarray (k_vars, k_vars)
        correlation matrix

    '''
    corr = np.empty((k_vars, k_vars))
    corr.fill(rho)
    corr[np.diag_indices_from(corr)] = 1
    return corr
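For reference, calling the helper above with small illustrative values produces the expected equicorrelation structure (a quick sketch, not part of the original project; repr formatting may vary by NumPy version):

>>> corr_equi(3, 0.5)
array([[1. , 0.5, 0.5],
       [0.5, 1. , 0.5],
       [0.5, 0.5, 1. ]])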
Example #5
Source File: test_mutual_info.py From enspara with GNU General Public License v3.0 | 6 votes |
def test_symmetrical_mi_nonzero_int_shape_spec():
    # test that when we use an integer (rather than a list of integers)
    # that we correctly assume that the integer is just repeated for all
    # of the various features.
    nonzero_mi_funcs = [nonzero_mi_np, nonzero_mi_ra, nonzero_mi_list]
    for a, n_states in (f() for f in nonzero_mi_funcs):
        mi = mutual_info.mi_matrix(a, a, 5, 5)

        assert_almost_equal(mi[-1, -2], 0.86114, decimal=3)
        mi[-1, -2] = mi[-2, -1] = 0

        assert_almost_equal(np.diag(mi), 0.86114, decimal=2)
        mi[np.diag_indices_from(mi)] = 0

        assert_allclose(mi, 0, atol=1e-3)
Example #6
Source File: test_mutual_info.py From enspara with GNU General Public License v3.0 | 6 votes |
def test_symmetrical_mi_nonzero():
    # test that the MI matrix for sets of uncorrelated things results
    # in zero MI
    nonzero_mi_funcs = [nonzero_mi_np, nonzero_mi_ra, nonzero_mi_list]
    for a, n_states in (f() for f in nonzero_mi_funcs):
        mi = mutual_info.mi_matrix(a, a, n_states, n_states)

        assert_almost_equal(mi[-1, -2], 0.86114, decimal=3)
        mi[-1, -2] = mi[-2, -1] = 0

        assert_almost_equal(np.diag(mi), 0.86114, decimal=2)
        mi[np.diag_indices_from(mi)] = 0

        assert_allclose(mi, 0, atol=1e-3)
Example #7
Source File: correlation_structures.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def corr_equi(k_vars, rho):
    '''create equicorrelated correlation matrix with rho on off diagonal

    Parameters
    ----------
    k_vars : int
        number of variables, correlation matrix will be (k_vars, k_vars)
    rho : float
        correlation between any two random variables

    Returns
    -------
    corr : ndarray (k_vars, k_vars)
        correlation matrix

    '''
    corr = np.empty((k_vars, k_vars))
    corr.fill(rho)
    corr[np.diag_indices_from(corr)] = 1
    return corr
Example #8
Source File: meanderpy.py From meanderpy with Apache License 2.0 | 6 votes |
def order_cl_pixels(x_pix, y_pix):
    dist = distance.cdist(np.array([x_pix, y_pix]).T, np.array([x_pix, y_pix]).T)
    dist[np.diag_indices_from(dist)] = 100.0
    ind = np.argmin(x_pix)  # select starting point on left side of image
    clinds = [ind]
    count = 0
    while count < len(x_pix):
        t = dist[ind, :].copy()
        if len(clinds) > 2:
            t[clinds[-2]] = t[clinds[-2]] + 100.0
            t[clinds[-3]] = t[clinds[-3]] + 100.0
        ind = np.argmin(t)
        clinds.append(ind)
        count = count + 1
    x_pix = x_pix[clinds]
    y_pix = y_pix[clinds]
    return x_pix, y_pix
Example #9
Source File: insert.py From cupy with MIT License | 6 votes |
def diag_indices_from(arr):
    """
    Return the indices to access the main diagonal of an n-dimensional array.
    See `diag_indices` for full details.

    Args:
        arr (cupy.ndarray): At least 2-D.

    .. seealso:: :func:`numpy.diag_indices_from`

    """
    if not isinstance(arr, cupy.ndarray):
        raise TypeError("Argument must be cupy.ndarray")

    if not arr.ndim >= 2:
        raise ValueError("input array must be at least 2-d")

    # For more than d=2, the strided formula is only valid for arrays with
    # all dimensions equal, so we check first.
    if not cupy.all(cupy.diff(arr.shape) == 0):
        raise ValueError("All dimensions of input must be of equal length")

    return diag_indices(arr.shape[0], arr.ndim)
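The CuPy port above mirrors NumPy's behavior for higher-dimensional and non-square inputs. As a quick check of the equivalent NumPy calls (illustrative shapes only; the error text shown is NumPy's message in recent versions):

>>> import numpy as np
>>> np.diag_indices_from(np.zeros((3, 3, 3)))   # works when all dimensions are equal
(array([0, 1, 2]), array([0, 1, 2]), array([0, 1, 2]))
>>> np.diag_indices_from(np.zeros((3, 4)))      # unequal dimensions raise
Traceback (most recent call last):
    ...
ValueError: All dimensions of input must be of equal length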
Example #10
Source File: krige3d.py From pyGeoStatistics with MIT License | 6 votes |
def block_covariance(self):
    "return average covariance within block"
    if self._block_covariance is None:
        if self.ndb <= 1:  # point kriging
            self._block_covariance = self.unbias
        else:
            cov = list()
            for x1, y1, z1 in zip(self.xdb, self.ydb, self.zdb):
                for x2, y2, z2 in zip(self.xdb, self.ydb, self.zdb):
                    # cov.append(self._cova3((x1, y1, z1), (x2, y2, z2)))
                    cov.append(cova3(
                        (x1, y1, z1), (x2, y2, z2),
                        self.rotmat, self.maxcov, self.nst,
                        self.it, self.cc, self.aa_hmax))
            cov = np.array(cov).reshape((self.ndb, self.ndb))
            cov[np.diag_indices_from(cov)] -= self.c0
            self._block_covariance = np.mean(cov)
    return self._block_covariance
Example #11
Source File: filt.py From pyins with MIT License | 6 votes |
def _kalman_correct(x, P, z, H, R, gain_factor, gain_curve):
    PHT = np.dot(P, H.T)

    S = np.dot(H, PHT) + R
    e = z - H.dot(x)
    L = cholesky(S, lower=True)
    inn = solve_triangular(L, e, lower=True)

    if gain_curve is not None:
        q = (np.dot(inn, inn) / inn.shape[0]) ** 0.5
        f = gain_curve(q)
        if f == 0:
            return inn
        L *= (q / f) ** 0.5

    K = cho_solve((L, True), PHT.T, overwrite_b=True).T
    if gain_factor is not None:
        K *= gain_factor[:, None]

    U = -K.dot(H)
    U[np.diag_indices_from(U)] += 1
    x += K.dot(z - H.dot(x))

    P[:] = U.dot(P).dot(U.T) + K.dot(R).dot(K.T)

    return inn
Example #12
Source File: math.py From qml with MIT License | 5 votes |
def bkf_solve(A, y):
    """ Solves the equation :math:`A x = y` for x using a Cholesky
        decomposition via calls to LAPACK dpotrf and dpotrs in the
        F2PY module. Preserves the input matrix A.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array
        :param y: Vector (right-hand side of the equation).
        :type y: numpy array

        :return: The solution vector.
        :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Backup diagonal before Cholesky-decomposition
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fbkf_solve(A, y, x)

    # Reset diagonal after Cholesky-decomposition
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x
Example #13
Source File: mole.py From pyscf with Apache License 2.0 | 5 votes |
def energy_nuc(mol, charges=None, coords=None):
    '''Compute nuclear repulsion energy (AU) or static Coulomb energy

    Returns
        float
    '''
    if charges is None: charges = mol.atom_charges()
    if len(charges) <= 1:
        return 0
    #e = 0
    #for j in range(len(mol._atm)):
    #    q2 = charges[j]
    #    r2 = coords[j]
    #    for i in range(j):
    #        q1 = charges[i]
    #        r1 = coords[i]
    #        r = numpy.linalg.norm(r1-r2)
    #        e += q1 * q2 / r
    rr = inter_distance(mol, coords)
    rr[numpy.diag_indices_from(rr)] = 1e200
    if CHECK_GEOM and numpy.any(rr < 1e-5):
        for atm_idx in numpy.argwhere(rr < 1e-5):
            logger.warn(mol, 'Atoms %s have the same coordinates', atm_idx)
        raise RuntimeError('Ill geometry')
    e = numpy.einsum('i,ij,j->', charges, 1./rr, charges) * .5
    return e
Example #14
Source File: pairwise.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # 1.0 - cosine_similarity(X, Y) without copy
    S = cosine_similarity(X, Y)
    S *= -1
    S += 1
    np.clip(S, 0, 2, out=S)
    if X is Y or Y is None:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        S[np.diag_indices_from(S)] = 0.0
    return S


# Paired distances
Example #15
Source File: datasets.py From tape with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __getitem__(self, index):
    item = self.data[index]
    msa = item['msa']
    dist = item['dist6d']
    omega = item['omega6d']
    theta = item['theta6d']
    phi = item['phi6d']

    if self._split == 'train':
        msa = self._subsample_msa(msa)
    elif self._split == 'valid':
        msa = msa[:20000]  # runs out of memory if msa is way too big

    msa, dist, omega, theta, phi = self._slice_long_sequences(
        msa, dist, omega, theta, phi)

    mask = dist == 0

    dist_bins = np.digitize(dist, self._dist_bins)
    omega_bins = np.digitize(omega, self._dihedral_bins) + 1
    theta_bins = np.digitize(theta, self._dihedral_bins) + 1
    phi_bins = np.digitize(phi, self._planar_bins) + 1

    dist_bins[mask] = 0
    omega_bins[mask] = 0
    theta_bins[mask] = 0
    phi_bins[mask] = 0

    dist_bins[np.diag_indices_from(dist_bins)] = -1

    # input_mask = np.ones_like(msa[0])

    return msa, dist_bins, omega_bins, theta_bins, phi_bins
Example #16
Source File: compute.py From snfpy with GNU Lesser General Public License v3.0 | 5 votes |
def get_n_clusters(arr, n_clusters=range(2, 6)):
    """
    Finds optimal number of clusters in `arr` via eigengap method

    Parameters
    ----------
    arr : (N, N) array_like
        Input array (e.g., the output of :py:func`snf.compute.snf`)
    n_clusters : array_like
        Numbers of clusters to choose between

    Returns
    -------
    opt_cluster : int
        Optimal number of clusters
    second_opt_cluster : int
        Second best number of clusters
    """

    # confirm inputs are appropriate
    n_clusters = check_array(n_clusters, ensure_2d=False)
    n_clusters = n_clusters[n_clusters > 1]

    # don't overwrite provided array!
    graph = arr.copy()

    graph = (graph + graph.T) / 2
    graph[np.diag_indices_from(graph)] = 0
    degree = graph.sum(axis=1)
    degree[np.isclose(degree, 0)] += np.spacing(1)
    di = np.diag(1 / np.sqrt(degree))
    laplacian = di @ (np.diag(degree) - graph) @ di

    # perform eigendecomposition and find eigengap
    eigs = np.sort(np.linalg.eig(laplacian)[0])
    eigengap = np.abs(np.diff(eigs))
    eigengap = eigengap * (1 - eigs[:-1]) / (1 - eigs[1:])
    n = eigengap[n_clusters - 1].argsort()[::-1]

    return n_clusters[n[:2]]
Example #17
Source File: mSDA.py From domain_adversarial_neural_network with BSD 2-Clause "Simplified" License | 5 votes |
def mda_fit(X, noise=0.5, eta=1e-5):
    """
    inputs:
        X : d x n input (Transpose of the usual data-matrix)
        noise: corruption level
        eta: regularization

    outputs:
        hx: d x n hidden representation
        W: d x (d+1) mapping
    """
    d, n = np.shape(X)

    # adding bias
    Xb = np.vstack((X, np.ones(n)))

    # scatter matrix S
    S = np.dot(Xb, Xb.T)

    # corruption vector
    q = np.ones((d + 1, 1)) * (1. - noise)
    q[-1] = 1

    # Q: (d+1)x(d+1)
    Q = S * np.dot(q, q.T)
    Q[np.diag_indices_from(Q)] = q.T[0] * np.diag(S)

    # P: dx(d+1)
    P = S[0:-1, :] * q.T

    # final W = P*Q^-1, dx(d+1)
    reg = eta * np.eye(d + 1)
    reg[-1, -1] = 0
    W = np.linalg.solve(Q.T + reg, P.T).T

    hx = np.tanh(np.dot(W, Xb))

    return hx, W
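A quick illustrative call of the helper above on random data, to show the expected shapes of the hidden representation and the learned mapping (the sizes are arbitrary, not from the original project):

>>> import numpy as np
>>> X = np.random.rand(10, 200)      # d=10 features, n=200 samples
>>> hx, W = mda_fit(X, noise=0.5)
>>> hx.shape, W.shape                # d x n representation, d x (d+1) mapping
((10, 200), (10, 11))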
Example #18
Source File: meanderpy.py From meanderpy with Apache License 2.0 | 5 votes |
def kth_diag_indices(a, k):
    """function for finding diagonal indices with k offset
    [from https://stackoverflow.com/questions/10925671/numpy-k-th-diagonal-indices]"""
    rows, cols = np.diag_indices_from(a)
    if k < 0:
        return rows[:k], cols[-k:]
    elif k > 0:
        return rows[k:], cols[:-k]
    else:
        return rows, cols
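A quick illustration of how the offset helper above behaves on a small array (the values are illustrative only). Note that with this helper's sign convention a positive k selects the diagonal below the main one, which is the opposite of the offset argument of numpy.diagonal:

>>> a = np.arange(16).reshape(4, 4)
>>> a[kth_diag_indices(a, 0)]        # main diagonal
array([ 0,  5, 10, 15])
>>> a[kth_diag_indices(a, 1)]        # one below the main diagonal
array([ 4,  9, 14])
>>> a[kth_diag_indices(a, -1)]       # one above the main diagonal
array([ 1,  6, 11])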
Example #19
Source File: test_mutual_info.py From enspara with GNU General Public License v3.0 | 5 votes |
def test_symmetrical_mi_zero():
    zero_mi_funcs = [zero_mi_np, zero_mi_ra, zero_mi_list]
    for a, n_states in (f() for f in zero_mi_funcs):
        mi = mutual_info.mi_matrix(a, a, n_states, n_states)

        assert_allclose(np.diag(mi), 0.86114, atol=0.1)
        mi[np.diag_indices_from(mi)] = 0

        assert_allclose(mi, 0, atol=1e-3)
Example #20
Source File: test_mutual_info.py From enspara with GNU General Public License v3.0 | 5 votes |
def test_asymmetrical_mi_zero():
    zero_mi_funcs = [zero_mi_np, zero_mi_ra, zero_mi_list]
    for gen_f in zero_mi_funcs:
        a, n_a = gen_f()
        b, n_b = gen_f()

        mi = mutual_info.mi_matrix(a, b, n_a, n_b)

        assert_allclose(np.diag(mi), 0, atol=0.1)
        mi[np.diag_indices_from(mi)] = 0

        assert_allclose(mi, 0, atol=1e-3)
Example #21
Source File: test_quantity_non_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_diag_indices_from(self):
    self.check(np.diag_indices_from)
Example #22
Source File: template_preprocess_dataset.py From G-SchNet with MIT License | 5 votes |
def get_connectivity(mol, cutoff=2.0):
    '''
    Write code to obtain a connectivity matrix given a molecule from your
    database here. The simple default implementation calculates pairwise
    distances and then uses a radial cutoff (e.g. 2 angstrom) to determine
    which atoms are labeled as connected. The matrix only needs to be binary
    as it is only used to sample generation traces, i.e. an order of atom
    placement steps for training. However, one could for example also use
    chemoinformatics tools in order to obtain bond order information and
    check the valence of provided structures on the run if the structures
    allow this (we did this for our experiments with QM9 in order to allow
    for comparison to related work, but we think that using a radial cutoff
    is actually more robust and more general as it does not depend on
    usually unreliable bond order assignment algorithms and can be used for
    all kinds of materials or molecules).

    Args:
        mol (ase.Atoms): one molecule from the database
        cutoff (float, optional): cutoff value in angstrom used to determine
            which atoms are connected

    Returns:
        numpy.ndarray: the computed connectivity matrix (n_atoms x n_atoms, float)
        numpy.ndarray: the computed pairwise distances in a condensed format
            (length is n_atoms*(n_atoms-1)/2), see scipy.spatial.distance.pdist
            for more information
    '''
    # retrieve positions
    atom_positions = mol.get_positions()
    # get pairwise distances (condensed)
    pairwise_distances = pdist(atom_positions)
    # use cutoff to obtain connectivity matrix (condensed format)
    connectivity = np.array(pairwise_distances <= cutoff, dtype=float)
    # cast to redundant square matrix format
    connectivity = squareform(connectivity)
    # set diagonal entries to zero (as we do not assume atoms to be their own neighbors)
    connectivity[np.diag_indices_from(connectivity)] = 0
    return connectivity, pairwise_distances
Example #23
Source File: test_pairwise.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_cosine_distances():
    # Check the pairwise Cosine distances computation
    rng = np.random.RandomState(1337)
    x = np.abs(rng.rand(910))
    XA = np.vstack([x, x])
    D = cosine_distances(XA)
    assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
    # check that all elements are in [0, 2]
    assert_true(np.all(D >= 0.))
    assert_true(np.all(D <= 2.))
    # check that diagonal elements are equal to 0
    assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])

    XB = np.vstack([x, -x])
    D2 = cosine_distances(XB)
    # check that all elements are in [0, 2]
    assert_true(np.all(D2 >= 0.))
    assert_true(np.all(D2 <= 2.))
    # check that diagonal elements are equal to 0 and non diagonal to 2
    assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])

    # check large random matrix
    X = np.abs(rng.rand(1000, 5000))
    D = cosine_distances(X)
    # check that diagonal elements are equal to 0
    assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
    assert_true(np.all(D >= 0.))
    assert_true(np.all(D <= 2.))


# Paired distances
Example #24
Source File: pairwise.py From twitter-stock-recommendation with MIT License | 5 votes |
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # 1.0 - cosine_similarity(X, Y) without copy
    S = cosine_similarity(X, Y)
    S *= -1
    S += 1
    np.clip(S, 0, 2, out=S)
    if X is Y or Y is None:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        S[np.diag_indices_from(S)] = 0.0
    return S


# Paired distances
Example #25
Source File: math.py From qml with MIT License | 5 votes |
def cho_solve(A, y):
    """ Solves the equation :math:`A x = y` for x using a Cholesky
        decomposition via calls to LAPACK dpotrf and dpotrs in the
        F2PY module. Preserves the input matrix A.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array
        :param y: Vector (right-hand side of the equation).
        :type y: numpy array

        :return: The solution vector.
        :rtype: numpy array
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Backup diagonal before Cholesky-decomposition
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fcho_solve(A, y, x)

    # Reset diagonal after Cholesky-decomposition
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x
Example #26
Source File: bayesian.py From nni with MIT License | 5 votes |
def incremental_fit(self, train_x, train_y):
    """ Incrementally fit the regressor. """
    if not self._first_fitted:
        raise ValueError(
            "The first_fit function needs to be called first.")

    train_x, train_y = np.array(train_x), np.array(train_y)

    # Incrementally compute K
    up_right_k = edit_distance_matrix(self._x, train_x)
    down_left_k = np.transpose(up_right_k)
    down_right_k = edit_distance_matrix(train_x)
    up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
    down_k = np.concatenate((down_left_k, down_right_k), axis=1)
    temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
    k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
    diagonal = np.diag_indices_from(k_matrix)
    diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):])
    k_matrix[diagonal] += self.alpha

    try:
        self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
    except LinAlgError:
        return self

    self._x = np.concatenate((self._x, train_x), axis=0)
    self._y = np.concatenate((self._y, train_y), axis=0)
    self._distance_matrix = temp_distance_matrix

    self._alpha_vector = cho_solve(
        (self._l_matrix, True), self._y)  # Line 3

    return self
Example #27
Source File: test_minimize_constrained.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def grad(self, x):
    dx, dy, dz = self._compute_coordinate_deltas(x)

    with np.errstate(divide='ignore'):
        dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
    dm3[np.diag_indices_from(dm3)] = 0

    grad_x = -np.sum(dx * dm3, axis=1)
    grad_y = -np.sum(dy * dm3, axis=1)
    grad_z = -np.sum(dz * dm3, axis=1)

    return np.hstack((grad_x, grad_y, grad_z))
Example #28
Source File: test_minimize_constrained.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def fun(self, x):
    dx, dy, dz = self._compute_coordinate_deltas(x)
    with np.errstate(divide='ignore'):
        dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
    dm1[np.diag_indices_from(dm1)] = 0
    return 0.5 * np.sum(dm1)
Example #29
Source File: test_util.py From kite with GNU General Public License v3.0 | 5 votes |
def test_trim_matrix():
    arr = num.full((100, 100), num.nan)

    arr[-1, -1] = 1.
    assert util.trimMatrix(arr).shape == (1, 1)

    arr[-2, -2] = 1.
    assert util.trimMatrix(arr).shape == (2, 2)

    arr[num.diag_indices_from(arr)] = 1.
    assert util.trimMatrix(arr).shape == arr.shape
Example #30
Source File: cokrige.py From pyGeoStatistics with MIT License | 5 votes |
def block_covariance(self):
    "return average covariance within block"
    if self._block_covariance is None:
        if self.ndb <= 1:  # point kriging
            self._block_covariance = self.unbias
        else:
            cov = list()
            for x1, y1, z1 in zip(self.xdb, self.ydb, self.zdb):
                for x2, y2, z2 in zip(self.xdb, self.ydb, self.zdb):
                    cov.append(self._cova3((x1, y1, z1), (x2, y2, z2)))
            cov = np.array(cov).reshape((self.ndb, self.ndb))
            cov[np.diag_indices_from(cov)] -= self.c0
            self._block_covariance = np.mean(cov)
    return self._block_covariance