Python scipy.spatial.distance_matrix() Examples
The following are 19 code examples of scipy.spatial.distance_matrix(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.spatial, or try the search function.
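Before the project examples, here is a minimal sketch of the basic call. scipy.spatial.distance_matrix(x, y, p=2, threshold=1000000) returns the matrix of pairwise Minkowski distances (Euclidean for the default p=2) between the rows of x and the rows of y:

import numpy as np
from scipy.spatial import distance_matrix

x = np.array([[0.0, 0.0], [1.0, 0.0]])              # 2 points in 2-D
y = np.array([[0.0, 1.0], [1.0, 1.0], [3.0, 4.0]])  # 3 points in 2-D

# Result has shape (2, 3): entry [i, j] is the distance from x[i] to y[j].
print(distance_matrix(x, y))
# [[1.         1.41421356 5.        ]
#  [1.41421356 1.         4.47213595]]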
Example #1
Source File: pre.py From skan with BSD 3-Clause "New" or "Revised" License | 6 votes |
def hyperball(ndim, radius):
    """Return a binary morphological filter containing pixels within `radius`.

    Parameters
    ----------
    ndim : int
        The number of dimensions of the filter.
    radius : int
        The radius of the filter.

    Returns
    -------
    ball : array of bool, shape [2 * radius + 1,] * ndim
        The required structural element
    """
    size = 2 * radius + 1
    center = [(radius,) * ndim]

    coords = np.mgrid[[slice(None, size),] * ndim].reshape(ndim, -1).T
    distances = np.ravel(spatial.distance_matrix(coords, center))
    selector = distances <= radius

    ball = np.zeros((size,) * ndim, dtype=bool)
    ball.ravel()[selector] = True
    return ball
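For illustration, a small usage sketch (assuming the hyperball definition above plus import numpy as np and from scipy import spatial): in two dimensions with radius 1, the Euclidean ball on a 3 x 3 grid is a plus shape.

print(hyperball(2, 1))
# [[False  True False]
#  [ True  True  True]
#  [False  True False]]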
Example #2
Source File: ipol.py From wradlib with MIT License | 6 votes |
def _krig_matrix(self, src, drift):
    """Sets up the kriging system for a configuration of source points."""
    # the basic covariance matrix
    var_matrix = self.cov_func(spatial.distance_matrix(src, src))

    # the extended matrix, initialized to ones
    edk_matrix = np.ones((len(src) + 2, len(src) + 2))

    # adding entries for the first lagrange multiplier for the ordinary
    # kriging part
    edk_matrix[:-2, :-2] = var_matrix
    edk_matrix[-2, -2] = 0.0

    # adding entries for the second lagrange multiplier for the edk part
    edk_matrix[:-2, -1] = drift
    edk_matrix[-1, :-2] = drift
    edk_matrix[-2:, -1] = 0.0
    edk_matrix[-1, -2:] = 0.0

    return edk_matrix
Example #3
Source File: test_kdtree.py From Computable with MIT License | 5 votes |
def test_distance_matrix():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m, k)
    ys = np.random.randn(n, k)
    ds = distance_matrix(xs, ys)
    assert_equal(ds.shape, (m, n))
    for i in range(m):
        for j in range(n):
            assert_almost_equal(distance(xs[i], ys[j]), ds[i, j])
Example #4
Source File: pull_numpy.py From compas with MIT License | 5 votes |
def trimesh_pull_points_numpy(mesh, points):
    # preprocess
    i_k = mesh.index_key()
    fk_fi = {fkey: index for index, fkey in enumerate(mesh.faces())}
    vertices = array(mesh.vertices_attributes('xyz'), dtype=float64).reshape((-1, 3))
    triangles = array([mesh.face_coordinates(fkey) for fkey in mesh.faces()], dtype=float64)
    points = array(points, dtype=float64).reshape((-1, 3))
    closest_vis = argmin(distance_matrix(points, vertices), axis=1)
    # transformation matrices
    # ?
    pulled_points = []
    # pull every point onto the mesh
    for i in range(points.shape[0]):
        point = points[i]
        closest_vi = closest_vis[i]
        closest_vk = i_k[closest_vi]
        closest_tris = [fk_fi[fk] for fk in mesh.vertex_faces(closest_vk, ordered=True) if fk is not None]
        # process the connected triangles
        d, p, c = _find_closest_component(point, vertices, triangles, closest_tris, closest_vi)
        pulled_points.append(p)
    return pulled_points


# ==============================================================================
# helpers
# ==============================================================================
Example #5
Source File: delta.py From hyperbolic-image-embeddings with MIT License | 5 votes |
def get_delta(loader):
    """
    computes delta value for image data by extracting features using VGG network;
    input -- data loader for images
    """
    vgg = torchvision.models.vgg16(pretrained=True)
    vgg_feats = vgg.features
    vgg_classifier = nn.Sequential(*list(vgg.classifier.children())[:-1])

    vgg_part = nn.Sequential(vgg_feats, Flatten(), vgg_classifier).to(device)
    vgg_part.eval()

    all_features = []
    for i, (batch, _) in enumerate(loader):
        with torch.no_grad():
            batch = batch.to(device)
            all_features.append(vgg_part(batch).detach().cpu().numpy())

    all_features = np.concatenate(all_features)
    idx = np.random.choice(len(all_features), 1500)
    all_features_small = all_features[idx]

    dists = distance_matrix(all_features_small, all_features_small)
    delta = delta_hyp(dists)
    diam = np.max(dists)
    return delta, diam
Example #6
Source File: delta.py From hyperbolic-image-embeddings with MIT License | 5 votes |
def batched_delta_hyp(X, n_tries=10, batch_size=1500):
    vals = []
    for i in tqdm(range(n_tries)):
        idx = np.random.choice(len(X), batch_size)
        X_batch = X[idx]
        distmat = distance_matrix(X_batch, X_batch)
        diam = np.max(distmat)
        delta_rel = delta_hyp(distmat) / diam
        vals.append(delta_rel)
    return np.mean(vals), np.std(vals)
Example #7
Source File: ipol.py From wradlib with MIT License | 5 votes |
def _krig_matrix(self, src):
    """Sets up the kriging system for a configuration of source points."""
    var_matrix = self.cov_func(spatial.distance_matrix(src, src))

    ok_matrix = np.ones((len(src) + 1, len(src) + 1))

    ok_matrix[:-1, :-1] = var_matrix
    ok_matrix[-1, -1] = 0.0

    return ok_matrix
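A standalone sketch of the same construction, with a hypothetical exponential covariance standing in for self.cov_func (the real cov_func depends on how the wradlib interpolator is configured):

import numpy as np
from scipy import spatial

def cov_func(h):
    return np.exp(-h / 2.0)             # hypothetical covariance model

src = np.random.rand(5, 2)              # 5 source points in 2-D
var_matrix = cov_func(spatial.distance_matrix(src, src))

ok_matrix = np.ones((len(src) + 1, len(src) + 1))
ok_matrix[:-1, :-1] = var_matrix        # covariance block
ok_matrix[-1, -1] = 0.0                 # Lagrange-multiplier corner
print(ok_matrix.shape)                  # (6, 6)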
Example #8
Source File: preprocess.py From molecularGNN_3Dstructure with Apache License 2.0 | 5 votes |
def create_distances(coords):
    """Create the distance matrix from a set of 3D coordinates.
    Note that we transform the element 0.0 in the matrix into a large value
    for processing by Gaussian exp(-d^2), where d is the distance.
    """
    distance_matrix = spatial.distance_matrix(coords, coords)
    return np.where(distance_matrix == 0.0, 1e6, distance_matrix)
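A quick numeric check of why the zeros are replaced (hypothetical coordinates): after the substitution, the diagonal self-distances map to (numerically) zero under the Gaussian instead of one.

import numpy as np
from scipy import spatial

coords = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.5]]   # two atoms 1.5 apart
d = spatial.distance_matrix(coords, coords)
d = np.where(d == 0.0, 1e6, d)                # as in create_distances
print(np.exp(-d ** 2))
# The diagonal is ~0 rather than 1, so an atom gets no weight with itself.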
Example #9
Source File: query_methods.py From DiscriminativeActiveLearning with MIT License | 5 votes |
def greedy_k_center(self, labeled, unlabeled, amount):

    greedy_indices = []

    # get the minimum distances between the labeled and unlabeled examples
    # (iteratively, to avoid memory issues):
    min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)
    min_dist = min_dist.reshape((1, min_dist.shape[0]))
    for j in range(1, labeled.shape[0], 100):
        if j + 100 < labeled.shape[0]:
            dist = distance_matrix(labeled[j:j+100, :], unlabeled)
        else:
            dist = distance_matrix(labeled[j:, :], unlabeled)
        min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))
        min_dist = np.min(min_dist, axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))

    # iteratively insert the farthest index and recalculate the minimum distances:
    farthest = np.argmax(min_dist)
    greedy_indices.append(farthest)
    for i in range(amount - 1):
        if i % 1000 == 0:
            print("At Point " + str(i))
        dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])), unlabeled)
        min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
        min_dist = np.min(min_dist, axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)

    return np.array(greedy_indices, dtype=int), np.max(min_dist)
Example #10
Source File: query_methods.py From DiscriminativeActiveLearning with MIT License | 5 votes |
def greedy_k_center(self, labeled, unlabeled, amount):

    greedy_indices = []

    # get the minimum distances between the labeled and unlabeled examples
    # (iteratively, to avoid memory issues):
    min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)
    min_dist = min_dist.reshape((1, min_dist.shape[0]))
    for j in range(1, labeled.shape[0], 100):
        if j + 100 < labeled.shape[0]:
            dist = distance_matrix(labeled[j:j+100, :], unlabeled)
        else:
            dist = distance_matrix(labeled[j:, :], unlabeled)
        min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))
        min_dist = np.min(min_dist, axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))

    # iteratively insert the farthest index and recalculate the minimum distances:
    farthest = np.argmax(min_dist)
    greedy_indices.append(farthest)
    for i in range(amount - 1):
        dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])), unlabeled)
        min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
        min_dist = np.min(min_dist, axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)

    return np.array(greedy_indices)
Example #11
Source File: example.py From tensorflow-som with MIT License | 5 votes |
def get_umatrix(input_vects, weights, m, n):
    """ Generates an n x m u-matrix of the SOM's weights and bmu indices of all the input data points
    Used to visualize higher-dimensional data. Shows the average distance between a SOM unit
    and its neighbors. When displayed, areas of a darker color separated by lighter colors
    correspond to clusters of units which encode similar information.
    :param weights: SOM weight matrix, `ndarray`
    :param m: Rows of neurons
    :param n: Columns of neurons
    :return: m x n u-matrix `ndarray`
    :return: input_size x 1 bmu indices 'ndarray'
    """
    umatrix = np.zeros((m * n, 1))
    # Get the location of the neurons on the map to figure out their neighbors. I know I already
    # have this in the SOM code but I put it here too to make it easier to follow.
    neuron_locs = list()
    for i in range(m):
        for j in range(n):
            neuron_locs.append(np.array([i, j]))
    # Get the map distance between each neuron (i.e. not the weight distance).
    neuron_distmat = distance_matrix(neuron_locs, neuron_locs)

    for i in range(m * n):
        # Get the indices of the units which neighbor i
        neighbor_idxs = neuron_distmat[i] <= 1  # Change this to `< 2` if you want to include diagonal neighbors
        # Get the weights of those units
        neighbor_weights = weights[neighbor_idxs]
        # Get the average distance between unit i and all of its neighbors
        # Expand dims to broadcast to each of the neighbors
        umatrix[i] = distance_matrix(np.expand_dims(weights[i], 0), neighbor_weights).mean()

    bmu_indices = []
    for vect in input_vects:
        min_index = min([i for i in range(len(list(weights)))],
                        key=lambda x: np.linalg.norm(vect - list(weights)[x]))
        bmu_indices.append(neuron_locs[min_index])

    return umatrix, bmu_indices
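A hedged usage sketch, with random weights standing in for a trained SOM (assumes the get_umatrix definition above plus the imports shown):

import numpy as np
from scipy.spatial import distance_matrix

m, n, dim = 4, 5, 3
weights = np.random.rand(m * n, dim)    # hypothetical trained SOM weights
data = np.random.rand(10, dim)          # hypothetical input vectors

umatrix, bmu_indices = get_umatrix(data, weights, m, n)
print(umatrix.reshape(m, n))            # reshape the flat u-matrix for plotting
print(bmu_indices[0])                   # (row, col) of the first sample's BMU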
Example #12
Source File: test_kdtree.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_distance_matrix_looping():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m, k)
    ys = np.random.randn(n, k)
    ds = distance_matrix(xs, ys)
    dsl = distance_matrix(xs, ys, threshold=1)
    assert_equal(ds, dsl)
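The threshold argument controls memory use: when m * n * k exceeds it, SciPy computes the matrix with a Python loop over rows instead of one large temporary array. The values are the same either way, which is exactly what this test asserts. A quick sketch:

import numpy as np
from scipy.spatial import distance_matrix

pts = np.random.rand(200, 3)
d_fast = distance_matrix(pts, pts)                     # vectorized path
d_low_mem = distance_matrix(pts, pts, threshold=1000)  # looped path
assert np.allclose(d_fast, d_low_mem)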
Example #13
Source File: test_kdtree.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_distance_matrix():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m, k)
    ys = np.random.randn(n, k)
    ds = distance_matrix(xs, ys)
    assert_equal(ds.shape, (m, n))
    for i in range(m):
        for j in range(n):
            assert_almost_equal(minkowski_distance(xs[i], ys[j]), ds[i, j])
Example #14
Source File: cof.py From pyod with BSD 2-Clause "Simplified" License | 5 votes |
def _cof(self, X):
    """
    Connectivity-Based Outlier Factor (COF) Algorithm
    This function is called internally to calculate the
    Connectivity-Based Outlier Factor (COF) as an outlier score for observations.
    :return: numpy array containing COF scores for observations.
             The greater the COF, the greater the outlierness.
    """
    dist_matrix = np.array(distance_matrix(X, X))
    sbn_path_index, ac_dist, cof_ = [], [], []
    for i in range(X.shape[0]):
        sbn_path = sorted(range(len(dist_matrix[i])), key=dist_matrix[i].__getitem__)
        sbn_path_index.append(sbn_path[1: self.n_neighbors_ + 1])
        cost_desc = []
        for j in range(self.n_neighbors_):
            cost_desc.append(np.min(dist_matrix[sbn_path[j + 1]][sbn_path][:j + 1]))
        acd = []
        for _h, cost_ in enumerate(cost_desc):
            neighbor_add1 = self.n_neighbors_ + 1
            acd.append(((2. * (neighbor_add1 - (_h + 1))) /
                        (neighbor_add1 * self.n_neighbors_)) * cost_)
        ac_dist.append(np.sum(acd))
    for _g in range(X.shape[0]):
        cof_.append((ac_dist[_g] * self.n_neighbors_) /
                    np.sum(itemgetter(*sbn_path_index[_g])(ac_dist)))
    return np.nan_to_num(cof_)
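In practice this internal method is reached through the detector's public interface; a brief usage sketch, assuming pyod's usual fit/decision_scores_ API (check the pyod docs for the exact constructor arguments):

import numpy as np
from pyod.models.cof import COF

X = np.random.rand(100, 2)
clf = COF(n_neighbors=10)
clf.fit(X)
print(clf.decision_scores_[:5])   # larger COF score -> more outlying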
Example #15
Source File: test_kdtree.py From Computable with MIT License | 5 votes |
def test_distance_matrix_looping():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m, k)
    ys = np.random.randn(n, k)
    ds = distance_matrix(xs, ys)
    dsl = distance_matrix(xs, ys, threshold=1)
    assert_equal(ds, dsl)
Example #16
Source File: preprocess.py From molecularGNN_3Dstructure with Apache License 2.0 | 4 votes |
def create_datasets(dataset, physical_property, device):

    dir_dataset = '../dataset/' + dataset + '/'

    """Initialize atom_dict, in which each key is an atom type
    and each value is its index.
    """
    atom_dict = defaultdict(lambda: len(atom_dict))

    def create_dataset(filename):

        print(filename)

        """Load a dataset."""
        with open(dir_dataset + filename, 'r') as f:
            property_types = f.readline().strip().split()
            data_original = f.read().strip().split('\n\n')

        """The physical_property is an energy, HOMO, or LUMO."""
        property_index = property_types.index(physical_property)

        dataset = []

        for data in data_original:

            data = data.strip().split('\n')
            idx = data[0]
            property = float(data[-1].split()[property_index])

            """Load the atoms and their coordinates of a molecule."""
            atoms, atom_coords = [], []
            for atom_xyz in data[1:-1]:
                atom, x, y, z = atom_xyz.split()
                atoms.append(atom)
                xyz = [float(v) for v in [x, y, z]]
                atom_coords.append(xyz)

            """Create each data with the above defined functions."""
            atoms = create_atoms(atoms, atom_dict)
            distance_matrix = create_distances(atom_coords)
            molecular_size = len(atoms)

            """Transform each of the above numpy arrays into a pytorch tensor
            on a device (i.e., CPU or GPU).
            """
            atoms = torch.LongTensor(atoms).to(device)
            distance_matrix = torch.FloatTensor(distance_matrix).to(device)
            property = torch.FloatTensor([[property]]).to(device)

            dataset.append((atoms, distance_matrix, molecular_size, property))

        return dataset

    dataset_train = create_dataset('data_train.txt')
    dataset_train, dataset_dev = split_dataset(dataset_train, 0.9)
    dataset_test = create_dataset('data_test.txt')

    N_atoms = len(atom_dict)

    return dataset_train, dataset_dev, dataset_test, N_atoms
Example #17
Source File: turbine.py From floris with Apache License 2.0 | 4 votes |
def calculate_swept_area_velocities(self, local_wind_speed, coord, x, y, z):
    """
    This method calculates and returns the wind speeds at each
    rotor swept area grid point for the turbine, interpolated from
    the flow field grid.

    Args:
        wind_direction (float): The wind farm wind direction (deg).
        local_wind_speed (np.array): The wind speed at each grid point
            in the flow field (m/s).
        coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
        x (np.array): The x-coordinates of the flow field grid.
        y (np.array): The y-coordinates of the flow field grid.
        z (np.array): The z-coordinates of the flow field grid.

    Returns:
        np.array: The wind speed at each rotor grid point
        for the turbine (m/s).
    """
    u_at_turbine = local_wind_speed

    # TODO:
    # # PREVIOUS METHOD========================
    # # UNCOMMENT IF ANY ISSUE UNCOVERED WITH NEW METHOD
    # x_grid = x
    # y_grid = y
    # z_grid = z

    # yPts = np.array([point[0] for point in self.grid])
    # zPts = np.array([point[1] for point in self.grid])

    # # interpolate from the flow field to get the flow field at the grid
    # # points
    # dist = [np.sqrt((coord.x1 - x_grid)**2 \
    #         + (coord.x2 + yPts[i] - y_grid)**2 \
    #         + (self.hub_height + zPts[i] - z_grid)**2) \
    #         for i in range(len(yPts))]
    # idx = [np.where(dist[i] == np.min(dist[i])) for i in range(len(yPts))]
    # data = [np.mean(u_at_turbine[idx[i]]) for i in range(len(yPts))]
    # # PREVIOUS METHOD========================

    # # NEW METHOD========================
    # Sort by distance
    flow_grid_points = np.column_stack([x.flatten(), y.flatten(), z.flatten()])

    # Set up a grid array
    y_array = np.array(self.grid)[:, 0] + coord.x2
    z_array = np.array(self.grid)[:, 1] + self.hub_height
    x_array = np.ones_like(y_array) * coord.x1
    grid_array = np.column_stack([x_array, y_array, z_array])

    ii = np.argmin(distance_matrix(flow_grid_points, grid_array), axis=0)

    # return np.array(data)
    return np.array(u_at_turbine.flatten()[ii])
Example #18
Source File: distance.py From compas with MIT License | 4 votes |
def closest_points_in_cloud_numpy(points, cloud, threshold=10**7, distances=True, num_nbrs=1):
    """Find the closest points in a point cloud to a set of sample points.

    Parameters
    ----------
    points : array, list
        The sample points (n,).
    cloud : array, list
        The cloud points to compare to (n,).
    threshold : float
        Points are checked within this distance.
    distances : bool
        Return distance matrix.
    num_nbrs : int
        The number of nearest neighbors to return per point.

    Returns
    -------
    list
        Indices of the closest points in the cloud per point in points.
    array
        Distances between points and closest points in cloud (n x n).

    Notes
    -----
    Items in cloud further from items in points than threshold return zero
    distance and will affect the indices returned if not set suitably high.

    Examples
    --------
    >>> a = np.random.rand(4, 3)
    >>> b = np.random.rand(4, 3)
    >>> indices, distances = closest_points_in_cloud_numpy(a, b, distances=True)
    [1, 2, 0, 3]
    array([[ 1.03821946,  0.66226402,  0.67964346,  0.98877891],
           [ 0.4650432 ,  0.54484186,  0.36158995,  0.60385484],
           [ 0.19562088,  0.73240154,  0.50235761,  0.51439644],
           [ 0.84680233,  0.85390316,  0.72154983,  0.50432293]])

    """
    from numpy import asarray
    from numpy import argmin
    from numpy import argpartition
    from scipy.spatial import distance_matrix

    points = asarray(points).reshape((-1, 3))
    cloud = asarray(cloud).reshape((-1, 3))
    d_matrix = distance_matrix(points, cloud, threshold=threshold)
    if num_nbrs == 1:
        indices = argmin(d_matrix, axis=1)
    else:
        indices = argpartition(d_matrix, num_nbrs, axis=1)
    if distances:
        return indices, d_matrix
    return indices
Example #19
Source File: mfm.py From yass with Apache License 2.0 | 4 votes |
def calc_mahalonobis(vbParam, score):
    prec = np.transpose(
        vbParam.Vhat * vbParam.nuhat[np.newaxis, np.newaxis, :, np.newaxis],
        axes=[2, 3, 0, 1])
    scoremhat = np.transpose(
        score[:, :, np.newaxis, :] - vbParam.muhat, axes=[0, 2, 3, 1])
    maha = np.sqrt(
        np.sum(
            np.matmul(
                np.matmul(scoremhat[:, :, :, np.newaxis, :], prec),
                scoremhat[:, :, :, :, np.newaxis]),
            axis=(3, 4),
            keepdims=False))
    return maha[:, :, 0]


# def merge_move_quick(maskedData, vbParam, cluster_id, param):

#     n_merged = 0
#     n_features, n_clusters, n_channels = vbParam.muhat.shape
#     n_data = cluster_id.shape[0]
#     cluster_id_list = make_list_by_id(cluster_id)

#     if n_clusters > 1:
#         all_checked = 0
#     else:
#         all_checked = 1

#     while (not all_checked) and (n_clusters > 1):
#         m = np.transpose(vbParam.muhat, [1, 0, 2]).reshape(
#             [n_clusters, n_features * n_channels], order="F")
#         kdist = ss.distance_matrix(m, m)
#         kdist[np.tril_indices(n_clusters)] = np.inf

#         merged = 0
#         k_untested = np.ones(n_clusters)
#         while np.sum(k_untested) > 0 and merged == 0:
#             untested_k = np.argwhere(k_untested)
#             ka = untested_k[np.random.choice(len(untested_k), 1)].ravel()[0]
#             kb = np.argmin(kdist[ka, :]).ravel()[0]
#             k_untested[ka] = 0
#             if np.argmin(kdist[kb, :]).ravel()[0] == ka:
#                 k_untested[kb] = 0
#             kdist[min(ka, kb), max(ka, kb)] = np.inf

#             vbParam, cluster_id_list, merged = check_merge_quick(
#                 maskedData, vbParam, cluster_id_list, ka, kb, param)

#             if merged:
#                 n_merged += 1
#                 n_clusters -= 1

#         if not merged:
#             all_checked = 1

#     return vbParam, make_id_array(cluster_id_list, n_data)