Python numpy.setdiff1d() Examples
The following are 29 code examples of numpy.setdiff1d().
You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
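As a quick orientation first: np.setdiff1d(ar1, ar2) returns the sorted, unique values in ar1 that are not in ar2. A minimal, self-contained sketch:

>>> import numpy as np
>>> a = np.array([3, 1, 7, 1, 2, 7])
>>> b = np.array([2, 7, 9])
>>> np.setdiff1d(a, b)   # duplicates removed, result sorted
array([1, 3])
>>> np.setdiff1d(b, a)   # not symmetric
array([9])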
Example #1
Source File: tcpr.py From libTLDA with MIT License
def add_intercept(self, X):
    """Add 1's to data as last features."""
    # Data shape
    N, D = X.shape

    # Check if there's not already an intercept column
    if np.any(np.sum(X, axis=0) == N):

        # Report
        print('Intercept is not the last feature. Swapping..')

        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)

        # Swap intercept to last
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

    # Add intercept as last column
    X = np.hstack((X, np.ones((N, 1))))

    # Append column of 1's to data, and increment dimensionality
    return X, D+1
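Indexing with np.setdiff1d(np.arange(D), intercept_index), as above, is a common idiom for keeping every column except the ones listed. A small illustrative sketch (the array here is made up, not from libTLDA):

>>> X = np.arange(12).reshape(3, 4)
>>> keep = np.setdiff1d(np.arange(X.shape[1]), [2])  # all columns except index 2
>>> X[:, keep]
array([[ 0,  1,  3],
       [ 4,  5,  7],
       [ 8,  9, 11]])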
Example #2
Source File: test_util.py From libTLDA with MIT License
def test_one_hot():
    """Check if one_hot returns correct label matrices."""
    # Generate label vector
    y = np.hstack((np.ones((10,))*0,
                   np.ones((10,))*1,
                   np.ones((10,))*2))

    # Map to matrix
    Y, labels = one_hot(y)

    # Check for only 0's and 1's
    assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0

    # Check for correct labels
    assert np.all(labels == np.unique(y))

    # Check correct shape of matrix
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels)
Example #3
Source File: diffussion.py From manifold-diffusion with MIT License
def dfs_trunk(sim, A, alpha=0.99, QUERYKNN=10, maxiter=8, K=100, tol=1e-3):
    qsim = sim_kernel(sim).T
    sortidxs = np.argsort(-qsim, axis=1)
    for i in range(len(qsim)):
        qsim[i, sortidxs[i, QUERYKNN:]] = 0
    qsims = sim_kernel(qsim)
    W = sim_kernel(A)
    W = csr_matrix(topK_W(W, K))
    out_ranks = []
    t = time()
    for i in range(qsims.shape[0]):
        qs = qsims[i, :]
        tt = time()
        w_idxs, W_trunk = find_trunc_graph(qs, W, 2)
        Wn = normalize_connection_graph(W_trunk)
        Wnn = eye(Wn.shape[0]) - alpha * Wn
        f, inf = s_linalg.minres(Wnn, qs[w_idxs], tol=tol, maxiter=maxiter)
        ranks = w_idxs[np.argsort(-f.reshape(-1))]
        missing = np.setdiff1d(np.arange(A.shape[1]), ranks)
        out_ranks.append(np.concatenate([ranks.reshape(-1, 1), missing.reshape(-1, 1)], axis=0))
        # print time() - t, 'qtime'
    out_ranks = np.concatenate(out_ranks, axis=1)
    return out_ranks
Example #4
Source File: label.py From Mastering-Elasticsearch-7.0 with MIT License
def inverse_transform(self, y):
    """Transform labels back to original encoding.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    y : numpy array of shape [n_samples]
    """
    check_is_fitted(self, 'classes_')
    y = column_or_1d(y, warn=True)
    # inverse transform of empty array is empty array
    if _num_samples(y) == 0:
        return np.array([])

    diff = np.setdiff1d(y, np.arange(len(self.classes_)))
    if len(diff):
        raise ValueError(
            "y contains previously unseen labels: %s" % str(diff))
    y = np.asarray(y)
    return self.classes_[y]
Example #5
Source File: utils.py From dgl with Apache License 2.0
def set_diff(ar1, ar2):
    """Find the set difference of two index arrays.
    Return the unique values in ar1 that are not in ar2.

    Parameters
    ----------
    ar1: utils.Index
        Input index array.
    ar2: utils.Index
        Input comparison index array.

    Returns
    -------
    setdiff:
        Array of values in ar1 that are not in ar2.
    """
    ar1_np = ar1.tonumpy()
    ar2_np = ar2.tonumpy()
    setdiff = np.setdiff1d(ar1_np, ar2_np)
    setdiff = toindex(setdiff)
    return setdiff
Example #6
Source File: bagging.py From Mastering-Elasticsearch-7.0 with MIT License
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=np.int)

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)

        else:
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
Example #7
Source File: circuit.py From viznet with MIT License
def focus(self, lines):
    '''
    focus to target lines

    Args:
        lines (list): the target lines to put up.
    '''
    alllines = range(self.num_bit)
    pin = NodeBrush('pin')
    old_positions = []
    for i in range(self.num_bit):
        old_positions.append(self.gate(pin, i))

    lmap = np.append(lines, np.setdiff1d(alllines, lines))
    self.x += 0.8
    pins = []
    for opos, j in zip(old_positions, lmap):
        pi = Pin(self.get_position(j))
        self.node_dict[j].append(pi)
        self.edge >> (opos, pi)
        pins.append(pi)
    return pins
Example #8
Source File: data_utils.py From bdol-ml with GNU Lesser General Public License v3.0
def cross_validation_folds(n, k=5):
    if n % k != 0:
        skip = int(np.floor(float(n)/float(k)))
    else:
        skip = n // k  # integer division so the slice bounds stay ints

    ind = np.arange(n)
    np.random.shuffle(ind)

    train_ind = dict()
    val_ind = dict()
    for i in range(k):
        if i == k-1:
            # Use the rest of the examples
            val = ind[skip*i:]
        else:
            val = ind[skip*i:skip*(i+1)]
        # Train on everything outside the current validation fold
        train = np.setdiff1d(ind, val)
        val_ind[i] = val
        train_ind[i] = train

    return train_ind, val_ind
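Each fold's training and validation indices partition np.arange(n), which setdiff1d itself can verify. A quick sanity check one might run (illustrative, not from bdol-ml):

>>> train_ind, val_ind = cross_validation_folds(100, k=5)
>>> all(np.setdiff1d(np.arange(100), np.union1d(train_ind[i], val_ind[i])).size == 0
...     for i in range(5))
True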
Example #9
Source File: auto_arima_forecast.py From driverlessai-recipes with Apache License 2.0
def update_history(self, X: dt.Frame, y: np.array = None):
    """
    Update the model fit with additional observed endog/exog values.
    Updating an ARIMA adds new observations to the model, updating the MLE of
    the parameters accordingly by performing several new iterations (maxiter)
    from the existing model parameters.
    :param X: Datatable Frame containing input features
    :param y: Numpy array containing new observations to update the ARIMA model
    :return:
    """
    X = X.to_pandas()
    XX = X[self.tgc].copy()
    XX['y'] = np.array(y)
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]
    for key, X in XX_grp:
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        # print("auto arima - update history with data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        order = np.argsort(X[self.time_column])
        if grp_hash in self.models:
            model = self.models[grp_hash]
            if model is not None:
                model.update(X['y'].values[order])
    return self
Example #10
Source File: eval_metrics.py From TKP with Apache License 2.0
def evaluate(distmat, q_pids, g_pids, q_camids, g_camids):
    num_q, num_g = distmat.shape
    index = np.argsort(distmat, axis=1)  # from small to large

    num_no_gt = 0  # num of query imgs without groundtruth
    num_r1 = 0
    CMC = np.zeros(len(g_pids))
    AP = 0

    for i in range(num_q):
        # groundtruth index
        query_index = np.argwhere(g_pids == q_pids[i])
        camera_index = np.argwhere(g_camids == q_camids[i])
        good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
        if good_index.size == 0:
            num_no_gt += 1
            continue
        # remove gallery samples that have the same pid and camid with query
        junk_index = np.intersect1d(query_index, camera_index)

        ap_tmp, CMC_tmp = compute_ap_cmc(index[i], good_index, junk_index)
        if CMC_tmp[0] == 1:
            num_r1 += 1
        CMC = CMC + CMC_tmp
        AP += ap_tmp

    if num_no_gt > 0:
        print("{} query imgs do not have groundtruth.".format(num_no_gt))

    # print("R1:{}".format(num_r1))

    CMC = CMC / (num_q - num_no_gt)
    mAP = AP / (num_q - num_no_gt)

    return CMC, mAP
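Note the assume_unique=True flag above: it skips setdiff1d's internal unique() calls, which is safe here because np.argwhere yields each gallery index at most once, but it silently changes the result when the inputs do contain duplicates. A small sketch of the difference:

>>> a = np.array([1, 1, 2, 3])
>>> np.setdiff1d(a, [2])                      # deduplicates and sorts
array([1, 3])
>>> np.setdiff1d(a, [2], assume_unique=True)  # duplicates leak through
array([1, 1, 3])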
Example #11
Source File: acquisition.py From GPflowOpt with Apache License 2.0
def objective_indices(self):
    """
    Method returning the indices of the model outputs which are objective
    functions. By default all outputs are objectives.

    :return: indices to the objectives, size R
    """
    return np.setdiff1d(np.arange(self.data[1].shape[1]), self.constraint_indices())
Example #12
Source File: label.py From Mastering-Elasticsearch-7.0 with MIT License
def inverse_transform(self, yt):
    """Transform the given indicator matrix into label sets

    Parameters
    ----------
    yt : array or sparse matrix of shape (n_samples, n_classes)
        A matrix containing only 1s and 0s.

    Returns
    -------
    y : list of tuples
        The set of labels for each sample such that `y[i]` consists of
        `classes_[j]` for each `yt[i, j] == 1`.
    """
    check_is_fitted(self, 'classes_')

    if yt.shape[1] != len(self.classes_):
        raise ValueError('Expected indicator for {0} classes, but got {1}'
                         .format(len(self.classes_), yt.shape[1]))

    if sp.issparse(yt):
        yt = yt.tocsr()
        if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
            raise ValueError('Expected only 0s and 1s in label indicator.')
        return [tuple(self.classes_.take(yt.indices[start:end]))
                for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
    else:
        unexpected = np.setdiff1d(yt, [0, 1])
        if len(unexpected) > 0:
            raise ValueError('Expected only 0s and 1s in label indicator. '
                             'Also got {0}'.format(unexpected))
        return [tuple(self.classes_.compress(indicators)) for indicators
                in yt]
Example #13
Source File: _base.py From Mastering-Elasticsearch-7.0 with MIT License
def transform(self, X):
    """Generate missing values indicator for X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data to complete.

    Returns
    -------
    Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
        The missing indicator for input data. The data type of ``Xt``
        will be boolean.
    """
    check_is_fitted(self, "features_")
    X = self._validate_input(X)

    if X.shape[1] != self._n_features:
        raise ValueError("X has a different number of features "
                         "than during fitting.")

    imputer_mask, features = self._get_missing_features_info(X)

    if self.features == "missing-only":
        features_diff_fit_trans = np.setdiff1d(features, self.features_)
        if (self.error_on_new and features_diff_fit_trans.size > 0):
            raise ValueError("The features {} have missing values "
                             "in transform but have no missing values "
                             "in fit.".format(features_diff_fit_trans))

        if self.features_.size < self._n_features:
            imputer_mask = imputer_mask[:, self.features_]

    return imputer_mask
Example #14
Source File: multilayer_perceptron.py From Mastering-Elasticsearch-7.0 with MIT License
def _validate_input(self, X, y, incremental):
    X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                     multi_output=True)
    if y.ndim == 2 and y.shape[1] == 1:
        y = column_or_1d(y, warn=True)

    if not incremental:
        self._label_binarizer = LabelBinarizer()
        self._label_binarizer.fit(y)
        self.classes_ = self._label_binarizer.classes_
    elif self.warm_start:
        classes = unique_labels(y)
        if set(classes) != set(self.classes_):
            raise ValueError("warm_start can only be used where `y` has "
                             "the same classes as in the previous "
                             "call to fit. Previously got %s, `y` has %s" %
                             (self.classes_, classes))
    else:
        classes = unique_labels(y)
        if len(np.setdiff1d(classes, self.classes_,
                            assume_unique=True)):
            raise ValueError("`y` has classes not in `self.classes_`."
                             " `self.classes_` has %s. 'y' has %s." %
                             (self.classes_, classes))

    y = self._label_binarizer.transform(y)
    return X, y
Example #15
Source File: data_utils.py From bdol-ml with GNU Lesser General Public License v3.0
def split_train_test(data, target, p=0.7):
    n = data.shape[0]
    num_train = int(np.floor(p * n))
    train_idx = np.random.choice(n, num_train, replace=False)
    test_idx = np.setdiff1d(np.arange(n), train_idx)

    train_data = data[train_idx, :]
    test_data = data[test_idx, :]
    train_target = target[train_idx]
    test_target = target[test_idx]

    return train_data, test_data, train_target, test_target
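A usage sketch with synthetic data (shapes chosen only for illustration):

>>> data = np.random.randn(10, 3)
>>> target = np.arange(10)
>>> tr_X, te_X, tr_y, te_y = split_train_test(data, target, p=0.7)
>>> tr_X.shape, te_X.shape
((7, 3), (3, 3))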
Example #16
Source File: env.py From DeepRobust with MIT License
def get_possible_nodes(self, target_node):
    # connected = set()
    connected = [target_node]
    for n1, n2 in self.edge_set:
        if n1 == target_node:
            # connected.add(target_node)
            connected.append(n2)
    return np.setdiff1d(self.node_set, np.array(connected))
    # return self.node_set - connected
Example #17
Source File: arraysetops.py From auto-alt-text-lambda-api with MIT License
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])

    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
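As the source shows, setdiff1d is just unique() followed by a boolean in1d mask. An equivalent hand-rolled version, for reference:

>>> a = np.array([1, 2, 3, 2, 4, 1])
>>> b = np.array([3, 4, 5, 6])
>>> u = np.unique(a)
>>> u[~np.in1d(u, b)]   # same result as np.setdiff1d(a, b)
array([1, 2])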
Example #18
Source File: _osqp.py From osqp-python with Apache License 2.0
def update_rho_vec(self):
    """
    Update values of rho_vec and refactor if constraints change.
    """
    # Find indices of loose bounds, equality constr and one-sided constr
    loose_ind = np.where(np.logical_and(
                    self.work.data.l < -OSQP_INFTY*MIN_SCALING,
                    self.work.data.u > OSQP_INFTY*MIN_SCALING))[0]
    eq_ind = np.where(self.work.data.u - self.work.data.l < RHO_TOL)[0]
    ineq_ind = np.setdiff1d(np.setdiff1d(np.arange(self.work.data.m),
                            loose_ind), eq_ind)

    # Find indices of current constraint types
    old_loose_ind = np.where(self.work.constr_type == -1)
    old_eq_ind = np.where(self.work.constr_type == 1)
    old_ineq_ind = np.where(self.work.constr_type == 0)

    # Check if type of any constraint changed
    constr_type_changed = (loose_ind != old_loose_ind).any() or \
                          (eq_ind != old_eq_ind).any() or \
                          (ineq_ind != old_ineq_ind).any()

    # Update type of constraints
    self.work.constr_type[loose_ind] = -1
    self.work.constr_type[eq_ind] = 1
    self.work.constr_type[ineq_ind] = 0

    self.work.rho_vec[loose_ind] = RHO_MIN
    self.work.rho_vec[eq_ind] = RHO_EQ_OVER_RHO_INEQ * \
        self.work.settings.rho
    self.work.rho_vec[ineq_ind] = self.work.settings.rho

    self.work.rho_inv_vec = np.reciprocal(self.work.rho_vec)

    if constr_type_changed:
        self.work.linsys_solver = linsys_solver(self.work)
Example #19
Source File: rl_s2v_env.py From DeepRobust with MIT License
def get_possible_nodes(self, target_node):
    connected = set()
    connected = []
    for n1, n2 in self.edge_set:
        if n1 == target_node:
            # connected.add(target_node)
            connected.append(n2)  # collect the neighbour, not the target itself
    return np.setdiff1d(self.node_set, np.array(connected))
    # return self.node_set - connected
Example #20
Source File: tfdeploy.py From tfdeploy with MIT License
def ListDiff(a, b):
    """
    List diff op.
    """
    d = np.setdiff1d(a, b)
    return d, np.searchsorted(a, d).astype(np.int32)
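This mirrors TensorFlow's ListDiff (tf.setdiff1d), which returns both the difference and the positions of those values in a. A usage sketch calling the function directly, with a sorted so that np.searchsorted recovers the right positions:

>>> ListDiff(np.array([1, 2, 3, 4, 5]), np.array([2, 4]))
(array([1, 3, 5]), array([0, 2, 4], dtype=int32))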
Example #21
Source File: general.py From strax with BSD 3-Clause "New" or "Revised" License
def split_by_containment(things, containers):
    """Return list of thing-arrays contained in each container

    Assumes everything is sorted, and containers are nonoverlapping
    """
    if not len(containers):
        return []

    # Index of which container each thing belongs to, or -1
    which_container = fully_contained_in(things, containers)

    # Restrict to things in containers
    mask = which_container != -1
    things = things[mask]
    which_container = which_container[mask]
    if not len(things):
        # np.split has confusing behaviour for empty arrays
        return [things[:0] for _ in range(len(containers))]

    # Split things up by container
    split_indices = np.where(np.diff(which_container))[0] + 1
    things_split = np.split(things, split_indices)

    # Insert empty arrays for empty containers
    empty_containers = np.setdiff1d(np.arange(len(containers)),
                                    np.unique(which_container))
    for c_i in empty_containers:
        things_split.insert(c_i, things[:0])

    return things_split
Example #22
Source File: grid.py From person-reid-lib with MIT License
def _prepare_split(self, images_info):
    """
    Image name format: 0001001.png, where first four digits represent identity
    and last four digits represent cameras. Camera 1&2 are considered the same
    view and camera 3&4 are considered the same view.
    """
    self.logger.info("Begin Load 10 random splits")
    split_mat = loadmat(self.split_mat_path)
    trainIdxAll = split_mat['trainIdxAll'][0]  # length = 10

    probe_info_all = images_info[0]
    gallery_info_all = images_info[1]

    probe_id_all = np.unique(probe_info_all[:, 0])
    gallery_id_all = np.unique(gallery_info_all[:, 0])

    splits = []
    for split_idx in range(10):
        train_idxs = trainIdxAll[split_idx][0][0][2][0]
        assert train_idxs.size == 125
        probe_id = np.setdiff1d(probe_id_all, train_idxs)
        gallery_id = np.setdiff1d(gallery_id_all, train_idxs)

        train_info = np.concatenate(
            (np_filter(probe_info_all, train_idxs),
             np_filter(gallery_info_all, train_idxs)),
            axis=0)

        probe_info = np_filter(probe_info_all, probe_id)
        gallery_info = np_filter(gallery_info_all, gallery_id)

        assert np.intersect1d(probe_id, gallery_id).size == probe_id.size
        assert probe_id.size == 125
        assert gallery_id.size == 126

        split = {}
        split['train'] = train_info
        split['probe'] = probe_info
        split['gallery'] = gallery_info
        split['info'] = 'GRID dataset. Split ID {:2d}'.format(split_idx)
        splits.append(split)

    self.logger.info("Load 10 random splits")
    return splits
Example #23
Source File: __init__.py From person-reid-lib with MIT License
def _print_info(self, train_info, test_info, probe_info, gallery_info):
    GalleryInds = np.unique(gallery_info[:, 0])
    probeInds = np.unique(probe_info[:, 0])
    self.logger.info(' Train Test Probe Gallery')
    self.logger.info('#ID {:5d} {:5d} {:5d} {:5d}'.format(
        np.unique(train_info[:, 0]).size,
        np.unique(test_info[:, 0]).size,
        np.unique(probe_info[:, 0]).size,
        np.unique(gallery_info[:, 0]).size))
    self.logger.info('#Track {:8d} {:8d} {:8d} {:8d}'.format(
        train_info.shape[0], test_info.shape[0],
        probe_info.shape[0], gallery_info.shape[0]))
    self.logger.info('#Image {:8d} {:8d} {:8d} {:8d}'.format(
        np.sum(train_info[:, 4]), np.sum(test_info[:, 4]),
        np.sum(probe_info[:, 4]), np.sum(gallery_info[:, 4])))
    self.logger.info('#Cam {:2d} {:2d} {:2d} {:2d}'.format(
        np.unique(train_info[:, 1]).size,
        np.unique(test_info[:, 1]).size,
        np.unique(probe_info[:, 1]).size,
        np.unique(gallery_info[:, 1]).size))
    self.logger.info('MaxLen {:8d} {:8d} {:8d} {:8d}'.format(
        np.max(train_info[:, 4]), np.max(test_info[:, 4]),
        np.max(probe_info[:, 4]), np.max(gallery_info[:, 4])))
    self.logger.info('MinLen {:8d} {:8d} {:8d} {:8d}'.format(
        np.min(train_info[:, 4]), np.min(test_info[:, 4]),
        np.min(probe_info[:, 4]), np.min(gallery_info[:, 4])))
    self.logger.info('Gallery ID diff Probe ID: %s' % np.setdiff1d(GalleryInds, probeInds))
    self.logger.info('{0:-^60}'.format(''))
Example #24
Source File: _osqp.py From osqp-python with Apache License 2.0
def set_rho_vec(self):
    """
    Set values of rho vector based on constraint types
    """
    self.work.settings.rho = np.minimum(np.maximum(
        self.work.settings.rho, RHO_MIN), RHO_MAX)

    # Find indices of loose bounds, equality constr and one-sided constr
    loose_ind = np.where(np.logical_and(
                    self.work.data.l < -OSQP_INFTY*MIN_SCALING,
                    self.work.data.u > OSQP_INFTY*MIN_SCALING))[0]
    eq_ind = np.where(self.work.data.u - self.work.data.l < RHO_TOL)[0]
    ineq_ind = np.setdiff1d(np.setdiff1d(np.arange(self.work.data.m),
                            loose_ind), eq_ind)

    # Type of constraints
    self.work.constr_type[loose_ind] = -1
    self.work.constr_type[eq_ind] = 1
    self.work.constr_type[ineq_ind] = 0

    self.work.rho_vec[loose_ind] = RHO_MIN
    self.work.rho_vec[eq_ind] = RHO_EQ_OVER_RHO_INEQ * \
        self.work.settings.rho
    self.work.rho_vec[ineq_ind] = self.work.settings.rho

    self.work.rho_inv_vec = np.reciprocal(self.work.rho_vec)
Example #25
Source File: bootstrap.py From stability-selection with BSD 3-Clause "New" or "Revised" License
def complementary_pairs_bootstrap(y, n_subsamples, random_state=None):
    """
    Complementary pairs bootstrap. Two subsamples A and B are generated, such
    that |A| = n_subsamples, the union of A and B equals {0, ..., n_samples - 1},
    and the intersection of A and B is the empty set. Samples irrespective of
    label.

    Parameters
    ----------
    y : array of size [n_subsamples,]
        True labels

    n_subsamples : int
        Number of subsamples in the bootstrap sample

    random_state : int, RandomState instance or None, optional, default=None
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    A : array of size [n_subsamples,]
        The sampled subsets of integer. The subset of selected integer might
        not be randomized, see the method argument.

    B : array of size [n_samples - n_subsamples,]
        The complement of A.
    """
    n_samples = y.shape[0]
    subsample = bootstrap_without_replacement(y, n_subsamples, random_state)
    complementary_subsample = np.setdiff1d(np.arange(n_samples), subsample)

    return subsample, complementary_subsample
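The core idea — draw subsample A without replacement, then take B as its setdiff1d complement — can be sketched without the library's bootstrap helper (the seed here is chosen arbitrarily):

>>> rng = np.random.RandomState(0)
>>> n_samples, n_subsamples = 10, 6
>>> A = rng.choice(n_samples, size=n_subsamples, replace=False)
>>> B = np.setdiff1d(np.arange(n_samples), A)
>>> np.array_equal(np.union1d(A, B), np.arange(n_samples)), np.intersect1d(A, B).size
(True, 0)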
Example #26
Source File: transform.py From unmixing with MIT License
def biophysical_composition_index(rast, tc_func=None, nodata=-9999):
    '''
    Calculates the biophysical composition index (BCI) of Deng and Wu (2012)
    in Remote Sensing of Environment 127. The NoData value is assumed to be
    negative (could never be the maximum value in a band).
    Arguments:
        rast    A NumPy Array or gdal.Dataset instance
        tc_func The function to be used to transform the input raster to
                Tasseled Cap brightness, greenness, and wetness
        nodata  The NoData value to ignore
    '''
    shp = rast.shape
    if tc_func is None:
        tc_func = tasseled_cap_tm

    # Perform the tasseled cap rotation
    x = tc_func(rast, ncomp=3).reshape(3, shp[1]*shp[2])
    unit = np.ones((1, shp[1] * shp[2]))
    stack = []
    for i in range(0, 3):
        # Calculate the minimum values after excluding NoData values
        tcmin = np.setdiff1d(x[i, ...].ravel(), np.array([nodata])).min()
        stack.append(np.divide(np.subtract(x[i, ...], unit * tcmin),
                               unit * (x[i, ...].max() - tcmin)))

    # Unpack the High-albedo, Vegetation, and Low-albedo components
    h, v, l = stack

    return np.divide(
        np.subtract(np.divide(np.add(h, l), unit * 2), v),
        np.add(np.divide(np.add(h, l), unit * 2), v))\
        .reshape((1, shp[1], shp[2]))
Example #27
Source File: arraysetops.py From lambda-packs with MIT License
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])

    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
Example #28
Source File: sampling_ops_test.py From lingvo with Apache License 2.0
def testZFilter(self, cmethod, nmethod, nalgo):
    b, n, m, k = 1, 10000, 128, 128
    g = tf.Graph()
    with g.as_default():
        points = tf.random.uniform(shape=(b, n, 3))
        points_padding = tf.zeros(shape=(b, n))
        center, center_padding, indices, indices_padding = ops.sample_points(
            points=points,
            points_padding=points_padding,
            num_seeded_points=0,
            center_selector=cmethod,
            neighbor_sampler=nmethod,
            num_centers=m,
            center_z_min=0.25,
            center_z_max=0.75,
            num_neighbors=k,
            max_distance=0.25)
        # Ensure shapes are known at graph construction.
        self.assertListEqual(center.shape.as_list(), [b, m])
        self.assertListEqual(center_padding.shape.as_list(), [b, m])
        self.assertListEqual(indices.shape.as_list(), [b, m, k])
        self.assertListEqual(indices_padding.shape.as_list(), [b, m, k])

    with self.session(graph=g):
        c1, p1 = self.evaluate([center, points])
        c2, p2 = self.evaluate([center, points])

        # With extremely high probability, sampling centers twice should be
        # different.
        self.assertGreater(np.setdiff1d(c1, c2).size, 0)

        # Centers should be filtered by z range.
        self.assertTrue((0.25 <= p1[0, c1[0], 2]).all())
        self.assertTrue((p1[0, c1[0], 2] <= 0.75).all())
        self.assertTrue((0.25 <= p2[0, c2[0], 2]).all())
        self.assertTrue((p2[0, c2[0], 2] <= 0.75).all())
Example #29
Source File: index_operations.py From paramz with BSD 3-Clause "New" or "Revised" License
def remove_indices(arr, to_remove):
    return numpy.setdiff1d(arr, to_remove, True)
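The bare True here is the assume_unique flag passed positionally. It skips the unique() calls (the index arrays paramz passes are already unique), and as a side effect the result keeps arr's original order instead of being sorted:

>>> remove_indices(numpy.array([5, 3, 1]), numpy.array([3]))
array([5, 1])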