Python numpy.flatnonzero() Examples
The following are 30 code examples of numpy.flatnonzero(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
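
Before the examples, a minimal sketch of what the function does: np.flatnonzero(a) returns the indices of the non-zero elements of a, taken over the flattened array, and is equivalent to np.nonzero(np.ravel(a))[0].

import numpy as np

a = np.array([[0, 3, 0],
              [5, 0, 7]])
idx = np.flatnonzero(a)            # indices into the flattened array
print(idx)                         # [1 3 5]
print(a.ravel()[idx])              # [3 5 7] -- the non-zero values
print(np.nonzero(np.ravel(a))[0])  # [1 3 5] -- equivalent long form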

Example #1
Source File: test_gee.py From vnpy_crypto with MIT License | 6 votes

def test_default_time(self):
    # Check that the time defaults work correctly.

    endog, exog, group = load_data("gee_logistic_1.csv")

    # Time values for the autoregressive model
    T = np.zeros(len(endog))
    idx = set(group)
    for ii in idx:
        jj = np.flatnonzero(group == ii)
        T[jj] = lrange(len(jj))

    family = Binomial()
    va = Autoregressive()

    md1 = GEE(endog, exog, group, family=family, cov_struct=va)
    mdf1 = md1.fit()

    md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va)
    mdf2 = md2.fit()

    assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
    assert_almost_equal(mdf1.standard_errors(), mdf2.standard_errors(),
                        decimal=6)

Example #2
Source File: compressed.py From lambda-packs with MIT License | 6 votes

def _minor_reduce(self, ufunc):
    """Reduce nonzeros with a ufunc over the minor axis when non-empty

    Warning: this does not call sum_duplicates()

    Returns
    -------
    major_index : array of ints
        Major indices where nonzero
    value : array of self.dtype
        Reduce result for nonzeros in each major_index
    """
    major_index = np.flatnonzero(np.diff(self.indptr))
    value = ufunc.reduceat(self.data,
                           downcast_intp_index(self.indptr[major_index]))
    return major_index, value


#######################
# Getting and Setting #
#######################

Example #3
Source File: bayes_mi.py From vnpy_crypto with MIT License | 6 votes

def update_data(self):
    """
    Gibbs update of the missing data values.
    """
    for ix in self.patterns:
        i = ix[0]
        ix_miss = np.flatnonzero(self.mask[i, :])
        ix_obs = np.flatnonzero(~self.mask[i, :])

        mm = self.mean[ix_miss]
        mo = self.mean[ix_obs]

        voo = self.cov[ix_obs, :][:, ix_obs]
        vmm = self.cov[ix_miss, :][:, ix_miss]
        vmo = self.cov[ix_miss, :][:, ix_obs]

        r = self.data[ix, :][:, ix_obs] - mo
        cm = mm + np.dot(vmo, np.linalg.solve(voo, r.T)).T
        cv = vmm - np.dot(vmo, np.linalg.solve(voo, vmo.T))

        cs = np.linalg.cholesky(cv)
        u = np.random.normal(size=(len(ix), len(ix_miss)))
        self.data[np.ix_(ix, ix_miss)] = cm + np.dot(u, cs.T)

Example #4
Source File: kernel_pca.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes

def transform(self, X):
    """Transform X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'X_fit_')

    # Compute centered gram matrix between X and training data X_fit_
    K = self._centerer.transform(self._get_kernel(X, self.X_fit_))

    # scale eigenvectors (properly account for null-space for dot product)
    non_zeros = np.flatnonzero(self.lambdas_)
    scaled_alphas = np.zeros_like(self.alphas_)
    scaled_alphas[:, non_zeros] = (self.alphas_[:, non_zeros]
                                   / np.sqrt(self.lambdas_[non_zeros]))

    # Project with a scalar product between K and the scaled eigenvectors
    return np.dot(K, scaled_alphas)

Example #5
Source File: node.py From edm2016 with Apache License 2.0 | 6 votes

def get_data_by_id(self, ids):
    """ Helper for getting current data values from stored identifiers
    :param float|list ids: ids for which data are requested
    :return: the stored ids
    :rtype: np.ndarray
    """
    if self.ids is None:
        raise ValueError("IDs not stored in node {}".format(self.name))
    if self.data is None:
        raise ValueError("No data in node {}".format(self.name))
    ids = np.array(ids, ndmin=1, copy=False)
    found_items = np.in1d(ids, self.ids)
    if not np.all(found_items):
        raise ValueError("Cannot find {} among {}".format(
            ids[np.logical_not(found_items)], self.name))
    idx = np.empty(len(ids), dtype='int')
    for k, this_id in enumerate(ids):
        if self.ids.ndim > 1:
            idx[k] = np.flatnonzero(np.all(self.ids == this_id, axis=1))[0]
        else:
            idx[k] = np.flatnonzero(self.ids == this_id)[0]
    return np.array(self.data, ndmin=1)[idx]

Example #6
Source File: test_linear_operators.py From edm2016 with Apache License 2.0 | 6 votes

def subset_test(lin_op):
    """ Test that subsetting a linear operator produces the correct outputs.
    :param LinearOperator lin_op: the linear operator
    """
    sub_idx = np.random.rand(lin_op.shape[0], 1) > 0.5
    # make sure at least one element included
    sub_idx[np.random.randint(0, len(sub_idx))] = True
    sub_idx = np.flatnonzero(sub_idx)
    sub_lin_op = undertest.get_subset_lin_op(lin_op, sub_idx)

    # test projection to subset of indices
    x = np.random.randn(lin_op.shape[1], np.random.randint(1, 3))
    np.testing.assert_array_almost_equal(sub_lin_op * x, (lin_op * x)[sub_idx, :])

    # test back projection from subset of indices
    y = np.random.randn(len(sub_idx), np.random.randint(1, 3))
    z = np.zeros((lin_op.shape[0], y.shape[1]))
    z[sub_idx] = y
    np.testing.assert_array_almost_equal(sub_lin_op.rmatvec(y), lin_op.rmatvec(z))

Example #7
Source File: test_survfunc.py From vnpy_crypto with MIT License | 6 votes

def test_incidence2():
    # Check that the cumulative incidence functions for all competing
    # risks sum to the complementary survival function.

    np.random.seed(2423)

    n = 200
    time = -np.log(np.random.uniform(size=n))
    status = np.random.randint(0, 3, size=n)
    ii = np.argsort(time)
    time = time[ii]
    status = status[ii]

    ci = CumIncidenceRight(time, status)

    statusa = 1 * (status >= 1)
    sf = SurvfuncRight(time, statusa)

    x = 1 - sf.surv_prob
    y = (ci.cinc[0] + ci.cinc[1])[np.flatnonzero(statusa)]

    assert_allclose(x, y)

Example #8
Source File: test_node.py From edm2016 with Apache License 2.0 | 6 votes

def test_get_data_by_id(self):
    dim, data, cpd, ids = self.gen_data()
    node = undertest.Node(name='test node', data=data, cpd=cpd, ids=ids)
    # test setting of ids
    np.testing.assert_array_equal(node.ids, ids)
    # test for one id
    idx = np.random.randint(0, dim)
    np.testing.assert_array_equal(node.get_data_by_id(ids[idx]).ravel(),
                                  node.data[idx])
    # test for a random set of ids
    ids_subset = np.random.choice(ids, dim, replace=True)
    np.testing.assert_array_equal(node.get_data_by_id(ids_subset),
                                  [node.data[np.flatnonzero(ids == x)[0]]
                                   for x in ids_subset])
    # test for all ids
    self.assertEqual(node.get_all_data_and_ids(),
                     {x: node.get_data_by_id(x) for x in ids})
    # test when data are singleton
    dim, _, cpd, ids = self.gen_data(dim=1)
    node = undertest.Node(name='test node', data=1, cpd=cpd, ids=ids)
    self.assertEqual(node.get_all_data_and_ids(),
                     {x: node.get_data_by_id(x) for x in ids})

Example #9
Source File: bayes_mixed_glm.py From vnpy_crypto with MIT License | 6 votes

def vb_elbo_grad(self, vb_mean, vb_sd):
    """
    Returns the gradient of the model's evidence lower bound (ELBO).
    """
    fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
    fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
    tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

    def h(z):
        u = tm + np.sqrt(tv) * z
        x = np.zeros_like(u)
        ii = np.flatnonzero(u > 0)
        uu = u[ii]
        x[ii] = 1 / (1 + np.exp(-uu))
        ii = np.flatnonzero(u <= 0)
        uu = u[ii]
        x[ii] = np.exp(uu) / (1 + np.exp(uu))
        return -x

    return self.vb_elbo_grad_base(
        h, tm, tv, fep_mean, vcp_mean, vc_mean,
        fep_sd, vcp_sd, vc_sd)

Example #10
Source File: cov_struct.py From vnpy_crypto with MIT License | 6 votes

def covariance_matrix(self, endog_expval, index):

    if self.grid:
        return self.covariance_matrix_grid(endog_expval, index)

    j1, j2 = np.tril_indices(len(endog_expval))
    dx = np.abs(self.time[index][j1] - self.time[index][j2])
    ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
    j1 = j1[ii]
    j2 = j2[ii]
    dx = dx[ii]

    cmat = np.eye(len(endog_expval))
    cmat[j1, j2] = self.dep_params[dx - 1]
    cmat[j2, j1] = self.dep_params[dx - 1]
    return cmat, True

Example #11
Source File: Math.py From pyberny with Mozilla Public License 2.0 | 6 votes

def pinv(A, log=lambda _: None):
    U, D, V = np.linalg.svd(A)
    thre = 1e3
    thre_log = 1e8
    gaps = D[:-1] / D[1:]
    try:
        n = np.flatnonzero(gaps > thre)[0]
    except IndexError:
        n = len(gaps)
    else:
        gap = gaps[n]
        if gap < thre_log:
            log('Pseudoinverse gap of only: {:.1e}'.format(gap))
    D[n + 1:] = 0
    D[:n + 1] = 1 / D[:n + 1]
    return U.dot(np.diag(D)).dot(V)

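A quick sanity check, not from pyberny: for a well-conditioned symmetric positive-definite matrix there is no singular-value gap above thre, so every singular value is inverted and the result should coincide with np.linalg.pinv.

import numpy as np

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
# No gap between consecutive singular values exceeds 1e3,
# so all of them are inverted.
print(np.allclose(pinv(A), np.linalg.pinv(A)))  # True
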
Example #12
Source File: signatures.py From spotpy with MIT License | 6 votes

def fill_nan(data):
    """
    Returns the timeseries where any gaps (represented by NaN) are filled,
    using a linear approximation between the neighbors.
    Gaps at the beginning or end are filled with the first resp. last valid entry

    :param data: The timeseries data as a numeric sequence
    :return: The filled timeseries as array
    """
    # All data indices
    x = np.arange(len(data))
    # Valid data indices
    xp = np.flatnonzero(np.isfinite(data))
    # Valid data
    fp = remove_nan(data)
    # Interpolate missing values
    return np.interp(x, xp, fp)

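remove_nan is a helper from the same module; assuming it simply drops the non-finite entries, the gap-filling idiom can be reproduced standalone:

import numpy as np

data = np.array([1.0, np.nan, np.nan, 4.0, 5.0])
x = np.arange(len(data))
xp = np.flatnonzero(np.isfinite(data))  # valid data indices
print(np.interp(x, xp, data[xp]))       # [1. 2. 3. 4. 5.]
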
Example #13
Source File: coords.py From pyberny with Mozilla Public License 2.0 | 6 votes

def get_clusters(C):
    nonassigned = list(range(len(C)))
    clusters = []
    while nonassigned:
        queue = {nonassigned[0]}
        clusters.append([])
        while queue:
            node = queue.pop()
            clusters[-1].append(node)
            nonassigned.remove(node)
            queue.update(n for n in np.flatnonzero(C[node]) if n in nonassigned)
    C = np.zeros_like(C)
    for cluster in clusters:
        for i in cluster:
            C[i, cluster] = True
    return clusters, C

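Called on a small symmetric connectivity matrix (an illustrative input, not taken from pyberny), it recovers the connected components:

import numpy as np

C = np.array([[0, 1, 0, 0],
              [1, 0, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 1, 0]], dtype=bool)
clusters, C_full = get_clusters(C)
print(clusters)  # [[0, 1], [2, 3]]
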
Example #14
Source File: sas_xport.py From vnpy_crypto with MIT License | 5 votes

def _record_count(self):
    """
    Get number of records in file.

    This is maybe suboptimal because we have to seek to the end of
    the file.

    Side effect: returns file position to record_start.
    """
    self.filepath_or_buffer.seek(0, 2)
    total_records_length = (self.filepath_or_buffer.tell() -
                            self.record_start)

    if total_records_length % 80 != 0:
        warnings.warn("xport file may be corrupted")

    if self.record_length > 80:
        self.filepath_or_buffer.seek(self.record_start)
        return total_records_length // self.record_length

    self.filepath_or_buffer.seek(-80, 2)
    last_card = self.filepath_or_buffer.read(80)
    last_card = np.frombuffer(last_card, dtype=np.uint64)

    # 8 byte blank
    ix = np.flatnonzero(last_card == 2314885530818453536)

    if len(ix) == 0:
        tail_pad = 0
    else:
        tail_pad = 8 * len(ix)

    self.filepath_or_buffer.seek(self.record_start)

    return (total_records_length - tail_pad) // self.record_length

Example #15
Source File: rock_sample.py From Chimp with Apache License 2.0 | 5 votes

def categorical(self, d):
    return np.flatnonzero(self.random_state.multinomial(1, d, 1))[0]


#################################################################
# Create functions
#################################################################

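The one-draw trick works with plain NumPy as well (a hedged sketch in which random_state is just a np.random.RandomState): multinomial(1, d) returns a one-hot vector, and flatnonzero extracts the index of the chosen category.

import numpy as np

rng = np.random.RandomState(0)
d = [0.2, 0.5, 0.3]  # category probabilities
print(np.flatnonzero(rng.multinomial(1, d, 1))[0])  # an index in {0, 1, 2}
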
Example #16
Source File: shack_hartmann.py From hcipy with MIT License | 5 votes

def __init__(self, mla_grid, mla_index, estimation_subapertures=None):
    self.mla_grid = mla_grid
    self.mla_index = mla_index

    if estimation_subapertures is None:
        self.estimation_subapertures = np.unique(self.mla_index)
    else:
        self.estimation_subapertures = np.flatnonzero(np.array(estimation_subapertures))
    self.estimation_grid = self.mla_grid.subset(estimation_subapertures)

Example #17
Source File: ngram.py From wordkit with GNU General Public License v3.0 | 5 votes

def list_features(self, X):
    """Lists the features for each word."""
    if isinstance(X, np.ndarray):
        for x in X:
            yield tuple([self.inv_features[idx]
                         for idx in np.flatnonzero(x)])
    else:
        X = self._unpack(X)
        for x in X:
            yield tuple(zip(*self._decompose(x)))[1]

Example #18
Source File: ngram.py From wordkit with GNU General Public License v3.0 | 5 votes

def inverse_transform(self, X, threshold=.9):
    """
    Convert a vector back into its constituent ngrams.

    WARNING: this currently does not work.
    """
    inverted = []
    inverted_features = {v: k for k, v in self.features.items()}

    if not self.use_padding:
        raise ValueError("This function is only supported when use_padding"
                         " is set to True.")

    if np.ndim(X) == 1:
        X = X[None, :]

    for x in X:
        t = []
        for idx in np.flatnonzero(x > threshold):
            t.append(inverted_features[idx])
        cols = list(zip(*t))
        s, e = list(zip(*cols[:-1])), list(zip(*cols[1:]))
        pad = tuple(["#"] * (self.n - 1))
        f = list(set(s) - (set(e) - {pad}))[0]
        word = []
        while len(word) < len(t):
            idx = s.index(f)
            word.append(t[idx][0])
            f = e[idx]
        else:
            word.extend(t[idx][1:])
        inverted.append("".join(word).strip("#"))

    return inverted

Example #19
Source File: PR2.py From visual_dynamics with MIT License | 5 votes

def __init__(self):
    # set up openrave
    self.env = rave.Environment()
    self.env.StopSimulation()
    self.env.Load("robots/pr2-beta-static.zae")  # todo: use up-to-date urdf
    self.robot = self.env.GetRobots()[0]

    self.joint_listener = TopicListener("/joint_states", sm.JointState)

    # rave to ros conversions
    joint_msg = self.get_last_joint_message()
    ros_names = joint_msg.name
    inds_ros2rave = np.array([self.robot.GetJointIndex(name) for name in ros_names])
    self.good_ros_inds = np.flatnonzero(inds_ros2rave != -1)  # ros joints inds with matching rave joint
    self.rave_inds = inds_ros2rave[self.good_ros_inds]  # openrave indices corresponding to those joints
    self.update_rave()

    self.larm = Arm(self, "l")
    self.rarm = Arm(self, "r")
    self.lgrip = Gripper(self, "l")
    self.rgrip = Gripper(self, "r")
    self.head = Head(self)
    self.torso = Torso(self)
    self.base = Base(self)

    rospy.on_shutdown(self.stop_all)

Example #20
Source File: lwlrap.py From dcase2019_task2 with MIT License | 5 votes

def _one_sample_positive_class_precisions(scores, truth):
    """Calculate precisions for each true class for a single sample.

    Args:
      scores: np.array of (num_classes,) giving the individual classifier scores.
      truth: np.array of (num_classes,) bools indicating which classes are true.

    Returns:
      pos_class_indices: np.array of indices of the true classes for this sample.
      pos_class_precisions: np.array of precisions corresponding to each of
        those classes.
    """
    num_classes = scores.shape[0]
    pos_class_indices = np.flatnonzero(truth > 0)
    # Only calculate precisions if there are some true classes.
    if not len(pos_class_indices):
        return pos_class_indices, np.zeros(0)
    # Retrieval list of classes for this sample.
    retrieved_classes = np.argsort(scores)[::-1]
    # class_rankings[top_scoring_class_index] == 0 etc.
    class_rankings = np.zeros(num_classes, dtype=int)
    class_rankings[retrieved_classes] = range(num_classes)
    # Which of these is a true label?
    retrieved_class_true = np.zeros(num_classes, dtype=bool)
    retrieved_class_true[class_rankings[pos_class_indices]] = True
    # Num hits for every truncated retrieval list.
    retrieved_cumulative_hits = np.cumsum(retrieved_class_true)
    # Precision of retrieval list truncated at each hit, in order of pos_labels.
    precision_at_hits = (
        retrieved_cumulative_hits[class_rankings[pos_class_indices]] /
        (1 + class_rankings[pos_class_indices].astype(float)))
    return pos_class_indices, precision_at_hits

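A small worked call with made-up inputs: class 1 is ranked first (precision 1.0), while class 3 is ranked third with one false positive above it (precision 2/3).

import numpy as np

scores = np.array([0.8, 0.9, 0.4, 0.7])
truth = np.array([False, True, False, True])
idx, prec = _one_sample_positive_class_precisions(scores, truth)
print(idx)   # [1 3]
print(prec)  # [1.         0.66666667]
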
Example #21
Source File: rfsm.py From pysheds with GNU General Public License v3.0 | 5 votes

def volume_to_level(self, node, waterlevel):
    if node.current_vol > 0:
        maxelev = node.parent.elev
        if node.elev:
            minelev = node.elev
        else:
            # TODO: This bound could be a lot better
            minelev = np.nanmin(self.dem)
        target_vol = node.current_vol
        elev = optimize.bisect(self.compute_vol, minelev, maxelev,
                               args=(node, target_vol))
        if node.name:
            mask = self.ws[node.level] == node.name
        else:
            leaves = []
            self.enumerate_leaves(node, level=node.level, stack=leaves)
            mask = np.isin(self.ws[node.level], leaves)
            boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                 for pair in combinations(leaves, 2)]))
            mask.flat[boundary] = True
        mask = np.flatnonzero(mask & (self.dem < elev))
        waterlevel.flat[mask] = elev
    else:
        if node.l:
            self.volume_to_level(node.l, waterlevel)
        if node.r:
            self.volume_to_level(node.r, waterlevel)

Example #22
Source File: test_omp.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes

def test_orthogonal_mp_gram_readonly():
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/5956
    idx, = gamma[:, 0].nonzero()
    G_readonly = G.copy()
    G_readonly.setflags(write=False)
    Xy_readonly = Xy.copy()
    Xy_readonly.setflags(write=False)
    gamma_gram = orthogonal_mp_gram(G_readonly, Xy_readonly[:, 0], 5,
                                    copy_Gram=False, copy_Xy=False)
    assert_array_equal(idx, np.flatnonzero(gamma_gram))
    assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)

Example #23
Source File: utils.py From freesound-classification with Apache License 2.0 | 5 votes

def lwlrap(truth, scores):
    """Calculate the overall lwlrap using sklearn.metrics.lrap."""
    # sklearn doesn't correctly apply weighting to samples with no labels,
    # so just skip them.
    sample_weight = np.sum(truth > 0, axis=1)
    nonzero_weight_sample_indices = np.flatnonzero(sample_weight > 0)
    overall_lwlrap = label_ranking_average_precision_score(
        truth[nonzero_weight_sample_indices, :] > 0,
        scores[nonzero_weight_sample_indices, :],
        sample_weight=sample_weight[nonzero_weight_sample_indices])
    return overall_lwlrap

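A hedged usage sketch with random data; the function above assumes label_ranking_average_precision_score has been imported from sklearn.metrics:

import numpy as np
from sklearn.metrics import label_ranking_average_precision_score

rng = np.random.RandomState(0)
truth = (rng.rand(100, 20) > 0.9).astype(float)  # sparse multi-label truth
scores = rng.rand(100, 20)
print(lwlrap(truth, scores))  # a scalar in (0, 1]
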
Example #24
Source File: arrayfns.py From Computable with MIT License | 5 votes

def nz(x):
    x = asarray(x, dtype=np.ubyte)
    if x.ndim != 1:
        raise TypeError("input must have 1 dimension.")
    indxs = np.flatnonzero(x != 0)
    return indxs[-1].item() + 1

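In other words, nz returns one past the index of the last non-zero entry (an illustrative call):

import numpy as np
from numpy import asarray

print(nz([0, 1, 0, 1, 0]))  # 4 -- the last non-zero entry is at index 3
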
Example #25
Source File: utils_old.py From vnpy_crypto with MIT License | 5 votes

def clean0(matrix):
    """
    Erase columns of zeros: can save some time in pseudoinverse.
    """
    colsum = np.add.reduce(matrix**2, 0)
    val = [matrix[:, i] for i in np.flatnonzero(colsum)]
    return np.array(np.transpose(val))

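A quick illustration (not from the project): a matrix whose middle column is all zeros is reduced to its two non-zero columns.

import numpy as np

m = np.array([[1.0, 0.0, 2.0],
              [3.0, 0.0, 4.0]])
print(clean0(m))
# [[1. 2.]
#  [3. 4.]]
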
Example #26
Source File: mice.py From vnpy_crypto with MIT License | 5 votes

def _split_indices(self, vec):
    null = pd.isnull(vec)
    ix_obs = np.flatnonzero(~null)
    ix_miss = np.flatnonzero(null)
    if len(ix_obs) == 0:
        raise ValueError("variable to be imputed has no observed values")
    return ix_obs, ix_miss

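The underlying idiom is easy to reproduce outside the class (a standalone sketch):

import numpy as np
import pandas as pd

vec = pd.Series([1.0, np.nan, 3.0, np.nan])
null = pd.isnull(vec)
print(np.flatnonzero(~null))  # [0 2] -- observed
print(np.flatnonzero(null))   # [1 3] -- missing
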
Example #27
Source File: test_bayes_mixed_glm.py From vnpy_crypto with MIT License | 5 votes

def gen_crossed_logit_pandas(nc, cs, s1, s2):

    np.random.seed(3799)

    a = np.kron(np.arange(nc), np.ones(cs))
    b = np.kron(np.ones(cs), np.arange(nc))
    fe = np.ones(nc * cs)

    vc = np.zeros(nc * cs)
    for i in np.unique(a):
        ii = np.flatnonzero(a == i)
        vc[ii] += s1 * np.random.normal()
    for i in np.unique(b):
        ii = np.flatnonzero(b == i)
        vc[ii] += s2 * np.random.normal()

    lp = -0.5 * fe + vc
    pr = 1 / (1 + np.exp(-lp))
    y = 1 * (np.random.uniform(size=nc * cs) < pr)

    ident = np.zeros(2 * nc, dtype=int)
    ident[nc:] = 1

    df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})

    return df

Example #28
Source File: graphTools.py From graph-neural-networks with GNU General Public License v3.0 | 5 votes

def computeNonzeroRows(S, Nl='all'):
    """
    computeNonzeroRows: Find the position of the nonzero elements of each
        row of a matrix

    Input:

        S (np.array): matrix
        Nl (int or 'all'): number of rows to compute the nonzero elements; if
            'all', then Nl = S.shape[0]. Rows are counted from the top.

    Output:

        nonzeroElements (list): list of size Nl where each element is an array
            of the indices of the nonzero elements of the corresponding row.
    """
    # Find the position of the nonzero elements of each row of the matrix S.
    # Nl = 'all' means for all rows, otherwise, it will be an int.
    if Nl == 'all':
        Nl = S.shape[0]
    assert Nl <= S.shape[0]
    # Save neighborhood variable
    neighborhood = []
    # For each of the selected nodes
    for n in range(Nl):
        neighborhood += [np.flatnonzero(S[n, :])]

    return neighborhood

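Called on a small adjacency-style matrix (illustrative input), it returns one index array per requested row:

import numpy as np

S = np.array([[0, 1, 1],
              [1, 0, 0],
              [0, 0, 1]])
print(computeNonzeroRows(S, Nl=2))
# [array([1, 2]), array([0])]
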
Example #29
Source File: tools.py From vnpy_crypto with MIT License | 5 votes

def clean0(matrix):
    """
    Erase columns of zeros: can save some time in pseudoinverse.
    """
    colsum = np.add.reduce(matrix**2, 0)
    val = [matrix[:, i] for i in np.flatnonzero(colsum)]
    return np.array(np.transpose(val))

Example #30
Source File: contingency_tables.py From vnpy_crypto with MIT License | 5 votes

def __init__(self, tables, shift_zeros=False):

    if isinstance(tables, np.ndarray):
        sp = tables.shape
        if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
            raise ValueError("If an ndarray, argument must be 2x2xn")
        table = tables
    else:
        # Create a data cube
        table = np.dstack(tables).astype(np.float64)

    if shift_zeros:
        zx = (table == 0).sum(0).sum(0)
        ix = np.flatnonzero(zx > 0)
        if len(ix) > 0:
            table = table.copy()
            table[:, :, ix] += 0.5

    self.table = table

    self._cache = resettable_cache()

    # Quantities to precompute.  Table entries are [[a, b], [c,
    # d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
    # etc.
    self._apb = table[0, 0, :] + table[0, 1, :]
    self._apc = table[0, 0, :] + table[1, 0, :]
    self._bpd = table[0, 1, :] + table[1, 1, :]
    self._cpd = table[1, 0, :] + table[1, 1, :]
    self._ad = table[0, 0, :] * table[1, 1, :]
    self._bc = table[0, 1, :] * table[1, 0, :]
    self._apd = table[0, 0, :] + table[1, 1, :]
    self._dma = table[1, 1, :] - table[0, 0, :]
    self._n = table.sum(0).sum(0)