Python numpy.cumsum() Examples
The following are 30 code examples of numpy.cumsum().
You may also want to check out all available functions/classes of the module numpy, or try the search function.
Example #1
Source File: resampling.py From pyfilter with MIT License

def filterpy_systematic_resample(weights, u):
    """
    ___NOTE___: This is the systematic resampling function from:
    https://github.com/rlabbe/filterpy/blob/master/filterpy/monte_carlo/resampling.py,
    i.e. __NOT MINE__, modified to take as input the offsetting random variable.
    """
    N = len(weights)

    # make N subdivisions, and choose positions with a consistent random offset
    positions = (u + np.arange(N)) / N

    indexes = np.zeros(N, 'i')
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1

    return indexes
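A quick sanity check (a hypothetical usage sketch, not part of pyfilter, assuming the function above is in scope): the function expects normalized weights plus one shared uniform offset, and returns one ancestor index per particle.

import numpy as np

weights = np.array([0.05, 0.1, 0.15, 0.6, 0.1])   # normalized particle weights
u = np.random.uniform()                           # single shared offset in [0, 1)
print(filterpy_systematic_resample(weights, u))   # e.g. [2 3 3 3 4]; index 3 dominates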
Example #2
Source File: cgp.py From cgp-cnn with MIT License

def active_net_list(self):
    net_list = [["input", 0, 0]]
    active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
    active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)

    for n, is_a in enumerate(self.is_active):
        if is_a:
            t = self.gene[n][0]
            if n < self.net_info.node_num:    # intermediate node
                type_str = self.net_info.func_type[t]
            else:    # output node
                type_str = self.net_info.out_type[t]

            connections = [active_cnt[self.gene[n][i+1]] for i in range(self.net_info.max_in_num)]
            net_list.append([type_str] + connections)
    return net_list


# CGP with (1 + \lambda)-ES
Example #3
Source File: Collection.py From fullrmc with GNU Affero General Public License v3.0

def step_function(x, center=0, FWHM=0.1, height=1, check=True):
    """
    Compute a step function as the cumulative summation of a gaussian
    distribution of a given vector.

    :Parameters:
        #. x (numpy.ndarray): The vector to compute the gaussian.
           gaussian is computed as a function of x.
        #. center (number): The center of the step function, which is the
           center of the gaussian.
        #. FWHM (number): The Full Width at Half Maximum of the gaussian.
        #. height (number): The height of the step function.
        #. check (boolean): whether to check arguments before generating
           vectors.
    """
    if check:
        assert is_number(height), LOGGER.error("height must be a number")
        height = FLOAT_TYPE(height)
    g = gaussian(x, center=center, FWHM=FWHM, normalize=False, check=check)
    sf = np.cumsum(g)
    sf /= sf[-1]
    return (sf*height).astype(FLOAT_TYPE)
Example #4
Source File: pfilter.py From pfilter with MIT License

def create_indices(positions, weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    cumsum = np.cumsum(weights)
    i, j = 0, 0
    while i < n:
        if positions[i] < cumsum[j]:
            indices[i] = j
            i += 1
        else:
            j += 1
    return indices


### end rlabbe's resampling functions
Example #5
Source File: utils.py From DOTA_models with Apache License 2.0

def calc_pr(gt, out, wt=None):
    if wt is None:
        wt = np.ones((gt.size, 1))

    gt = gt.astype(np.float64).reshape((-1, 1))
    wt = wt.astype(np.float64).reshape((-1, 1))
    out = out.astype(np.float64).reshape((-1, 1))

    gt = gt * wt
    tog = np.concatenate([gt, wt, out], axis=1) * 1.
    ind = np.argsort(tog[:, 2], axis=0)[::-1]
    tog = tog[ind, :]
    cumsumsortgt = np.cumsum(tog[:, 0])
    cumsumsortwt = np.cumsum(tog[:, 1])
    prec = cumsumsortgt / cumsumsortwt
    rec = cumsumsortgt / np.sum(tog[:, 0])
    ap = voc_ap(rec, prec)
    return ap, rec, prec
Example #6
Source File: demo_sampler_wrapper.py From robosuite with MIT License

def sample(self):
    """
    This is the core sampling method. Samples a state from a
    demonstration, in accordance with the configuration.
    """

    # chooses a sampling scheme randomly based on the mixing ratios
    seed = random.uniform(0, 1)
    ratio = np.cumsum(self.scheme_ratios)
    ratio = ratio > seed
    for i, v in enumerate(ratio):
        if v:
            break

    sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
    return sample_method()
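The scheme-selection idiom above (cumulative ratios compared against a uniform draw) can also be written with np.searchsorted. A standalone, hypothetical sketch of just that step, not robosuite code:

import numpy as np

ratios = np.array([0.5, 0.3, 0.2])           # mixing ratios, summing to 1
draw = np.random.uniform(0, 1)
scheme = int(np.searchsorted(np.cumsum(ratios), draw))
print(scheme)                                # 0, 1, or 2 with prob. 0.5/0.3/0.2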
Example #7
Source File: algorithmic.py From fine-lm with MIT License

def zipf_distribution(nbr_symbols, alpha):
    """Helper function: Create a Zipf distribution.

    Args:
      nbr_symbols: number of symbols to use in the distribution.
      alpha: float, Zipf's Law Distribution parameter. Default = 1.5.
        Usually for modelling natural text distribution is in
        the range [1.1-1.6].

    Returns:
      distr_map: list of float, Zipf's distribution over nbr_symbols.
    """
    tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
    zeta = np.r_[0.0, np.cumsum(tmp)]
    return [x / zeta[-1] for x in zeta]
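Since the returned list is a normalized cumulative distribution (starting at 0.0 and ending at 1.0), drawing Zipf-distributed symbol ids is one np.searchsorted away. A hypothetical sketch, assuming zipf_distribution as defined above:

import numpy as np

distr = np.array(zipf_distribution(nbr_symbols=1000, alpha=1.5))
u = np.random.uniform(size=5)
symbols = np.searchsorted(distr, u) - 1   # map each draw onto a symbol id
print(symbols)                            # low ids appear far more often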
Example #8
Source File: tree.py From PHATE with GNU General Public License v2.0

def gen_dla(
    n_dim=100, n_branch=20, branch_length=100, rand_multiplier=2, seed=37, sigma=4
):
    np.random.seed(seed)
    M = np.cumsum(-1 + rand_multiplier * np.random.rand(branch_length, n_dim), 0)
    for i in range(n_branch - 1):
        ind = np.random.randint(branch_length)
        new_branch = np.cumsum(
            -1 + rand_multiplier * np.random.rand(branch_length, n_dim), 0
        )
        M = np.concatenate([M, new_branch + M[ind, :]])

    noise = np.random.normal(0, sigma, M.shape)
    M = M + noise

    # returns the group labels for each point to make it easier to visualize
    # embeddings
    C = np.array([i // branch_length for i in range(n_branch * branch_length)])
    return M, C
Example #9
Source File: pfilter.py From pfilter with MIT License

def residual_resample(weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    # take int(N*w) copies of each weight
    num_copies = (n * weights).astype(np.uint32)
    k = 0
    for i in range(n):
        for _ in range(num_copies[i]):  # make n copies
            indices[k] = i
            k += 1

    # use multinomial resample on the residual to fill up the rest.
    residual = n * weights - num_copies  # get fractional part of n*w
    residual /= np.sum(residual)
    cumsum = np.cumsum(residual)
    cumsum[-1] = 1  # avoid round-off error: ensure the sum is exactly one
    indices[k:n] = np.searchsorted(cumsum, np.random.uniform(0, 1, n - k))
    return indices
Example #10
Source File: carbonara.py From gnocchi with Apache License 2.0

def unserialize(cls, data, block_size, back_window):
    uncompressed = lz4.block.decompress(data)
    nb_points = (
        len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN
    )
    try:
        timestamps = numpy.frombuffer(uncompressed, dtype='<Q',
                                      count=nb_points)
        values = numpy.frombuffer(
            uncompressed, dtype='<d',
            offset=nb_points * cls._SERIALIZATION_TIMESTAMP_LEN)
    except ValueError:
        raise InvalidData
    return cls.from_data(
        numpy.cumsum(timestamps),
        values,
        block_size=block_size,
        back_window=back_window)
Example #11
Source File: kccsd_uhf.py From pyscf with Apache License 2.0

def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
             nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
             nkpts**3*noccb**2*nvirb**2)
    sections = np.cumsum(sizes[:-1])
    t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)

    t1a = t1a.reshape(nkpts,nocca,nvira)
    t1b = t1b.reshape(nkpts,noccb,nvirb)
    t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
    t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
    t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
    return (t1a,t1b), (t2aa,t2ab,t2bb)
Example #12
Source File: mpi.py From pyscf with Apache License 2.0

def work_balanced_partition(tasks, costs=None):
    if costs is None:
        costs = numpy.ones(tasks)
    if rank == 0:
        segsize = float(sum(costs)) / pool.size
        loads = []
        cum_costs = numpy.cumsum(costs)
        start_id = 0
        for k in range(pool.size):
            stop_id = numpy.argmin(abs(cum_costs - (k+1)*segsize)) + 1
            stop_id = max(stop_id, start_id+1)
            loads.append([start_id, stop_id])
            start_id = stop_id
        comm.bcast(loads)
    else:
        loads = comm.bcast()
    if rank < len(loads):
        start, stop = loads[rank]
        return tasks[start:stop]
    else:
        return tasks[:0]
Example #13
Source File: uccsd.py From pyscf with Apache License 2.0

def vector_to_amplitudes(vector, nmo, nocc):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb
    nocc = nocca + noccb
    nvir = nvira + nvirb
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    if vector.size == size:
        #return ccsd.vector_to_amplitudes_s4(vector, nmo, nocc)
        raise RuntimeError('Input vector is a GCCSD vector')
    else:
        sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
        sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
        sections = np.cumsum([sizea, sizeb])
        veca, vecb, t2ab = np.split(vector, sections)
        t1a, t2aa = ccsd.vector_to_amplitudes_s4(veca, nmoa, nocca)
        t1b, t2bb = ccsd.vector_to_amplitudes_s4(vecb, nmob, noccb)
        t2ab = t2ab.copy().reshape(nocca,noccb,nvira,nvirb)
        return (t1a,t1b), (t2aa,t2ab,t2bb)
Example #14
Source File: ucisd.py From pyscf with Apache License 2.0

def cisdvec_to_amplitudes(civec, nmo, nocc):
    norba, norbb = nmo
    nocca, noccb = nocc
    nvira = norba - nocca
    nvirb = norbb - noccb
    nooa = nocca * (nocca-1) // 2
    nvva = nvira * (nvira-1) // 2
    noob = noccb * (noccb-1) // 2
    nvvb = nvirb * (nvirb-1) // 2
    size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
            nooa*nvva, noob*nvvb)
    loc = numpy.cumsum(size)
    c0 = civec[0]
    c1a = civec[loc[0]:loc[1]].reshape(nocca,nvira)
    c1b = civec[loc[1]:loc[2]].reshape(noccb,nvirb)
    c2ab = civec[loc[2]:loc[3]].reshape(nocca,noccb,nvira,nvirb)
    c2aa = _unpack_4fold(civec[loc[3]:loc[4]], nocca, nvira)
    c2bb = _unpack_4fold(civec[loc[4]:loc[5]], noccb, nvirb)
    return c0, (c1a,c1b), (c2aa,c2ab,c2bb)
Example #15
Source File: ecg_findpeaks.py From NeuroKit with MIT License

def _ecg_findpeaks_MWA(signal, window_size):
    """From https://github.com/berndporr/py-ecg-detectors/"""
    mwa = np.zeros(len(signal))
    sums = np.cumsum(signal)

    def get_mean(begin, end):
        if begin == 0:
            return sums[end - 1] / end

        dif = sums[end - 1] - sums[begin - 1]
        return dif / (end - begin)

    for i in range(len(signal)):  # pylint: disable=C0200
        if i < window_size:
            section = signal[0:i]
        else:
            section = get_mean(i - window_size, i)
        if i != 0:
            mwa[i] = np.mean(section)
        else:
            mwa[i] = signal[i]

    return mwa
Example #16
Source File: signal_changepoints.py From NeuroKit with MIT License

def _signal_changepoints_cost_mean(signal):
    """Cost function for a normally distributed signal with a changing mean."""
    i_variance_2 = 1 / (np.var(signal) ** 2)
    cmm = [0.0]
    cmm.extend(np.cumsum(signal))

    cmm2 = [0.0]
    cmm2.extend(np.cumsum(np.abs(signal)))

    def cost(start, end):
        cmm2_diff = cmm2[end] - cmm2[start]
        cmm_diff = pow(cmm[end] - cmm[start], 2)
        i_diff = end - start
        diff = cmm2_diff - cmm_diff
        return (diff / i_diff) * i_variance_2

    return cost
Example #17
Source File: sensitivity_analysis.py From DETAD with MIT License

def compute_mAP_N(result, this_cls_pred, this_cls_gt):
    ap = np.zeros(len(result.tiou_thresholds))
    tp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
    fp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))

    for tidx, tiou in enumerate(result.tiou_thresholds):
        fp[tidx, pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values] = 1
        tp[tidx, ~(pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values)] = 1

    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
    recall_cumsum = tp_cumsum / len(np.unique(this_cls_gt['gt-id']))
    precision_cumsum = recall_cumsum * result.average_num_instance_per_class / (
        recall_cumsum * result.average_num_instance_per_class + fp_cumsum)

    for tidx in range(len(result.tiou_thresholds)):
        ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx, :], recall_cumsum[tidx, :])
    return ap.mean()

# Initialize true positive and false positive vectors.
Example #18
Source File: 4_numpy_100.py From deep-learning-note with MIT License

def moving_averate(a, n=3):
    ret = np.cumsum(a, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n-1:] / n
Example #19
Source File: eom_kccsd_uhf.py From pyscf with Apache License 2.0

def vector_to_amplitudes_ip(vector, kshift, nkpts, nmo, nocc, kconserv):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb

    sizes = (nocca, noccb, (nkpts*nocca)*(nkpts*nocca-1)*nvira//2,
             nkpts**2*noccb*nocca*nvira, nkpts**2*nocca*noccb*nvirb,
             nkpts*noccb*(nkpts*noccb-1)*nvirb//2)
    sections = np.cumsum(sizes[:-1])
    r1a, r1b, r2a, r2baa, r2abb, r2b = np.split(vector, sections)

    r2a = r2a.reshape(nkpts*nocca*(nkpts*nocca-1)//2, nvira)
    r2b = r2b.reshape(nkpts*noccb*(nkpts*noccb-1)//2, nvirb)

    idxa, idya = np.tril_indices(nkpts*nocca, -1)
    idxb, idyb = np.tril_indices(nkpts*noccb, -1)
    r2aaa = np.zeros((nkpts*nocca,nkpts*nocca,nvira), dtype=r2a.dtype)
    r2aaa[idxa,idya] = r2a.copy()
    r2aaa[idya,idxa] = -r2a.copy()  # Fill in value : kj, j < ki, i
    r2aaa = r2aaa.reshape(nkpts,nocca,nkpts,nocca,nvira)
    r2aaa = r2aaa.transpose(0,2,1,3,4)
    r2baa = r2baa.reshape(nkpts,nkpts,noccb,nocca,nvira).copy()
    r2abb = r2abb.reshape(nkpts,nkpts,nocca,noccb,nvirb).copy()
    r2bbb = np.zeros((nkpts*noccb,nkpts*noccb,nvirb), dtype=r2b.dtype)
    r2bbb[idxb,idyb] = r2b.copy()
    r2bbb[idyb,idxb] = -r2b.copy()  # Fill in value : kj, j < ki, i
    r2bbb = r2bbb.reshape(nkpts,noccb,nkpts,noccb,nvirb)
    r2bbb = r2bbb.transpose(0,2,1,3,4)

    r1 = (r1a.copy(), r1b.copy())
    r2 = (r2aaa, r2baa, r2abb, r2bbb)
    return r1, r2
Example #20
Source File: Collection.py From fullrmc with GNU Affero General Public License v3.0

def set_weights(self, weights=None):
    """
    Set generator's weights.

    :Parameters:
        #. weights (None, list, numpy.ndarray): The weights scheme.
           The length defines the number of bins and the edges.
           The length of weights array defines the resolution
           of the biased numbers generation. If None is given,
           ones array of length 10000 is automatically generated.
    """
    # set original weights
    if weights is None:
        self.__bins = 10000
        self.__originalWeights = np.ones(self.__bins)
    else:
        assert isinstance(weights, (list, set, tuple, np.ndarray)), LOGGER.error("weights must be a list of numbers")
        if isinstance(weights, np.ndarray):
            assert len(weights.shape)==1, LOGGER.error("weights must be uni-dimensional")
        wgts = []
        assert len(weights)>=100, LOGGER.error("weights minimum length allowed is 100")
        for w in weights:
            assert is_number(w), LOGGER.error("weights items must be numbers")
            w = FLOAT_TYPE(w)
            assert w>=0, LOGGER.error("weights items must be positive")
            wgts.append(w)
        self.__originalWeights = np.array(wgts, dtype=FLOAT_TYPE)
        self.__bins = len(self.__originalWeights)
    # set bin width
    self.__binWidth     = FLOAT_TYPE(self.rang/self.__bins)
    self.__halfBinWidth = FLOAT_TYPE(self.__binWidth/2.)
    # set scheme
    self.__scheme = np.cumsum(self.__originalWeights)
Example #21
Source File: signal_fixpeaks.py From NeuroKit with MIT License

def _period_to_location(period, sampling_rate=1000, first_location=0):
    location = np.cumsum(period * sampling_rate)
    location = location - (location[0] - first_location)
    return location.astype(int)
Example #22
Source File: core.py From neuropythy with GNU Affero General Public License v3.0

def t(distances, coordinates):
    n = coordinates.shape[1]
    if distances is None:
        distances = np.ones(n - 1)
    t = np.cumsum(np.pad(distances, (1,0), 'constant'))
    t.setflags(write=False)
    return t
Example #23
Source File: metric.py From subword-qac with MIT License

def mrr_summary(ranks, pranks, seens, n_candidates):
    ranks = np.array(ranks)
    pranks = np.array(pranks)

    n = np.zeros(3, dtype=int)
    rank = np.zeros((3, n_candidates + 1), dtype=int)
    prank = np.zeros((3, n_candidates + 1), dtype=int)
    reciprocal = np.array([0.] + [1. / r for r in range(1, n_candidates + 1)]).reshape(1, -1)
    for s, r, pr in zip(seens, ranks, pranks):
        for i in [1 - s, 2]:
            n[i] += 1
            rank[i, r] += 1
            prank[i, pr] += 1

    mrr = np.cumsum(rank * reciprocal, 1) / n.reshape((3, 1))
    pmrr = np.cumsum(prank * reciprocal, 1) / n.reshape((3, 1))

    logs = []
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{mrr[s, i]:.4f} ({seen_str})" for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"mrr @{i:-2d}: {i_str}")
    logs.append(" ")
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{pmrr[s, i]:.4f} ({seen_str})" for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"pmrr @{i:-2d}: {i_str}")
    logs.append(" ")

    return logs
Example #24
Source File: signal_changepoints.py From NeuroKit with MIT License

def _signal_changepoints_cost_var(signal):
    """Cost function for a normally distributed signal with a changing variance."""
    cumm = [0.0]
    cumm.extend(np.cumsum(np.power(np.abs(signal - np.mean(signal)), 2)))

    def cost(s, t):
        dist = float(t - s)
        diff = cumm[t] - cumm[s]
        return dist * np.log(diff / dist)

    return cost
Example #25
Source File: carbonara.py From gnocchi with Apache License 2.0

def quantile(self, q):
    ordered = numpy.lexsort((self._ts['values'], self.indexes))
    min_pos = numpy.cumsum(self.counts) - self.counts
    real_pos = min_pos + (self.counts - 1) * (q / 100)
    floor_pos = numpy.floor(real_pos).astype(int, copy=False)
    ceil_pos = numpy.ceil(real_pos).astype(int, copy=False)
    values = (
        self._ts['values'][ordered][floor_pos] * (ceil_pos - real_pos)
        + self._ts['values'][ordered][ceil_pos] * (real_pos - floor_pos))

    # NOTE(gordc): above code doesn't compute proper value if pct lands on
    # exact index, it sets it to 0. we need to set it properly here
    exact_pos = numpy.equal(floor_pos, ceil_pos)
    values[exact_pos] = self._ts['values'][ordered][floor_pos][exact_pos]

    return make_timeseries(self.tstamps, values)
Example #26
Source File: carbonara.py From gnocchi with Apache License 2.0

def first(self):
    cumcounts = numpy.cumsum(self.counts) - self.counts
    values = self._ts['values'][cumcounts]
    return make_timeseries(self.tstamps, values)
Example #27
Source File: carbonara.py From gnocchi with Apache License 2.0

def last(self):
    cumcounts = numpy.cumsum(self.counts) - 1
    values = self._ts['values'][cumcounts]
    return make_timeseries(self.tstamps, values)
Example #28
Source File: carbonara.py From gnocchi with Apache License 2.0

def median(self):
    ordered = numpy.lexsort((self._ts['values'], self.indexes))
    # TODO(gordc): can use np.divmod when centos supports numpy 1.13
    mid_diff = numpy.floor_divide(self.counts, 2)
    odd = numpy.mod(self.counts, 2)
    mid_floor = (numpy.cumsum(self.counts) - 1) - mid_diff
    mid_ceil = mid_floor + (odd + 1) % 2
    return make_timeseries(
        self.tstamps,
        (self._ts['values'][ordered][mid_floor]
         + self._ts['values'][ordered][mid_ceil]) / 2.0)
Example #29
Source File: tcpr.py From libTLDA with MIT License

def project_simplex(self, v, z=1.0):
    """
    Project vector onto simplex using sorting.

    Reference: "Efficient Projections onto the L1-Ball for Learning in
    High Dimensions (Duchi, Shalev-Shwartz, Singer, Chandra, 2006)."

    Parameters
    ----------
    v : array
        vector to be projected (n dimensions by 0)
    z : float
        constant (def: 1.0)

    Returns
    -------
    w : array
        projected vector (n dimensions by 0)
    """
    # Number of dimensions
    n = v.shape[0]

    # Sort vector
    mu = np.sort(v, axis=0)[::-1]

    # Find rho
    C = np.cumsum(mu) - z
    j = np.arange(n) + 1
    rho = j[mu - C/j > 0][-1]

    # Define theta
    theta = C[mu - C/j > 0][-1] / float(rho)

    # Subtract theta from original vector and cap at 0
    w = np.maximum(v - theta, 0)

    # Return projected vector
    return w
Example #30
Source File: olmar.py From fin with MIT License

def simplex_projection(v, b=1):
    v = np.array(v)
    p = np.size(v)
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = v - theta
    w[w < 0] = 0
    return w