Python autograd.numpy.sum() Examples
The following are 30 code examples of autograd.numpy.sum(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
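
All of these examples rely on the same basic pattern: import autograd.numpy (commonly as np or anp) instead of plain NumPy, so that autograd can trace operations like np.sum() and differentiate through them. As a minimal sketch of that pattern (the function name here is illustrative, not taken from any example below):

import autograd.numpy as np
from autograd import grad

def squared_norm(x):
    # np.sum here is the autograd-wrapped version, so it is differentiable
    return np.sum(x ** 2)

g = grad(squared_norm)          # gradient of sum(x**2) is 2*x
print(g(np.array([1.0, 2.0])))  # prints [2. 4.]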
Example #1
Source File: likelihood.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _mut_factor_het(sfs, demo, mut_rate, vector, p_missing):
    mut_rate = mut_rate * np.ones(sfs.n_loci)
    E_het = expected_heterozygosity(
        demo,
        restrict_to_pops=np.array(
            sfs.sampled_pops)[sfs.ascertainment_pop])

    p_missing = p_missing * np.ones(len(sfs.ascertainment_pop))
    p_missing = p_missing[sfs.ascertainment_pop]

    lambd = np.einsum("i,j->ij", mut_rate, E_het * (1.0 - p_missing))
    counts = sfs.avg_pairwise_hets[:, sfs.ascertainment_pop]
    ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
    ret = ret * sfs.sampled_n[sfs.ascertainment_pop] / float(
        np.sum(sfs.sampled_n[sfs.ascertainment_pop]))
    if not vector:
        ret = np.sum(ret)
    else:
        ret = np.sum(ret, axis=1)
    return ret
Example #2
Source File: configurations.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _build_old_new_idxs(self, folded):
    idxs = self.full_configs._augmented_idxs(folded)

    denom_idx_key = 'denom_idx'
    denom_idx = idxs[denom_idx_key]
    idxs = {k: v[self.sub_idxs]
            for k, v in list(idxs.items()) if k != denom_idx_key}

    old_idxs = np.array(
        list(set(sum(map(list, idxs.values()), [denom_idx]))))
    old_2_new_idxs = {old_id: new_id
                      for new_id, old_id in enumerate(old_idxs)}

    idxs = {k: np.array([old_2_new_idxs[old_id] for old_id in v], dtype=int)
            for k, v in list(idxs.items())}
    idxs[denom_idx_key] = old_2_new_idxs[denom_idx]
    return old_idxs, idxs
Example #3
Source File: g.py From pymoo with Apache License 2.0 | 6 votes |
def _evaluate(self, x, out, *args, **kwargs):
    l = []
    for j in range(self.n_var):
        l.append((j + 1) * x[:, j] ** 2)
    sum_jx = anp.sum(anp.column_stack(l), axis=1)

    a = anp.sum(anp.cos(x) ** 4, axis=1)
    b = 2 * anp.prod(anp.cos(x) ** 2, axis=1)
    c = (anp.sqrt(sum_jx)).flatten()
    c = c + (c == 0) * 1e-20

    f = -anp.absolute((a - b) / c)

    # Constraints
    g1 = -anp.prod(x, 1) + 0.75
    g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

    out["F"] = f
    out["G"] = anp.column_stack([g1, g2])
Example #4
Source File: confidence_region.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)

    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)

    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))
    return autograd.hessian(_g_out_antihess)(params)
Example #5
Source File: size_history.py From momi2 with GNU General Public License v3.0 | 6 votes |
def sfs(self, n):
    if n == 0:
        return np.array([0.])
    Et_jj = self.etjj(n)
    #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

    ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

    before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
    # ignore branch length above untruncated TMRCA
    if self.tau == float('inf'):
        before_tmrca = 0.0

    ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
    return ret

# def transition_prob(self, v, axis=0):
#     return moran_model.moran_action(self.scaled_time, v, axis=axis)
Example #6
Source File: zdt.py From pymoo with Apache License 2.0 | 6 votes |
def _evaluate(self, x, out, *args, **kwargs):
    _x = [x[:, :30]]
    for i in range(self.m - 1):
        _x.append(x[:, 30 + i * self.n: 30 + (i + 1) * self.n])

    u = anp.column_stack([x_i.sum(axis=1) for x_i in _x])
    v = (2 + u) * (u < self.n) + 1 * (u == self.n)
    g = v[:, 1:].sum(axis=1)

    f1 = 1 + u[:, 0]
    f2 = g * (1 / f1)

    if self.normalize:
        f1 = normalize(f1, 1, 31)
        f2 = normalize(f2, (self.m-1) * 1/31, (self.m-1))

    out["F"] = anp.column_stack([f1, f2])
Example #7
Source File: demography.py From momi2 with GNU General Public License v3.0 | 6 votes |
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes)
Example #8
Source File: observation.py From scarlet with MIT License | 6 votes |
def get_loss(self, model):
    """Computes the loss/fidelity of a given model with respect to the observation

    Parameters
    ----------
    model: array
        The model from `Blend`

    Returns
    -------
    result: array
        Scalar tensor with the likelihood of the model given the image data
    """
    model_ = self.render(model)
    if self.frame != self.model_frame:
        images_ = self.images[self.slices_for_images]
        weights_ = self.weights[self.slices_for_images]
    else:
        images_ = self.images
        weights_ = self.weights
    return self.log_norm + np.sum(weights_ * (model_ - images_) ** 2) / 2
Example #9
Source File: observation.py From scarlet with MIT License | 6 votes |
def log_norm(self):
    try:
        return self._log_norm
    except AttributeError:
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        # normalization of the single-pixel likelihood:
        # 1 / [(2pi)^1/2 (sigma^2)^1/2]
        # with inverse variance weights: sigma^2 = 1/weight
        # full likelihood is sum over all data samples: pixel in images
        # NOTE: this assumes that all pixels are used in likelihood!
        log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        self._log_norm = (
            np.prod(images_.shape) / 2 * np.log(2 * np.pi)
            + np.sum(log_sigma) / 2
        )
    return self._log_norm
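
To make the comments above concrete: for P pixels with inverse-variance weights w_i (so sigma_i^2 = 1/w_i), the log of the Gaussian normalization constant is log Z = (P/2) log(2 pi) + (1/2) sum_i log(sigma_i^2), which is what the last expression computes. A tiny numeric check of that correspondence (toy weights, not from scarlet):

import autograd.numpy as np

weights = np.array([4.0, 1.0, 0.25])   # inverse variances, one per pixel
log_sigma = np.log(1 / weights)        # log sigma_i^2
log_norm = len(weights) / 2 * np.log(2 * np.pi) + np.sum(log_sigma) / 2
# equals sum_i log[(2 pi sigma_i^2)^(1/2)], the per-pixel Gaussian normalizer
print(log_norm)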
Example #10
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _entropy(self):
    counts = self._total_freqs
    n_snps = float(self.n_snps())
    p = counts / n_snps
    # return np.sum(p * np.log(p))
    ret = np.sum(p * np.log(p))

    # correct for missing data
    sampled_n = np.sum(self.configs.value, axis=2)
    sampled_n_counts = co.Counter()
    assert len(counts) == len(sampled_n)
    for c, n in zip(counts, sampled_n):
        n = tuple(n)
        sampled_n_counts[n] += c
    sampled_n_counts = np.array(
        list(sampled_n_counts.values()), dtype=float)

    ret = ret + np.sum(sampled_n_counts / n_snps *
                       np.log(n_snps / sampled_n_counts))
    assert not np.isnan(ret)
    return ret
Example #11
Source File: einsum2.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _sum_unique_axes(in_arr, in_sublist, *keep_subs):
    # assume no repeated subscripts
    assert len(in_sublist) == len(set(in_sublist))
    out_sublist = []
    sum_axes = []
    keep_subs = set([s for ks in keep_subs for s in ks])
    for idx, sub in enumerate(in_sublist):
        if sub in keep_subs:
            out_sublist.append(sub)
        else:
            sum_axes.append(idx)
    if sum_axes:
        return np.sum(in_arr, axis=tuple(sum_axes)), out_sublist
    else:
        return in_arr, out_sublist
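
A quick illustration of the helper above, with hypothetical shapes and subscripts chosen just for this sketch: any axis whose subscript is not in keep_subs gets summed out, and the surviving subscripts are returned alongside the reduced array.

import autograd.numpy as np

arr = np.ones((2, 3, 4))   # axes labeled 'i', 'j', 'k'
summed, subs = _sum_unique_axes(arr, "ijk", "i", "k")
# 'j' is not among the kept subscripts, so axis 1 is summed out
print(summed.shape, subs)  # (2, 4) ['i', 'k']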
Example #12
Source File: standard_models.py From pyhawkes with MIT License | 6 votes |
def G(self):
    full_W = np.array([node.w for node in self.nodes])
    WB = full_W[:, 1:].reshape((self.K, self.K, self.B))

    # Weight matrix is summed over impulse response functions
    WT = WB.sum(axis=2)

    # Impulse response weights are normalized weights
    GT = WB / WT[:, :, None]

    # Then we transpose so that the impulse matrix is (outgoing x incoming x basis)
    G = np.transpose(GT, [1, 0, 2])

    # TODO: Decide if this is still necessary
    for k1 in range(self.K):
        for k2 in range(self.K):
            if G[k1, k2, :].sum() < 1e-2:
                G[k1, k2, :] = 1.0 / self.B

    return G
Example #13
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 6 votes |
def resample(self):
    """Create a new SFS by resampling blocks with replacement.

    Note the resampled SFS is assumed to have the same length in base pairs \
    as the original SFS, which may be a poor assumption if the blocks are
    not of equal length.

    :returns: Resampled SFS
    :rtype: :class:`Sfs`
    """
    loci = np.random.choice(
        np.arange(self.n_loci), size=self.n_loci, replace=True)
    mat = self.freqs_matrix[:, loci]
    to_keep = np.asarray(mat.sum(axis=1) > 0).squeeze()
    to_keep = np.arange(len(self.configs))[to_keep]
    mat = mat[to_keep, :]
    configs = _ConfigList_Subset(self.configs, to_keep)

    return self.from_matrix(mat, configs, self.folded, self.length)
Example #14
Source File: standard_models.py From pyhawkes with MIT License | 6 votes |
def objective(self, w):
    obj = 0
    N = float(sum([np.sum(d[1]) for d in self.data_list]))
    for F, S in self.data_list:
        psi = np.dot(F, w)
        lam = self.link(psi)
        obj -= np.sum(S * np.log(lam) - lam * self.dt) / N
        # assert np.isfinite(ll)

    # Add penalties
    obj += (0.5 * np.sum(w[1:]**2) / self.sigma**2) / N
    obj += np.sum(np.abs(w[1:]) * self.lmbda) / N

    # assert np.isfinite(obj)
    return obj
Example #15
Source File: osy.py From pymoo with Apache License 2.0 | 6 votes |
def _evaluate(self, x, out, *args, **kwargs):
    f1 = - (25 * (x[:, 0] - 2) ** 2 + (x[:, 1] - 2) ** 2 + (x[:, 2] - 1) ** 2
            + (x[:, 3] - 4) ** 2 + (x[:, 4] - 1) ** 2)
    f2 = anp.sum(anp.square(x), axis=1)

    g1 = (x[:, 0] + x[:, 1] - 2.0) / 2.0
    g2 = (6.0 - x[:, 0] - x[:, 1]) / 6.0
    g3 = (2.0 - x[:, 1] + x[:, 0]) / 2.0
    g4 = (2.0 - x[:, 0] + 3.0 * x[:, 1]) / 2.0
    g5 = (4.0 - (x[:, 2] - 3.0) ** 2 - x[:, 3]) / 4.0
    g6 = ((x[:, 4] - 3.0) ** 2 + x[:, 5] - 4.0) / 4.0

    out["F"] = anp.column_stack([f1, f2])
    out["G"] = anp.column_stack([g1, g2, g3, g4, g5, g6])
    out["G"] = - out["G"]
Example #16
Source File: ctp.py From pymoo with Apache License 2.0 | 6 votes |
def __init__(self, n_var=2, n_constr=1, option="linear"):
    super().__init__(n_var=n_var, n_obj=2, n_constr=n_constr,
                     xl=0, xu=1, type_var=anp.double)

    def g_linear(x):
        return 1 + anp.sum(x, axis=1)

    def g_multimodal(x):
        A = 10
        return 1 + A * x.shape[1] + anp.sum(x ** 2 - A * anp.cos(2 * anp.pi * x), axis=1)

    if option == "linear":
        self.calc_g = g_linear
    elif option == "multimodal":
        self.calc_g = g_multimodal
        self.xl[:, 1:] = -5.12
        self.xu[:, 1:] = 5.12
    else:
        print("Unknown option for CTP single.")
Example #17
Source File: test_autograd.py From momi2 with GNU General Public License v3.0 | 5 votes |
def test_simple_2():
    def f(x):
        return np.sum(1.0 / np.outer(x, x))
    check_gradient(f, np.random.normal(size=10))
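
check_gradient comes from momi2's test utilities and is not shown on this page. A minimal stand-in consistent with how it is called here (an assumption about its behavior, not momi2's actual implementation) would compare autograd's gradient against a central finite-difference estimate:

import autograd.numpy as np
from autograd import grad

def check_gradient(f, x, eps=1e-7, tol=1e-4):
    # hypothetical sketch: autograd gradient vs. numerical gradient
    g = grad(f)(x)
    num = np.zeros(len(x))
    for i in range(len(x)):
        dx = np.zeros(len(x))
        dx[i] = eps
        num[i] = (f(x + dx) - f(x - dx)) / (2 * eps)
    assert np.allclose(g, num, atol=tol)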
Example #18
Source File: test_msprime.py From momi2 with GNU General Public License v3.0 | 5 votes |
def check_sfs_counts(demo, sampled_pops, sampled_n, theta=2.5, rho=2.5,
                     num_loci=1000, num_bases=1e5, folded=False):
    #seg_sites = demo.simulate_data(
    #    sampled_pops, sampled_n,
    #    mutation_rate=theta / num_bases,
    #    recombination_rate=rho / num_bases,
    #    length=num_bases,
    #    num_replicates=num_loci,
    #)
    #sfs_list = seg_sites.sfs
    demo.set_mut_rate(muts_per_gen=theta / num_bases)
    data = demo.simulate_data(
        length=num_bases,
        recoms_per_gen=rho / num_bases,
        num_replicates=num_loci,
        sampled_n_dict=dict(zip(sampled_pops, sampled_n)))
    sfs_list = data.extract_sfs(None)

    if folded:
        # pass
        sfs_list = sfs_list.fold()

    #config_list = sorted(set(sum([sfs.keys() for sfs in sfs_list.loci],[])))
    #sfs_vals, branch_len = expected_sfs(demo, sfs_list.configs, folded=folded), expected_total_branch_len(
    #    demo, sampled_pops=sfs_list.sampled_pops, sampled_n=sfs_list.sampled_n)
    demo.set_data(sfs_list)
    sfs_vals = np.array(list(demo.expected_sfs().values()))
    theoretical = sfs_vals / num_loci

    # observed = np.zeros((len(sfs_list.configs), len(sfs_list.loci)))
    # for j in range(sfs_list.n_loci):
    #     for i,config in enumerate(sfs_list.configs):
    #         observed[i,j] = sfs_list.freq(config,locus=j)
    observed = np.array(sfs_list.freqs_matrix.todense())

    labels = list(sfs_list.configs)

    p_val = my_t_test(labels, theoretical, observed)
    print("p-value of smallest p-value under beta(1,num_configs)\n", p_val)
    cutoff = 0.05
    #cutoff = 1.0
    assert p_val > cutoff
Example #19
Source File: likelihood.py From momi2 with GNU General Public License v3.0 | 5 votes |
def _get_stochastic_pieces(self, pieces, rgen):
    sfs_pieces = _subsfs_list(self.sfs, pieces, rgen)
    if self.mut_rate is None:
        mut_rate = None
    else:
        mut_rate = self.mut_rate * np.ones(self.sfs.n_loci)
        mut_rate = np.sum(mut_rate) / float(pieces)

    return [SfsLikelihoodSurface(sfs, demo_func=self.demo_func, mut_rate=None,
                                 folded=self.folded,
                                 error_matrices=self.error_matrices,
                                 truncate_probs=self.truncate_probs,
                                 batch_size=self.batch_size)
            for sfs in sfs_pieces]
Example #20
Source File: einsum2.py From momi2 with GNU General Public License v3.0 | 5 votes |
def _reshape(in_arr, in_sublist, *out_sublists):
    assert len(out_sublists) == 3

    old_sublist = in_sublist
    in_sublist = sum(out_sublists, [])
    in_arr = _transpose(in_arr, old_sublist, in_sublist)

    # in_arr.shape breaks in autograd if it has no dimension
    if in_sublist:
        shapes = {s: i for i, s in zip(in_arr.shape, in_sublist)}
    else:
        shapes = {}

    return np.reshape(in_arr,
                      [np.prod([shapes[s] for s in out_subs], dtype=int)
                       for out_subs in out_sublists])
Example #21
Source File: test_autograd.py From momi2 with GNU General Public License v3.0 | 5 votes |
def test_simple():
    def f(x):
        return np.sum(np.outer(x, x))
    check_gradient(f, np.random.normal(size=10))
Example #22
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 5 votes |
def _subset_populations(self, populations, non_ascertained_pops):
    # old indexes of the new population ordering
    old_sampled_pops = list(self.sampled_pops)
    old_pop_idx = np.array([
        old_sampled_pops.index(p) for p in populations], dtype=int)

    # old ascertainment
    ascertained = dict(zip(self.sampled_pops, self.ascertainment_pop))
    # restrict to new populations
    ascertained = {p: ascertained[p] for p in populations}
    # previously non-ascertained pops should remain so
    for pop, is_asc in ascertained.items():
        if not is_asc:
            assert pop in non_ascertained_pops
    # update ascertainment
    for pop in non_ascertained_pops:
        ascertained[pop] = False
    # convert from dict to numpy array
    ascertained = np.array([ascertained[p] for p in populations])

    # keep only polymorphic configs
    asc_only = self.configs[:, old_pop_idx[ascertained], :]
    asc_is_poly = (asc_only.sum(axis=1) != 0).all(axis=1)
    asc_is_poly = np.arange(len(asc_is_poly))[asc_is_poly]
    sub_sfs = self._subset_configs(asc_is_poly)

    # get the new configs
    new_configs = CompressedAlleleCounts.from_iter(
        sub_sfs.configs[:, old_pop_idx, :],
        len(populations), sort=False)

    return self.from_matrix(
        new_configs.index2uniq_mat @ sub_sfs.freqs_matrix,
        ConfigList(populations, new_configs.config_array,
                   ascertainment_pop=ascertained),
        self.folded, self._length)
Example #23
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 5 votes |
def n_snps(self, vector=False):
    """Number of SNPs, either per-locus or overall.

    :param bool vector: Whether to return a single total count,
        or an array of counts per-locus
    """
    if vector:
        return raw_np.array(self.freqs_matrix.sum(axis=0)).reshape((-1,))
    else:
        return np.sum(self._total_freqs)
Example #24
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 5 votes |
def avg_pairwise_hets(self):
    # avg number of hets per ind per pop (assuming Hardy-Weinberg)
    n_nonmissing = np.sum(self.configs.value, axis=2)
    # for denominator, assume 1 allele is drawn from whole sample, and 1
    # allele is drawn only from nonmissing alleles
    denoms = np.maximum(n_nonmissing * (self.sampled_n - 1), 1.0)
    p_het = 2 * self.configs.value[:, :, 0] * \
        self.configs.value[:, :, 1] / denoms

    return self.freqs_matrix.T.dot(p_het)
Example #25
Source File: demography.py From momi2 with GNU General Public License v3.0 | 5 votes |
def simulate_data(self, length, num_replicates=1, **kwargs):
    treeseq = self.simulate_trees(
        length=length, num_replicates=num_replicates, **kwargs)
    try:
        treeseq.variants
    except:
        pass
    else:
        treeseq = [treeseq]

    mat = np.zeros((len(self.sampled_n), sum(self.sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(self.sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            self.sampled_n - derived_counts,
            derived_counts
        ]).T

    #chrom = []
    chrom = _CompressedList()
    pos = []
    compressed_counts = _CompressedHashedCounts(len(self.sampled_pops))

    for c, locus in enumerate(treeseq):
        for v in locus.variants():
            compressed_counts.append(get_config(v.genotypes))
            chrom.append(c)
            pos.append(v.position)

    return SnpAlleleCounts(
        chrom, pos, compressed_counts.compressed_allele_counts(),
        self.sampled_pops, use_folded_sfs=False,
        non_ascertained_pops=[], length=length * num_replicates,
        n_read_snps=len(compressed_counts), n_excluded_snps=0)
Example #26
Source File: demography.py From momi2 with GNU General Public License v3.0 | 5 votes |
def _n_at_node(self, node):
    return sum(self._G.node[(pop, idx)]['lineages']
               for pop, idx in nx.dfs_preorder_nodes(self._G, node)
               if idx == 0)
Example #27
Source File: util.py From momi2 with GNU General Public License v3.0 | 5 votes |
def check_probs_matrix(x):
    x = truncate0(x)
    rowsums = np.sum(x, axis=1)
    assert np.allclose(rowsums, 1.0)
    return np.einsum('ij,i->ij', x, 1.0 / rowsums)
Example #28
Source File: standard_models.py From pyhawkes with MIT License | 5 votes |
def W(self):
    full_W = np.array([node.w for node in self.nodes])
    WB = full_W[:, 1:].reshape((self.K, self.K, self.B))

    # Weight matrix is summed over impulse response functions
    WT = WB.sum(axis=2)

    # Then we transpose so that the weight matrix is (outgoing x incoming)
    W = WT.T
    return W
Example #29
Source File: standard_models.py From pyhawkes with MIT License | 5 votes |
def log_likelihood(self, index=None):
    if index is None:
        data_list = self.data_list
    else:
        data_list = [self.data_list[index]]

    ll = 0
    for F, S in data_list:
        psi = F.dot(self.w)
        lam = self.link(psi)
        ll += (S * np.log(lam) - lam * self.dt).sum()

    return ll
Example #30
Source File: standard_models.py From pyhawkes with MIT License | 5 votes |
def initialize_to_background_rate(self):
    # self.w = abs(1e-6 * np.random.randn(*self.w.shape))
    self.w = 1e-6 * np.ones_like(self.w)
    if len(self.data_list) > 0:
        N = 0
        T = 0
        for F, S in self.data_list:
            N += S.sum(axis=0)
            T += S.shape[0] * self.dt

        lambda0 = self.invlink(N / float(T))
        self.w[0] = lambda0