Python numpy.bincount() Examples
The following are 30 code examples of numpy.bincount().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module numpy, or try the search function.
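Before the project examples, a quick refresher on the function itself. numpy.bincount() counts how many times each non-negative integer occurs in an array; the optional weights argument sums weights per bin instead of counting, and minlength pads the result. A minimal sketch with made-up values:

import numpy as np

x = np.array([0, 1, 1, 3, 2, 1])
np.bincount(x)                 # array([1, 3, 1, 1]): counts of 0, 1, 2, 3
np.bincount(x, minlength=6)    # array([1, 3, 1, 1, 0, 0]): padded to length 6

w = np.array([0.5, 1.0, 1.0, 0.2, 0.3, 1.0])
np.bincount(x, weights=w)      # array([0.5, 3. , 0.3, 0.2]): per-bin weight sums

Many of the examples below combine these three behaviors: counting class labels, building weighted histograms, and accumulating values per index.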
Example #1
Source File: cityscape.py From Deep-Feature-Flow-Segmentation with MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given label and prediction.
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
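The function above relies on a common bincount idiom: encoding each (ground truth, prediction) pair as the single integer gt_label * class_num + pred_label, so one bincount call counts every cell of the confusion matrix at once. A minimal sketch of the same idiom on made-up labels (not from the project); passing minlength makes the bounds check in the loop above unnecessary:

import numpy as np

gt   = np.array([0, 0, 1, 2, 1])   # hypothetical ground-truth labels
pred = np.array([0, 1, 1, 2, 2])   # hypothetical predictions
n = 3
# Each pair (i, j) maps to the flat index i*n + j; bincount counts the pairs.
cm = np.bincount(n * gt + pred, minlength=n * n).reshape(n, n)
# cm[i, j] is the number of samples with truth i predicted as j:
# [[1, 1, 0],
#  [0, 1, 1],
#  [0, 0, 1]]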
Example #2
Source File: test_crp.py From cgpm with Apache License 2.0
def test_crp_decrement(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Decrement all counts by 1.
    Nk = [n-1 if n > 1 else n for n in Nk]
    # Decrement rowids.
    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    targets = [c for c in crp.counts if crp.counts[c] > 1]
    seen = set([])
    for r, c in crp.data.items():
        if c in targets and c not in seen:
            seen.add(c)
            crp.unincorporate(r)
        # Stop once every target cluster has been decremented.
        if len(seen) == len(targets):
            break
    assert_crp_equality(alpha, Nk, crp)
Example #3
Source File: distributed_random_forest.py From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values)
Example #4
Source File: plots.py From cgpm with Apache License 2.0
def plot_dist_discrete(X, output, clusters, ax=None, Y=None, hist=True):
    # Create a new axis?
    if ax is None:
        _, ax = plt.subplots()
    # Set up x axis.
    X = np.asarray(X, dtype=int)
    x_max = max(X)
    Y = range(int(x_max)+1)
    X_hist = np.bincount(X) / float(len(X))
    ax.bar(Y, X_hist, color='gray', edgecolor='none')
    # Compute weighted pdfs.
    pdf = np.zeros((len(clusters), len(Y)))
    W = [log(clusters[k].N) - log(float(len(X))) for k in clusters]
    for i, k in enumerate(clusters):
        pdf[i,:] = np.exp(
            [W[i] + clusters[k].logpdf(None, {output:y}) for y in Y])
        color, alpha = gu.curve_color(i)
        ax.bar(Y, pdf[i,:], color=color, edgecolor='none', alpha=alpha)
    # Plot the sum of pdfs.
    ax.bar(
        Y, np.sum(pdf, axis=0), color='none', edgecolor='black', linewidth=3)
    ax.set_xlim([0, x_max+1])
    # Title.
    ax.set_title(clusters.values()[0].name())
    return ax
Example #5
Source File: test_crp.py From cgpm with Apache License 2.0
def test_crp_increment(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Add 3 new classes.
    Nk.extend([2, 3, 1])
    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    # Increment rowids.
    rowid = max(crp.data)
    clust = max(crp.data.values())
    crp.incorporate(rowid+1, {0: clust+1}, None)
    crp.incorporate(rowid+2, {0: clust+1}, None)
    crp.incorporate(rowid+3, {0: clust+2}, None)
    crp.incorporate(rowid+4, {0: clust+2}, None)
    crp.incorporate(rowid+5, {0: clust+2}, None)
    crp.incorporate(rowid+6, {0: clust+3}, None)
    assert_crp_equality(alpha, Nk, crp)
Example #6
Source File: forest_distributed_decision_trees.py From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values)
Example #7
Source File: test_normal_categorical.py From cgpm with Apache License 2.0
def test_conditional_real(state):
    # Simulate from the conditional Z|X.
    fig, axes = plt.subplots(2, 3)
    fig.suptitle('Conditional Simulation Of Indicator Z Given Data X')
    # Compute representative data sample for each indicator.
    means = [np.mean(DATA[DATA[:,1]==t], axis=0)[0] for t in INDICATORS]
    for mean, indicator, ax in zip(means, INDICATORS, axes.ravel('F')):
        samples_subpop = [s[1] for s in
            state.simulate(-1, [1], {0:mean}, None, N_SAMPLES)]
        ax.hist(samples_subpop, color='g', alpha=.4)
        ax.set_title('True Indicator %d' % indicator)
        ax.set_xlabel('Simulated Indicator')
        ax.set_xticks(INDICATORS)
        ax.set_ylabel('Frequency')
        ax.set_ylim([0, ax.get_ylim()[1]+10])
        ax.grid()
        # Check that the simulated indicator agrees with the true indicator.
        true_ind_a = indicator
        true_ind_b = indicator-1 if indicator % 2 else indicator+1
        counts = np.bincount(samples_subpop)
        frac = sum(counts[[true_ind_a, true_ind_b]])/float(sum(counts))
        assert .8 < frac
Example #8
Source File: test_mvkde.py From cgpm with Apache License 2.0
def test_univariate_categorical():
    # This test generates univariate data from a nominal variable with 6
    # levels and probability vector p_theory, and performs a chi-square test
    # on posterior samples from MvKde.
    rng = gu.gen_rng(2)
    N_SAMPLES = 1000
    p_theory = [.3, .1, .2, .15, .15, .1]
    samples_test = rng.choice(range(6), p=p_theory, size=N_SAMPLES)
    kde = MultivariateKde(
        [7], None, distargs={O: {ST: [C], SA: [{'k': 6}]}}, rng=rng)
    # Incorporate observations.
    for rowid, x in enumerate(samples_test):
        kde.incorporate(rowid, {7: x})
    kde.transition()
    # Posterior samples.
    samples_gen = kde.simulate(-1, [7], N=N_SAMPLES)
    f_obs = np.bincount([s[7] for s in samples_gen])
    f_exp = np.bincount(samples_test)
    _, pval = chisquare(f_obs, f_exp)
    assert 0.05 < pval
    # Get some coverage on logpdf_score.
    assert kde.logpdf_score() < 0
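One caveat when comparing two raw bincount results, as f_obs and f_exp are compared above: the output length depends on the largest value actually present, so if either sample happens to miss the top level, the two arrays differ in length and chisquare fails. A defensive variant (assuming, as in this test, a variable with 6 levels) pins both lengths:

f_obs = np.bincount([s[7] for s in samples_gen], minlength=6)
f_exp = np.bincount(samples_test, minlength=6)

With 1000 draws over 6 levels the mismatch is unlikely in practice, which is presumably why the original test omits it.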
Example #9
Source File: engine.py From cgpm with Apache License 2.0
def _likelihood_weighted_resample(self, samples, rowid, constraints=None,
        inputs=None, statenos=None, multiprocess=1):
    assert len(samples) == \
        len(self.states) if statenos is None else len(statenos)
    assert all(len(s) == len(samples[0]) for s in samples[1:])
    N = len(samples[0])
    weights = np.zeros(len(samples)) if not constraints else \
        self.logpdf(rowid, constraints, inputs,
            statenos=statenos, multiprocess=multiprocess)
    n_model = np.bincount(gu.log_pflip(weights, size=N, rng=self.rng))
    indexes = [self.rng.choice(N, size=n, replace=False) for n in n_model]
    resamples = [
        [s[i] for i in index]
        for s, index in zip(samples, indexes)
        if len(index) > 0
    ]
    return list(itertools.chain.from_iterable(resamples))

# --------------------------------------------------------------------------
# Serialize
Example #10
Source File: test_function_base.py From recruit with Apache License 2.0
def test_with_incorrect_minlength(self):
    x = np.array([], dtype=int)
    assert_raises_regex(TypeError,
                        "'str' object cannot be interpreted",
                        lambda: np.bincount(x, minlength="foobar"))
    assert_raises_regex(ValueError,
                        "must not be negative",
                        lambda: np.bincount(x, minlength=-1))

    x = np.arange(5)
    assert_raises_regex(TypeError,
                        "'str' object cannot be interpreted",
                        lambda: np.bincount(x, minlength="foobar"))
    assert_raises_regex(ValueError,
                        "must not be negative",
                        lambda: np.bincount(x, minlength=-1))
Example #11
Source File: electrode_placement.py From simnibs with GNU General Public License v3.0
def _optimize_2D(nodes, triangles, stay=[]):
    ''' Optimize the locations of the points by moving them towards the
    center of their patch. This is done iteratively for all points, for a
    fixed number of iterations, using a .05 step length. '''
    edges, tr_edges, adjacency_list = _edge_list(triangles)
    boundary = edges[adjacency_list[:, 1] == -1].reshape(-1)
    stay = np.union1d(boundary, stay)
    stay = stay.astype(int)
    n_iter = 5
    step_length = .05
    mean_bar = np.zeros_like(nodes)
    new_nodes = np.copy(nodes)
    k = np.bincount(triangles.reshape(-1), minlength=len(nodes))
    for n in range(n_iter):
        bar = np.mean(new_nodes[triangles], axis=1)
        for i in range(2):
            mean_bar[:, i] = np.bincount(triangles.reshape(-1),
                                         weights=np.repeat(bar[:, i], 3),
                                         minlength=len(nodes))
        mean_bar /= k[:, None]
        new_nodes += step_length * (mean_bar - new_nodes)
        new_nodes[stay] = nodes[stay]
    return new_nodes
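Here np.bincount acts as a scatter-add: each triangle's barycenter is accumulated into the bins of the three nodes that form it, and dividing by the plain bincount of node occurrences (k) yields a per-node average. A tiny sketch of that group-sum / group-mean pattern with synthetic indices and values:

import numpy as np

idx  = np.array([0, 1, 1, 2, 2, 2])              # target bin of each contribution
vals = np.array([1.0, 2.0, 4.0, 3.0, 3.0, 6.0])
sums   = np.bincount(idx, weights=vals)          # [ 1.,  6., 12.]: per-bin sums
counts = np.bincount(idx)                        # [1, 2, 3]: contributions per bin
means  = sums / counts                           # [1., 3., 4.]: per-bin averages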
Example #12
Source File: mesh_io.py From simnibs with GNU General Public License v3.0
def nodes_areas(self):
    ''' Areas for all nodes in a surface

    Returns
    --------
    nd: NodeData
        NodeData structure with the area associated with each node
    '''
    areas = self.elements_volumes_and_areas()[self.elm.triangles]
    triangle_nodes = self.elm[self.elm.triangles, :3] - 1
    nd = np.bincount(
        triangle_nodes.reshape(-1),
        np.repeat(areas/3., 3),
        self.nodes.nr
    )
    return NodeData(nd, 'areas')
Example #13
Source File: evaluate.py From pytorch-segmentation-toolbox with MIT License
def get_confusion_matrix(gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given label and prediction.
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
Example #14
Source File: predict.py From License-Plate-Recognition with MIT License
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n)
                 for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # Transform to Hellinger kernel.
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)

# Cannot guarantee that all provinces are covered.
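The HOG helper uses the weights argument to build a histogram in which each pixel votes with its gradient magnitude rather than a count of one: np.bincount(b.ravel(), m.ravel(), bin_n). A small sketch with made-up orientation bins and magnitudes:

import numpy as np

bins = np.array([0, 2, 2, 1])             # orientation bin per pixel
mags = np.array([0.5, 1.0, 0.25, 2.0])    # gradient magnitude per pixel
hist = np.bincount(bins, weights=mags, minlength=4)
# hist == [0.5, 2.0, 1.25, 0.0]: bin 2 collects 1.0 + 0.25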
Example #15
Source File: val.py From Fast_Seg with Apache License 2.0
def get_confusion_matrix(gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given label and prediction.
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
Example #16
Source File: xrft.py From xrft with MIT License
def _radial_wvnum(k, l, N, nfactor):
    """ Creates a radial wavenumber based on two horizontal wavenumbers
    along with the appropriate index map """
    # Compute target wavenumbers.
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    # Compute bin index.
    kidx = np.digitize(np.ravel(K), ki)
    # Compute number of points for each wavenumber.
    area = np.bincount(kidx)
    # Compute the average radial wavenumber for each bin.
    kr = (np.bincount(kidx, weights=K.ravel())
          / np.ma.masked_where(area==0, area))

    return ki, kr[1:-1]
Example #17
Source File: sampler.py From AerialDetection with Apache License 2.0
def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None):
    if num_replicas is None:
        num_replicas = get_world_size()
    if rank is None:
        rank = get_rank()
    self.dataset = dataset
    self.samples_per_gpu = samples_per_gpu
    self.num_replicas = num_replicas
    self.rank = rank
    self.epoch = 0

    assert hasattr(self.dataset, 'flag')
    self.flag = self.dataset.flag
    self.group_sizes = np.bincount(self.flag)

    self.num_samples = 0
    for i, j in enumerate(self.group_sizes):
        self.num_samples += int(
            math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                      self.num_replicas)) * self.samples_per_gpu
    self.total_size = self.num_samples * self.num_replicas
Example #18
Source File: cityscape_video.py From Deep-Feature-Flow-Segmentation with MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given label and prediction.
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
Example #19
Source File: pascal_voc.py From Deep-Feature-Flow-Segmentation with MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given label and prediction.
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
Example #20
Source File: calculate_weights.py From overhaul-distillation with MIT License
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Create an instance from the data loader.
    z = np.zeros((num_classes,))
    # Initialize tqdm.
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()

    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)

    classes_weights_path = os.path.join(Path.db_root_dir(dataset),
                                        dataset + '_classes_weights.npy')
    np.save(classes_weights_path, ret)
    return ret
Example #21
Source File: loss.py From Parsing-R-CNN with MIT License
def fast_hist(a, b, n):
    k = (a >= 0) & (a < n)
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
Example #22
Source File: parsing_eval.py From Parsing-R-CNN with MIT License
def fast_hist(a, b, n):
    k = (a >= 0) & (a < n)
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
Example #23
Source File: ensemble.py From isic2019 with MIT License
def evalEnsemble(currComb, eval_auc=False):
    currWacc = np.zeros([cvSize])
    currAUC = np.zeros([cvSize])
    for i in range(cvSize):
        if evaluate_method == 'vote':
            pred_argmax = np.argmax(accum_preds[i][currComb,:,:], 2)
            pred_eval = np.zeros([pred_argmax.shape[1], numClasses])
            for j in range(pred_eval.shape[0]):
                pred_eval[j,:] = np.bincount(pred_argmax[:,j], minlength=numClasses)
        else:
            pred_eval = np.mean(accum_preds[i][currComb,:,:], 0)
        # Confusion matrix.
        conf = confusion_matrix(np.argmax(final_targets[i], 1), np.argmax(pred_eval, 1))
        # Class-weighted accuracy.
        currWacc[i] = np.mean(conf.diagonal() / conf.sum(axis=1))
        if eval_auc:
            currAUC_ = np.zeros([numClasses])
            for j in range(numClasses):
                fpr, tpr, _ = roc_curve(final_targets[i][:,j], pred_eval[:,j])
                currAUC_[j] = auc(fpr, tpr)
            currAUC[i] = np.mean(currAUC_)
    if eval_auc:
        currAUCstd = np.std(currAUC)
        currAUC = np.mean(currAUC)
    else:
        currAUCstd = currAUC
    currWaccStd = np.std(currWacc)
    currWacc = np.mean(currWacc)
    if eval_auc:
        return currWacc, currWaccStd, currAUC, currAUCstd
    else:
        return currWacc
Example #24
Source File: metrics.py From PLARD with MIT License
def _fast_hist(self, label_true, label_pred, n_class):
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class**2).reshape(n_class, n_class)
    return hist
Example #25
Source File: disabled_test_simulate_univariate.py From cgpm with Apache License 2.0
def aligned_bincount(arrays):
    bincounts = [np.bincount(a.astype(int)) for a in arrays]
    longest = max(len(b) for b in bincounts)
    return [np.append(b, np.zeros(longest-len(b))) for b in bincounts]
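If the target length is known up front, the minlength argument yields the same padded result in one step; the helper above is needed precisely because the common length is only known after all arrays are counted. A sketch assuming small non-negative integer arrays:

import numpy as np

arrays = [np.array([0, 1, 1]), np.array([3, 0])]
longest = max(a.max() for a in arrays) + 1
padded = [np.bincount(a, minlength=longest) for a in arrays]
# [array([1, 2, 0, 0]), array([1, 0, 0, 1])]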
Example #26
Source File: test_mvknn.py From cgpm with Apache License 2.0
def test_conditional_real(knn_xz):
    # Simulate from the conditional distribution of z|x (see
    # generate_real_nominal_data) and plot the frequencies of the simulated
    # values.
    data = np.asarray(knn_xz.data.values())
    indicators = sorted(set(data[:,1].astype(int)))
    fig, axes = plt.subplots(2, 3)
    fig.suptitle('Conditional Simulation Of Indicator Z Given X', size=20)
    # Compute representative data sample for each indicator.
    means = [np.mean(data[data[:,1]==t], axis=0)[0] for t in indicators]
    for mean, indicator, ax in zip(means, indicators, axes.ravel('F')):
        samples_subpop = [s[1] for s in
            knn_xz.simulate(-1, [1], constraints={0:mean}, N=len(data))]
        # Plot a histogram of the simulated indicator.
        ax.hist(samples_subpop, color='g', alpha=.4)
        ax.set_title('True Indicator Z %d' % indicator)
        ax.set_xlabel('Simulated Indicator Z')
        ax.set_xticks(indicators)
        ax.set_ylabel('Frequency')
        ax.set_ylim([0, ax.get_ylim()[1]+10])
        ax.grid()
        # Check that the simulated indicator agrees with the true indicator.
        true_ind_a = indicator
        true_ind_b = indicator-1 if indicator % 2 else indicator+1
        counts = np.bincount(samples_subpop)
        frac = sum(counts[[true_ind_a, true_ind_b]])/float(sum(counts))
        assert .8 < frac
Example #27
Source File: IOUEval.py From ext_portrait_segmentation with MIT License
def fast_hist(self, a, b):
    k = (a >= 0) & (a < self.nClasses) & (b < self.nClasses)
    # print(np.unique(a[k]))
    # print(np.unique(b[k]))
    return np.bincount(self.nClasses * a[k].astype(int) + b[k],
                       minlength=self.nClasses ** 2).reshape(self.nClasses, self.nClasses)
Example #28
Source File: test_function_base.py From recruit with Apache License 2.0
def test_simple(self):
    y = np.bincount(np.arange(4))
    assert_array_equal(y, np.ones(4))
Example #29
Source File: test_function_base.py From recruit with Apache License 2.0
def test_simple_weight2(self):
    x = np.array([1, 2, 4, 5, 2])
    w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
    y = np.bincount(x, w)
    assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
Example #30
Source File: test_function_base.py From recruit with Apache License 2.0
def test_simple2(self):
    y = np.bincount(np.array([1, 5, 2, 4, 1]))
    assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))