Python sklearn.metrics.precision_recall_fscore_support() Examples
The following are 30 code examples of sklearn.metrics.precision_recall_fscore_support(), collected from open-source projects. Each example notes the project and source file it comes from. You may also want to check out the other available functions and classes of the sklearn.metrics module.
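Before diving into the project examples, here is a minimal, self-contained sketch of what the function returns; the toy label arrays are invented for illustration and are not taken from any of the projects below.

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# invented ground-truth and predicted labels for a 3-class problem
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 2, 1, 0, 0, 1])

# average=None: each return value is a per-class array (classes 0, 1, 2)
precision, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average=None)
print(precision, recall, fscore, support)

# any averaging mode collapses the first three values to scalars; support becomes None
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
print(p, r, f)

Most of the examples below differ mainly in which average mode they pass ('binary', 'micro', 'macro', 'weighted', or None) and in how they unpack the four returned values.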
Example #1
Source File: utils.py From loglizer with MIT License
def metrics(y_pred, y_true):
    """ Calculate evaluation metrics for precision, recall, and f1.

    Arguments
    ---------
        y_pred: ndarray, the predicted result list
        y_true: ndarray, the ground truth label list

    Returns
    -------
        precision: float, precision value
        recall: float, recall value
        f1: float, f1 measure value
    """
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
    return precision, recall, f1
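As a quick, hedged usage sketch (the arrays below are invented and not taken from loglizer), the helper above can be called directly on binary label vectors once precision_recall_fscore_support has been imported from sklearn.metrics:

import numpy as np

# assumes the metrics() function defined above is already in scope
y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])
print(metrics(y_pred, y_true))  # (1.0, 0.666..., 0.8) for this toy input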
Example #2
Source File: eval.py From dcase_task2 with MIT License
def print_result(fold, y, y_predicted, id_class_mapping):
    """ print result matrix """
    n_classes = len(np.unique(y))

    p, r, f, s = precision_recall_fscore_support(y, y_predicted, labels=None, pos_label=1, average=None)
    a = [(accuracy_score(y[y == c], y_predicted[y == c])) for c in xrange(n_classes)]

    # count occurrences of classes
    count = Counter(y)

    print("\n")
    if fold is not None:
        print("Results on fold %d" % fold)
    print("\n")
    print("%30s | %s | %5s | %4s | %4s | %4s |" % ("LABEL", "CNT", "ACC ", "PR ", "RE ", "F1 "))
    print('-' * 70)
    for c in xrange(n_classes):
        print("%30s | %03d | %0.3f | %.2f | %.2f | %.3f |" % (id_class_mapping[c], count[c], a[c], p[c], r[c], f[c]))
    print('-' * 70)
    print("%30s | %03d | %0.3f | %.2f | %.2f | %.3f |" % ('average', len(y), np.mean(a), np.mean(p), np.mean(r), np.mean(f)))
    print('=' * 70)
    print("Overall Accuracy: %.3f %%" % (100.0 * accuracy_score(y, y_predicted)))
    print('=' * 70)
Example #3
Source File: pci_model.py From PCI-China with GNU Affero General Public License v3.0
def summary_util(self, type):
    if type == "test":
        Y_hat = self.model.predict(self.X_test)
        Y = self.Y_test
    elif type == "train":
        Y_hat = self.model.predict(self.X_train)
        Y = self.Y_train
    elif type == "val":
        Y_hat = self.model.predict(self.X_val)
        Y = self.Y_val
    elif type == "forecast":
        Y_hat = self.model.predict(self.X_forecast)
        Y = self.Y_forecast

    Y_pred = Y_hat > 0.5

    precision, recall, F1, junk = precision_recall_fscore_support(Y, Y_pred)

    out = dict()
    out['precision'] = precision[1]
    out['recall'] = recall[1]
    out['F1'] = F1[1]
    return out
Example #4
Source File: evaluate.py From RE-CNN-pytorch with MIT License
def evaluate(model, data_iterator, num_steps, metric_labels):
    """Evaluate the model on `num_steps` batches."""
    # set model to evaluation mode
    model.eval()

    output_labels = list()
    target_labels = list()

    # compute metrics over the dataset
    for _ in range(num_steps):
        # fetch the next evaluation batch
        batch_data, batch_labels = next(data_iterator)

        # compute model output
        batch_output = model(batch_data)  # batch_size x num_labels
        batch_output_labels = torch.max(batch_output, dim=1)[1]
        output_labels.extend(batch_output_labels.data.cpu().numpy().tolist())
        target_labels.extend(batch_labels.data.cpu().numpy().tolist())

    # Calculate precision, recall and F1 for all relation categories
    p_r_f1_s = precision_recall_fscore_support(target_labels, output_labels, labels=metric_labels, average='micro')
    p_r_f1 = {'precison': p_r_f1_s[0] * 100, 'recall': p_r_f1_s[1] * 100, 'f1': p_r_f1_s[2] * 100}
    return p_r_f1
Example #5
Source File: fbeta_measure_test.py From allennlp with Apache License 2.0
def test_fbeta_multiclass_with_weighted_average(self, device: str):
    self.predictions = self.predictions.to(device)
    self.targets = self.targets.to(device)

    labels = [0, 1]
    fbeta = FBetaMeasure(average="weighted", labels=labels)
    fbeta(self.predictions, self.targets)
    metric = fbeta.get_metric()
    precisions = metric["precision"]
    recalls = metric["recall"]
    fscores = metric["fscore"]

    weighted_precision, weighted_recall, weighted_fscore, _ = precision_recall_fscore_support(
        self.targets.cpu().numpy(),
        self.predictions.argmax(dim=1).cpu().numpy(),
        labels=labels,
        average="weighted",
    )

    # check value
    assert_allclose(precisions, weighted_precision)
    assert_allclose(recalls, weighted_recall)
    assert_allclose(fscores, weighted_fscore)
Example #6
Source File: bootstrap.py From CDSS with GNU General Public License v3.0
def boot_human(i, sample_size=sample_size):
    np.random.seed(seed=i)
    random_pids = np.random.choice(pt_list_unique_sub, size=sample_size, replace=True)
    test = np.array([p_2_id_sub[pid] for pid in random_pids])
    boot_list = []
    for ids in test:
        size = len(ids)
        boot_list.append(np.random.choice(ids))
    y_pred_sub = human_authored[boot_list, :]
    y_true_sub = subset_y[boot_list, :]
    # evaluate model
    # print('calculating')
    output = precision_recall_fscore_support(y_true_sub.flatten(), y_pred_sub.flatten())
    precision = output[0][2]
    recall = output[1][2]
    f1 = output[2][2]
    # print('done')
    return precision, recall, f1
Example #7
Source File: bootstrap.py From CDSS with GNU General Public License v3.0
def boot_human_clinic(i, sample_size=sample_size):
    np.random.seed(seed=i)
    random_pids = np.random.choice(pt_list_unique_sub, size=sample_size, replace=True)
    test = np.array([p_2_id_sub[pid] for pid in random_pids])
    boot_list = []
    for ids in test:
        size = len(ids)
        boot_list.append(np.random.choice(ids))
    y_pred_sub = subset_ClinicNet[boot_list, :]
    y_true_sub = subset_y[boot_list, :]
    auroc = roc_auc_score(y_true_sub, y_pred_sub, average='micro')
    avg_precision = average_precision_score(y_true_sub, y_pred_sub, average='micro')
    y_pred_sub[y_pred_sub < threshold_clinicnet] = 0
    y_pred_sub[y_pred_sub >= threshold_clinicnet] = 1
    # evaluate model
    # print('calculating')
    output = precision_recall_fscore_support(y_true_sub.flatten(), y_pred_sub.flatten())
    # print('done')
    precision = output[0][1]
    recall = output[1][1]
    f1 = output[2][1]
    # print('done')
    return auroc, avg_precision, precision, recall, f1
Example #8
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_no_labels_average_none():
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    beta = 1

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=None, beta=beta)
    assert_array_almost_equal(p, [0, 0, 0], 2)
    assert_array_almost_equal(r, [0, 0, 0], 2)
    assert_array_almost_equal(f, [0, 0, 0], 2)
    assert_array_almost_equal(s, [0, 0, 0], 2)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=None)
    assert_array_almost_equal(fbeta, [0, 0, 0], 2)
Example #9
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_no_labels(beta, average):
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=average, beta=beta)
    assert_almost_equal(p, 0)
    assert_almost_equal(r, 0)
    assert_almost_equal(f, 0)
    assert_equal(s, None)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=average)
    assert_almost_equal(fbeta, 0)
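Both tests above exercise the degenerate case in which nothing is ever true or predicted, so precision and recall are ill-defined and scikit-learn emits an UndefinedMetricWarning. In newer releases (the zero_division parameter was added around scikit-learn 0.22, so treat the exact version as an assumption) the warning can be avoided explicitly; a minimal sketch:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)

# zero_division=0 substitutes 0.0 for the ill-defined ratios instead of warning
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average='macro', zero_division=0)
print(p, r, f, s)  # 0.0 0.0 0.0 None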
Example #10
Source File: bootstrap.py From CDSS with GNU General Public License v3.0
def boot_human_logistic(i, sample_size=sample_size):
    np.random.seed(seed=i)
    random_pids = np.random.choice(pt_list_unique_sub, size=sample_size, replace=True)
    test = np.array([p_2_id_sub[pid] for pid in random_pids])
    boot_list = []
    for ids in test:
        size = len(ids)
        boot_list.append(np.random.choice(ids))
    y_pred_sub = subset_log[boot_list, :]
    y_true_sub = subset_y[boot_list, :]
    auroc = roc_auc_score(y_true_sub, y_pred_sub, average='micro')
    avg_precision = average_precision_score(y_true_sub, y_pred_sub, average='micro')
    y_pred_sub[y_pred_sub < threshold_log] = 0
    y_pred_sub[y_pred_sub >= threshold_log] = 1
    # evaluate model
    # print('calculating')
    output = precision_recall_fscore_support(y_true_sub.flatten(), y_pred_sub.flatten())
    # print('done')
    precision = output[0][1]
    recall = output[1][1]
    f1 = output[2][1]
    # print('done')
    return auroc, avg_precision, precision, recall, f1
Example #11
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_score_binary_averaged():
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])

    # compute scores with default labels introspection
    ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
                                                    average=None)
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='macro')
    assert_equal(p, np.mean(ps))
    assert_equal(r, np.mean(rs))
    assert_equal(f, np.mean(fs))
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='weighted')
    support = np.bincount(y_true)
    assert_equal(p, np.average(ps, weights=support))
    assert_equal(r, np.average(rs, weights=support))
    assert_equal(f, np.average(fs, weights=support))
Example #12
Source File: analysis_functions.py From DeepLearning_IDS with MIT License
def validation(classifier, data, y_data, y_target, class_names, title):
    #kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    #cv = kfold
    t = 'Confusion matrix: ' + str(title)
    x = np.transpose(data)
    if (classifier == None):
        print("No accuracy to be computed")
    else:
        accuracy = model_selection.cross_val_score(classifier, x, y_target, scoring='accuracy')
        print("Accuracy: " + str(accuracy))
    #precision = model_selection.cross_val_score(self.classifier, x, target, scoring='precision')
    #precision_score(y_true, y_pred, average='macro')
    #recall = model_selection.cross_val_score(self.classifier, x, target, scoring='recall')
    precision, recall, fscore, m = precision_recall_fscore_support(y_target, y_data, average='macro')
    cnf_matrix = confusion_matrix(y_target, y_data)
    print("Precision: " + str(precision) + ", Recall:" + str(recall) + ", f-score:" + str(fscore))
    np.set_printoptions(precision=2)

    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=class_names, title=t)
    print("... finishing matrix plot")
    plt.show()
Example #13
Source File: hfusion.py From hfusion with MIT License
def calc_test_result(result, test_label, test_mask):

    true_label = []
    predicted_label = []

    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            if test_mask[i, j] == 1:
                true_label.append(np.argmax(test_label[i, j]))
                predicted_label.append(np.argmax(result[i, j]))

    print("Confusion Matrix :")
    print(confusion_matrix(true_label, predicted_label))
    print("Classification Report :")
    print(classification_report(true_label, predicted_label, digits=4))
    print("Accuracy ", accuracy_score(true_label, predicted_label))
    print("Macro Classification Report :")
    print(precision_recall_fscore_support(true_label, predicted_label, average='macro'))
    print("Weighted Classification Report :")
    print(precision_recall_fscore_support(true_label, predicted_label, average='weighted'))
    #print "Normal Classification Report :"
    #print precision_recall_fscore_support(true_label, predicted_label)
Example #14
Source File: model.py From mindmeld with Apache License 2.0
def _get_class_stats(y_true, y_pred, labels):
    """
    Method for getting some basic statistics by class.

    Returns:
        dict: A structured dictionary containing precision, recall, f_beta, and support \
              vectors (1 x number of classes)
    """
    precision, recall, f_beta, support = score(
        y_true=y_true, y_pred=y_pred, labels=labels
    )
    stats = {
        "precision": precision,
        "recall": recall,
        "f_beta": f_beta,
        "support": support,
    }
    return stats
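In this snippet, score is presumably precision_recall_fscore_support imported under a shorter name (an assumption based on the call signature, since the import is not shown in the excerpt); the import would look roughly like:

from sklearn.metrics import precision_recall_fscore_support as score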
Example #15
Source File: conv_featuremaps_visualization.py From MCF-3D-CNN with MIT License
def accuracy(y_true, y_pred):
    # compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)

    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])

    # # plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)

    # compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat
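The beta=0.5 passed above makes the returned F-score weight precision more heavily than recall. For reference, a small sketch of the general F-beta formula that precision_recall_fscore_support implements (the sample numbers are made up):

def f_beta(precision, recall, beta):
    # F-beta = (1 + beta**2) * P * R / (beta**2 * P + R); beta < 1 favours precision
    return (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)

print(f_beta(0.8, 0.5, beta=1.0))  # plain F1, about 0.615
print(f_beta(0.8, 0.5, beta=0.5))  # about 0.714, pulled toward the precision of 0.8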
Example #16
Source File: train_module.py From pynlp with MIT License
def eval_epoch(model, sess, eval_set):
    result, labels = [], []
    avg_loss, avg_acc, steps, total_len = 0, 0, 0, 0
    for batch in eval_set.next_batch():
        steps += 1
        predictions, batch_loss, batch_acc = sess.run(
            [model.pred, model.loss, model.accuracy],
            feed_dict={model.dropout_keep_prob: 1.0,
                       model.input_x: batch.texts,
                       model.input_y: batch.labels})
        batch_len = len(batch.texts)
        avg_loss += batch_loss * batch_len
        avg_acc += batch_acc * batch_len
        total_len += batch_len
        result.extend(predictions.tolist())
        labels.extend(batch.labels.tolist())
    avg_loss, avg_acc = avg_loss / total_len, avg_acc / total_len
    precision, recall, fscore, support = precision_recall_fscore_support(labels, result, average='weighted')
    metrics = {'loss': avg_loss,
               'accuracy': avg_acc,
               'precision': precision,
               'recall': recall,
               'fscore': fscore}
    return metrics, result
Example #17
Source File: generic_classifier.py From 2020plus with Apache License 2.0
def _update_onco_metrics(self, y_true, y_pred, prob):
    self.onco_gene_pred = pd.Series(y_pred, self.y.index)
    self.onco_gene_score = pd.Series(prob, self.y.index)

    # compute metrics for classification
    self.onco_gene_count[self.num_pred] = sum(y_pred)
    prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    self.onco_precision[self.num_pred] = prec[self.onco_num]
    self.onco_recall[self.num_pred] = recall[self.onco_num]
    self.onco_f1_score[self.num_pred] = fscore[self.onco_num]
    self.logger.debug('Onco Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
                      self.num_pred + 1, str(prec), str(recall), str(fscore)))

    # compute ROC curve metrics
    fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
    self.onco_tpr_array[self.num_pred, :] = interp(self.onco_fpr_array, fpr, tpr)
    #self.onco_mean_tpr[0] = 0.0

    # compute Precision-Recall curve metrics
    p, r, thresh = metrics.precision_recall_curve(y_true, prob)
    p, r, thresh = p[::-1], r[::-1], thresh[::-1]  # reverse order of results
    thresh = np.insert(thresh, 0, 1.0)
    self.onco_precision_array[self.num_pred, :] = interp(self.onco_recall_array, r, p)
    self.onco_threshold_array[self.num_pred, :] = interp(self.onco_recall_array, r, thresh)
Example #18
Source File: generic_classifier.py From 2020plus with Apache License 2.0
def _update_tsg_metrics(self, y_true, y_pred, prob):
    self.tsg_gene_pred = pd.Series(y_pred, self.y.index)
    self.tsg_gene_score = pd.Series(prob, self.y.index)

    # compute metrics for classification
    self.tsg_gene_count[self.num_pred] = sum(y_pred)
    prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    tsg_col = 1  # column for metrics related to tsg
    self.tsg_precision[self.num_pred] = prec[tsg_col]
    self.tsg_recall[self.num_pred] = recall[tsg_col]
    self.tsg_f1_score[self.num_pred] = fscore[tsg_col]
    self.logger.debug('Tsg Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
                      self.num_pred + 1, str(prec), str(recall), str(fscore)))

    # compute ROC curve metrics
    fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
    self.tsg_tpr_array[self.num_pred, :] = interp(self.tsg_fpr_array, fpr, tpr)
    #self.tsg_tpr_array[0] = 0.0

    # compute Precision-Recall curve metrics
    p, r, thresh = metrics.precision_recall_curve(y_true, prob)
    p, r, thresh = p[::-1], r[::-1], thresh[::-1]  # reverse order of results
    self.tsg_precision_array[self.num_pred, :] = interp(self.tsg_recall_array, r, p)
Example #19
Source File: evaluator.py From DeepADoTS with MIT License
def get_accuracy_precision_recall_fscore(y_true: list, y_pred: list):
    accuracy = accuracy_score(y_true, y_pred)
    # warn_for=() avoids log warnings for any result being zero
    precision, recall, f_score, _ = prf(y_true, y_pred, average='binary', warn_for=())
    if precision == 0 and recall == 0:
        f01_score = 0
    else:
        f01_score = fbeta_score(y_true, y_pred, average='binary', beta=0.1)
    return accuracy, precision, recall, f_score, f01_score
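Here prf is presumably an alias for precision_recall_fscore_support (an assumption; the import is not shown in the excerpt), which also explains the warn_for=() argument, a tuple naming the metrics for which warnings would otherwise be emitted. The imports would look roughly like:

from sklearn.metrics import accuracy_score, fbeta_score
from sklearn.metrics import precision_recall_fscore_support as prf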
Example #20
Source File: Plot.py From RMDL with GNU General Public License v3.0
def accuracy(y_test, final_y):
    np.set_printoptions(precision=2)
    y_test_temp = np.argmax(y_test, axis=1)
    F_score = accuracy_score(y_test_temp, final_y)
    F1 = precision_recall_fscore_support(y_test_temp, final_y, average='micro')
    F2 = precision_recall_fscore_support(y_test_temp, final_y, average='macro')
    F3 = precision_recall_fscore_support(y_test_temp, final_y, average='weighted')
    print(F_score)
    print(F1)
    print(F2)
    print(F3)
Example #21
Source File: model.py From mindmeld with Apache License 2.0
def __repr__(self):
    num_examples = len(self.results)
    num_correct = len(list(self.correct_results()))
    accuracy = self.get_accuracy()
    msg = "<{} score: {:.2%}, {} of {} example{} correct>"
    return msg.format(
        self.__class__.__name__,
        accuracy,
        num_correct,
        num_examples,
        "" if num_examples == 1 else "s",
    )
Example #22
Source File: GetResults.py From ProFET with GNU General Public License v3.0
def get_scores(scores, y, label):
    '''
    Returns a dictionary of metrics for a given classification of the data
    (given by Cross_val_predict).

    scores: list
        Classifier predictions on data
    y: list
        True Class labels
    label: string
        Name of the classifier used
    '''
    roc_auc = metrics.roc_auc_score(y, scores, average=None)
    print("roc_auc (No-Av): %0.4f " % (roc_auc))
    roc_auc = metrics.roc_auc_score(y, scores, average='weighted')
    print("roc_auc (weighted-Av): %0.4f " % (roc_auc))
    f1_pos = metrics.f1_score(y, scores, average='binary')
    print("POS f1: %0.4f " % (f1_pos))
    av_PR = metrics.average_precision_score(y, scores)  # corresponds to the area under the precision-recall curve
    print("Av_precision (Prec-Recall AUC): %0.3f " % (av_PR))
    accuracy = metrics.accuracy_score(y, scores)
    print("Accuracy: %0.3f " % (accuracy))
    precision, recall, fscore, support = metrics.precision_recall_fscore_support(y, scores, average='binary')
    print("Precision: %0.3f " % (precision))
    print("Recall: %0.3f " % (recall))
    # print("fscore(fBeta): %0.4f [%s]" % (fscore, label))
    mcc = metrics.matthews_corrcoef(y, scores)
    print("MCC: %0.3f " % (mcc))
    results_dict = {'roc_auc(macro)': roc_auc, 'f1_Pos': f1_pos, 'accuracy': accuracy,
                    'precision': precision, 'recall': recall,
                    # 'fscore-fBeta': fscore,
                    'average Precision': av_PR, 'mcc': mcc}
    results_dict = {k: round(float(v), 4) for k, v in results_dict.items()}
    return results_dict

# E:\Dropbox\Dropbox\Protein Cleavage Prediction\data\NeuroPred\V4\features-11_8_KR.csv
Example #23
Source File: train.py From OAG with MIT License
def cal_f1(y_true, prob_pred):
    f1 = 0
    threshold = 0
    pred = prob_pred.copy()
    for thr in [j * 0.01 for j in range(100)]:
        for i in range(len(prob_pred)):
            pred[i] = prob_pred[i] > thr
        performance = precision_recall_fscore_support(y_true, pred, average='binary')
        if performance[2] > f1:
            precision = performance[0]
            recall = performance[1]
            f1 = performance[2]
            threshold = thr
    return threshold, precision, recall, f1
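The function above re-thresholds the probabilities at 100 evenly spaced cut-offs and keeps the best F1. As a rough alternative sketch (not part of the OAG code, and using >= rather than the strict > above), sklearn.metrics.precision_recall_curve evaluates every distinct threshold in a single pass:

import numpy as np
from sklearn.metrics import precision_recall_curve

def cal_f1_curve(y_true, prob_pred):
    precision, recall, thresholds = precision_recall_curve(y_true, prob_pred)
    # F1 at every candidate threshold, guarding against 0/0
    denom = precision + recall
    f1 = np.where(denom > 0, 2 * precision * recall / np.maximum(denom, 1e-12), 0.0)
    best = np.argmax(f1[:-1])  # the final point has no associated threshold
    return thresholds[best], precision[best], recall[best], f1[best]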
Example #24
Source File: models.py From delft with Apache License 2.0
def eval(self, x_test=None, y_test=None, run_number=0):
    '''
    Train and eval the nb_runs classifier(s) against holdout set. If nb_runs>1,
    the final scores are averaged over the nb_runs models.
    '''
    start = time.time()
    predict_examples, y_test = self.processor.get_test_examples(x_test=x_test, y_test=y_test)
    #y_test_gold = np.asarray([np.argmax(line) for line in y_test])

    y_predicts = self.eval_fold(predict_examples)
    result_intermediate = np.asarray([np.argmax(line) for line in y_predicts])

    def vectorize(index, size):
        result = np.zeros(size)
        if index < size:
            result[index] = 1
        return result

    result_binary = np.array([vectorize(xi, len(self.labels)) for xi in result_intermediate])

    precision, recall, fscore, support = precision_recall_fscore_support(y_test, result_binary, average=None)
    print('\n')
    print('{:>14} {:>12} {:>12} {:>12} {:>12}'.format(" ", "precision", "recall", "f-score", "support"))
    p = 0
    for the_class in self.labels:
        the_class = the_class[:14]
        print('{:>14} {:>12} {:>12} {:>12} {:>12}'.format(the_class,
                                                          "{:10.4f}".format(precision[p]),
                                                          "{:10.4f}".format(recall[p]),
                                                          "{:10.4f}".format(fscore[p]),
                                                          support[p]))
        p += 1

    runtime = round(time.time() - start, 3)
    print("Total runtime for eval: " + str(runtime) + " seconds")
Example #25
Source File: pci_model.py From PCI-China with GNU Affero General Public License v3.0
def calc_f1_df(x):
    j1, j2, F1, j3 = precision_recall_fscore_support(x.Y, x.Y_hat)
    f1 = F1[1]
    return pd.Series([f1], index=['f1'])
Example #26
Source File: MLP_nets.py From DeepLearning_IDS with MIT License
def validation(self, data, y_data, y_target):
    #kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    #cv = kfold
    x = np.transpose(data)
    accuracy = model_selection.cross_val_score(self.classifier, x, y_target, scoring='accuracy')
    #precision = model_selection.cross_val_score(self.classifier, x, target, scoring='precision')
    #precision_score(y_true, y_pred, average='macro')
    #recall = model_selection.cross_val_score(self.classifier, x, target, scoring='recall')
    precision, recall, fscore, m = precision_recall_fscore_support(y_target, y_data, average='macro')
    print("MLP Validation:")
    print(str(accuracy[0]) + ", " + str(precision) + ", " + str(recall))

########################################################################
Example #27
Source File: train.py From Aspect-level-sentiment with Apache License 2.0
def macro_f1(y_true, y_pred):
    preds = np.argmax(y_pred, axis=-1)
    true = np.argmax(y_true, axis=-1)
    p_macro, r_macro, f_macro, support_macro \
        = precision_recall_fscore_support(true, preds, average='macro')
    f_macro = 2 * p_macro * r_macro / (p_macro + r_macro)
    return f_macro
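Note that the f_macro returned by sklearn (the unweighted mean of per-class F1 scores) is discarded on the last line and recomputed as the harmonic mean of macro precision and macro recall; those two definitions generally give different numbers. A small sketch with invented labels:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 1, 0, 2])
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
print(f)                    # mean of per-class F1 scores, about 0.656
print(2 * p * r / (p + r))  # harmonic mean of macro P and macro R, about 0.693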
Example #28
Source File: main.py From cgcnn with MIT License
def class_eval(prediction, target):
    prediction = np.exp(prediction.numpy())
    target = target.numpy()
    pred_label = np.argmax(prediction, axis=1)
    target_label = np.squeeze(target)
    if not target_label.shape:
        target_label = np.asarray([target_label])
    if prediction.shape[1] == 2:
        precision, recall, fscore, _ = metrics.precision_recall_fscore_support(
            target_label, pred_label, average='binary')
        auc_score = metrics.roc_auc_score(target_label, prediction[:, 1])
        accuracy = metrics.accuracy_score(target_label, pred_label)
    else:
        raise NotImplementedError
    return accuracy, precision, recall, fscore, auc_score
Example #29
Source File: predict.py From cgcnn with MIT License
def class_eval(prediction, target):
    prediction = np.exp(prediction.numpy())
    target = target.numpy()
    pred_label = np.argmax(prediction, axis=1)
    target_label = np.squeeze(target)
    if prediction.shape[1] == 2:
        precision, recall, fscore, _ = metrics.precision_recall_fscore_support(
            target_label, pred_label, average='binary')
        auc_score = metrics.roc_auc_score(target_label, prediction[:, 1])
        accuracy = metrics.accuracy_score(target_label, pred_label)
    else:
        raise NotImplementedError
    return accuracy, precision, recall, fscore, auc_score
Example #30
Source File: plot_test_results.py From marseille with BSD 3-Clause "New" or "Revised" License
def arg_p_r_f(Y_true, Y_pred, labels, **kwargs):
    macro_p = []
    macro_r = []
    macro_f = []
    micro_true = []
    micro_pred = []

    for y_true, y_pred in zip(Y_true, Y_pred):
        p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, **kwargs)
        macro_p.append(p)
        macro_r.append(r)
        macro_f.append(f)
        micro_true.extend(y_true)
        micro_pred.extend(y_pred)

    micro_p, micro_r, micro_f, _ = precision_recall_fscore_support(
        micro_true, micro_pred, **kwargs
    )

    kwargs.pop('average')
    per_class_fs = f1_score(micro_true, micro_pred, average=None, **kwargs)

    res = {
        'p_macro': np.mean(macro_p),
        'r_macro': np.mean(macro_r),
        'f_macro': np.mean(macro_f),
        'p_micro': micro_p,
        'r_micro': micro_r,
        'f_micro': micro_f
    }

    for label, per_class_f in zip(sorted(labels), per_class_fs):
        res['f_class_{}'.format(label)] = per_class_f

    return res