Python sklearn.metrics.confusion_matrix() Examples

The following are 30 code examples of sklearn.metrics.confusion_matrix(), drawn from open-source projects; the source file, project, and license for each example are noted above it. You may also want to check out all available functions/classes of the module sklearn.metrics, or try the search function.
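Before the project examples, here is a minimal, self-contained sketch of the basic call (the labels and values below are made up purely for illustration): confusion_matrix(y_true, y_pred) returns an array whose rows correspond to the true classes and whose columns correspond to the predicted classes.

from sklearn.metrics import confusion_matrix

y_true = [0, 1, 1, 0, 1, 2]   # hypothetical ground-truth labels
y_pred = [0, 1, 0, 0, 1, 2]   # hypothetical predictions

# Rows index the true class, columns index the predicted class.
cm = confusion_matrix(y_true, y_pred)
print(cm)
# [[2 0 0]
#  [1 2 0]
#  [0 0 1]]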
Example #1
Source File: __init__.py    From EDeN with MIT License
def plot_confusion_matrix(y_true, y_pred, size=None, normalize=False):
    """plot_confusion_matrix."""
    cm = confusion_matrix(y_true, y_pred)
    fmt = "%d"
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        fmt = "%.2f"
    xticklabels = list(sorted(set(y_pred)))
    yticklabels = list(sorted(set(y_true)))
    if size is not None:
        plt.figure(figsize=(size, size))
    heatmap(cm, xlabel='Predicted label', ylabel='True label',
            xticklabels=xticklabels, yticklabels=yticklabels,
            cmap=plt.cm.Blues, fmt=fmt)
    if normalize:
        plt.title("Confusion matrix (norm.)")
    else:
        plt.title("Confusion matrix")
    plt.gca().invert_yaxis() 
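A possible usage sketch for the helper above (hedged: heatmap is EDeN's own plotting helper and must be in scope, and the labels below are made up): with normalize=True each row is divided by its sum, so the diagonal shows per-true-class recall.

import matplotlib.pyplot as plt

y_true = ["cat", "dog", "dog", "cat"]   # hypothetical labels
y_pred = ["cat", "dog", "cat", "cat"]   # hypothetical predictions
plot_confusion_matrix(y_true, y_pred, size=6, normalize=True)
plt.show()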
Example #2
Source File: __init__.py    From speech-emotion-recognition with MIT License
def evaluate(self, x_test: numpy.ndarray, y_test: numpy.ndarray) -> None:
        """
        Evaluate the current model on the given test data.

        Predict the labels for test data using the model and print the relevant
        metrics like accuracy and the confusion matrix.

        Args:
            x_test (numpy.ndarray): Numpy nD array or a list like object
                                    containing the samples.
            y_test (numpy.ndarray): Numpy 1D array or list like object
                                    containing the labels for test samples.
        """
        predictions = self.predict(x_test)
        print(y_test)
        print(predictions)
        print('Accuracy:%.3f\n' % accuracy_score(y_pred=predictions,
                                                 y_true=y_test))
        print('Confusion matrix:', confusion_matrix(y_pred=predictions,
                                                    y_true=y_test)) 
Example #3
Source File: utils.py    From MCF-3D-CNN with MIT License
def save_cnf_roc(y_true, y_pred, classes, isPlot, save_tag = ''):
    # compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))    
    for i in range(len(y_true)): 
        y[i] = np.argmax(y_true[i,:])
        y_[i] = np.argmax(y_pred[i,:])
    cnf_mat = confusion_matrix(y, y_)
    print(cnf_mat)
    
    # # record the confusion matrix
    f = open('experiments/img/confuse_matrixes.txt', 'a+')
    if save_tag[-1] == '0':
        f.write(save_tag+'\n')
    f.write('No.' + save_tag[-1] + '\n')
    f.write(str(cnf_mat) + '\n')
    f.close()

    # # record the ROC curve
    plot_roc_curve(y_true, y_pred, range(classes), 'all/'+save_tag)  

###########################
# compute TP, TN, FP, FN
Example #4
Source File: Train Classifier and Test Video Feed.py    From Emotion-Recognition-Using-SVMs with MIT License
def train_and_evaluate(clf, X_train, X_test, y_train, y_test):
    clf.fit(X_train, y_train)
    print ("Accuracy on training set:")
    print (clf.score(X_train, y_train))
    print ("Accuracy on testing set:")
    print (clf.score(X_test, y_test))
    y_pred = clf.predict(X_test)
    print ("Classification Report:")
    print (metrics.classification_report(y_test, y_pred))
    print ("Confusion Matrix:")
    print (metrics.confusion_matrix(y_test, y_pred))


# ===============================================================================
# from FaceDetectPredict.py
# =============================================================================== 
Example #5
Source File: curve.py    From 3DGCN with MIT License
def draw_confusion_matrix(dataset, model, set_trial=None, filename="test_results.sdf"):
    path = find_average_trial(dataset, model, metric="test_pr") if set_trial is None \
        else "../result/{}/{}/{}/".format(model, dataset, set_trial)

    # Load true, pred value
    true_y, pred_y = [], []
    mols = Chem.SDMolSupplier(path + filename)

    for mol in mols:
        true_y.append(float(mol.GetProp("true")))
        pred_y.append(float(mol.GetProp("pred")))

    true_y = np.array(true_y, dtype=float)
    pred_y = np.array(pred_y, dtype=float).round()

    # Get precision and recall
    confusion = confusion_matrix(true_y, pred_y)
    tn, fp, fn, tp = confusion.ravel()

    print("tn: {}, fp: {}, fn: {}, tp: {}".format(tn, fp, fn, tp)) 
Example #6
Source File: conv_featuremaps_visualization.py    From MCF-3D-CNN with MIT License
def accuracy(y_true, y_pred):        
    # compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))    
    for i in range(len(y_true)): 
        y[i] = np.argmax(y_true[i,:])
        y_[i] = np.argmax(y_pred[i,:])
    cnf_mat = confusion_matrix(y, y_)
    
    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])
    
    # # plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)
    
    
    # compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted') 
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat 
Example #7
Source File: tutorial_error_analysis_ud.py    From nagisa with MIT License
def create_confusion_matrix(tagger, X, Y):
    true_cm = []
    pred_cm = []
    label2id = {}
    for i in range(len(X)):
        words = X[i]
        true_tags = Y[i]
        pred_tags = tagger.decode(words) # decoding

        if true_tags != pred_tags:
            for true_tag, pred_tag in zip(true_tags, pred_tags):
                if true_tag != pred_tag:
                    if true_tag not in label2id:
                        label2id[true_tag] = len(label2id)

                    if pred_tag not in label2id:
                        label2id[pred_tag] = len(label2id)

                    true_cm.append(label2id[true_tag])
                    pred_cm.append(label2id[pred_tag])

    cm = confusion_matrix(true_cm, pred_cm)
    labels = list(label2id.keys())
    cm_labeled = pd.DataFrame(cm, columns=labels, index=labels)
    return cm_labeled 
Example #8
Source File: machine_learning.py    From tindetheus with MIT License
def fit_log_reg(X, y):
    # fits a logistic regression model to your data
    model = LogisticRegression(class_weight='balanced')
    model.fit(X, y)
    print('Train size: ', len(X))
    train_score = model.score(X, y)
    print('Training accuracy', train_score)
    ypredz = model.predict(X)
    cm = confusion_matrix(y, ypredz)
    # tn, fp, fn, tp = cm.ravel()
    tn, _, _, tp = cm.ravel()

    # recall (true positive rate): when it's actually yes, how often does it predict yes?
    recall = float(tp) / np.sum(cm, axis=1)[1]
    # Specificity: When it's actually no, how often does it predict no?
    specificity = float(tn) / np.sum(cm, axis=1)[0]

    print('Recall/ Like accuracy', recall)
    print('specificity/ Dislike accuracy', specificity)

    # save the model
    joblib.dump(model, 'log_reg_model.pkl') 
Example #9
Source File: utils.py    From tsn-pytorch with BSD 2-Clause "Simplified" License
def class_accuracy(prediction, label):
    cf = confusion_matrix(prediction, label)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)

    cls_acc = cls_hit / cls_cnt.astype(float)

    mean_cls_acc = cls_acc.mean()

    return cls_acc, mean_cls_acc 
Example #10
Source File: utils.py    From Attention-Gated-Networks with MIT License
def classification_scores(gts, preds, labels):
    accuracy        = metrics.accuracy_score(gts,  preds)
    class_accuracies = []
    for lab in labels: # TODO Fix
        class_accuracies.append(metrics.accuracy_score(gts[gts == lab], preds[gts == lab]))
    class_accuracies = np.array(class_accuracies)

    f1_micro        = metrics.f1_score(gts,        preds, average='micro')
    precision_micro = metrics.precision_score(gts, preds, average='micro')
    recall_micro    = metrics.recall_score(gts,    preds, average='micro')
    f1_macro        = metrics.f1_score(gts,        preds, average='macro')
    precision_macro = metrics.precision_score(gts, preds, average='macro')
    recall_macro    = metrics.recall_score(gts,    preds, average='macro')

    # class wise score
    f1s        = metrics.f1_score(gts,        preds, average=None)
    precisions = metrics.precision_score(gts, preds, average=None)
    recalls    = metrics.recall_score(gts,    preds, average=None)

    confusion = metrics.confusion_matrix(gts,preds, labels=labels)

    #TODO confusion matrix, recall, precision
    return accuracy, f1_micro, precision_micro, recall_micro, f1_macro, precision_macro, recall_macro, confusion, class_accuracies, f1s, precisions, recalls 
Example #11
Source File: style_transfer.py    From linguistic-style-transfer with Apache License 2.0
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--classifier-saved-model-path", type=str)
    parser.add_argument("--text-file-path", type=str, required=True)
    parser.add_argument("--label-index", type=str, required=False)
    parser.add_argument("--label-file-path", type=str, required=False)
    args_namespace = parser.parse_args(argv)
    command_line_args = vars(args_namespace)

    global logger
    logger = log_initializer.setup_custom_logger(global_config.logger_name, "INFO")

    if not command_line_args['label_file_path'] and not command_line_args['label_index']:
        raise Exception("Provide either label-index or label_file_path")

    [style_transfer_score, confusion_matrix] = \
        get_style_transfer_score(command_line_args['classifier_saved_model_path'],
                                 command_line_args['text_file_path'],
                                 command_line_args['label_index'], 
                                 command_line_args['label_file_path'])
    logger.info("style_transfer_score: {}".format(style_transfer_score))
    logger.info("confusion_matrix: {}".format(confusion_matrix)) 
Example #12
Source File: cost.py    From driverlessai-recipes with Apache License 2.0
def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None,
              **kwargs) -> float:
        # label actuals as 1 or 0
        lb = LabelEncoder()
        labels = lb.fit_transform(labels)
        actual = lb.transform(actual)

        # label predictions as 1 or 0
        predicted = predicted >= self._threshold

        # use sklearn to get fp and fn
        cm = confusion_matrix(actual, predicted, sample_weight=sample_weight, labels=labels)
        tn, fp, fn, tp = cm.ravel()

        # calculate `$1*FP + $2*FN`
        return ((fp * self.__class__._fp_cost) + (fn * self.__class__._fn_cost)) / (
                    tn + fp + fn + tp)  # divide by total weighted count to make loss invariant to data size 
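As a quick sanity check of the formula in the return statement, here is a hedged numeric sketch with made-up counts and costs (the recipe's actual _fp_cost and _fn_cost values are defined elsewhere and are not shown here):

# Hypothetical values, purely to illustrate the cost formula above.
tn, fp, fn, tp = 50, 10, 5, 35
fp_cost, fn_cost = 1.0, 2.0   # assumed costs; the recipe defines its own _fp_cost/_fn_cost

cost = (fp * fp_cost + fn * fn_cost) / (tn + fp + fn + tp)
print(cost)   # (10*1.0 + 5*2.0) / 100 = 0.2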
Example #13
Source File: hfusion.py    From hfusion with MIT License
def calc_test_result(result, test_label, test_mask):

  true_label=[]
  predicted_label=[]

  for i in range(result.shape[0]):
    for j in range(result.shape[1]):
      if test_mask[i,j]==1:
        true_label.append(np.argmax(test_label[i,j] ))
        predicted_label.append(np.argmax(result[i,j] ))
    
  print("Confusion Matrix :")
  print(confusion_matrix(true_label, predicted_label))
  print("Classification Report :")
  print(classification_report(true_label, predicted_label,digits=4))
  print("Accuracy ", accuracy_score(true_label, predicted_label))
  print("Macro Classification Report :")
  print(precision_recall_fscore_support(true_label, predicted_label,average='macro'))
  print("Weighted Classification Report :")
  print(precision_recall_fscore_support(true_label, predicted_label,average='weighted'))
  #print "Normal Classification Report :"
  #print precision_recall_fscore_support(true_label, predicted_label) 
Example #14
Source File: plotting.py    From qb with MIT License
def plot_confusion(title, true_labels, predicted_labels, normalized=True):
    labels = list(set(true_labels) | set(predicted_labels))

    if normalized:
        cm = confusion_matrix(true_labels, predicted_labels, labels=labels)
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    else:
        cm = confusion_matrix(true_labels, predicted_labels, labels=labels)

    fig, ax = plt.subplots(figsize=(10, 10))
    ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.set_title(title)
    # plt.colorbar()
    tick_marks = np.arange(len(labels))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(labels, rotation=90)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(labels)
    ax.set_ylabel('True Label')
    ax.set_xlabel('Predicted Label')
    ax.grid(False)
    return fig, ax 
Example #15
Source File: train_eval.py    From Bert-Chinese-Text-Classification-Pytorch with MIT License
def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)

    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter) 
Example #16
Source File: test_classification.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_confusion_matrix_multiclass_subset_labels():
    # Test confusion matrix - multi-class case with subset of labels
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with only first two labels considered
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(cm, [[19, 4],
                            [4, 3]])

    # compute confusion matrix with explicit label ordering for only subset
    # of labels
    cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(cm, [[18, 2],
                            [24, 3]])

    # a label not in y_true should result in zeros for that row/column
    extra_label = np.max(y_true) + 1
    cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
    assert_array_equal(cm, [[18, 0],
                            [0, 0]])

    # check for exception when none of the specified labels are in y_true
    assert_raises(ValueError, confusion_matrix, y_true, y_pred,
                  labels=[extra_label, extra_label + 1]) 
Example #17
Source File: model.py    From deepchem with MIT License
def QuadWeightedKappa(y, y_pred):
  y_pred = np.argmax(y_pred, 1)
  cm = confusion_matrix(y, y_pred)
  classes_y, counts_y = np.unique(y, return_counts=True)
  classes_y_pred, counts_y_pred = np.unique(y_pred, return_counts=True)
  E = np.zeros((classes_y.shape[0], classes_y.shape[0]))
  for i, c1 in enumerate(classes_y):
    for j, c2 in enumerate(classes_y_pred):
      E[c1, c2] = counts_y[i] * counts_y_pred[j]
  E = E / np.sum(E) * np.sum(cm)
  w = np.zeros((classes_y.shape[0], classes_y.shape[0]))
  for i in range(classes_y.shape[0]):
    for j in range(classes_y.shape[0]):
      w[i, j] = float((i - j)**2) / (classes_y.shape[0] - 1)**2
  re = 1 - np.sum(w * cm) / np.sum(w * E)
  return re 
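For comparison, scikit-learn also ships a direct implementation of this statistic; a minimal sketch using cohen_kappa_score with quadratic weights (which should match the hand-rolled version above when every class appears in y) is shown below. Note that QuadWeightedKappa first takes an argmax over class probabilities, while cohen_kappa_score is given hard labels directly.

import numpy as np
from sklearn.metrics import cohen_kappa_score

y = np.array([0, 1, 2, 2, 1, 0])        # hypothetical true ratings
y_hat = np.array([0, 2, 2, 1, 1, 0])    # hypothetical predicted ratings

# Quadratic weights penalize disagreements by (i - j)^2, as in QuadWeightedKappa above.
kappa = cohen_kappa_score(y, y_hat, weights='quadratic')
print(kappa)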
Example #18
Source File: common.py    From typhon with MIT License
def _report_ice_cloud(self, output_dir, experiment, test, retrieved):
        # Confusion matrix:
        fig, ax = plt.subplots(figsize=(12, 10))
        cm = confusion_matrix(test.ice_cloud, retrieved.ice_cloud)
        img = self._plot_matrix(cm, classes=["Yes", "No"], normalize=True)
        fig.colorbar(img, label="probability")
        ax.set_title("Ice Cloud Classifier - Performance")
        ax.set_ylabel('real ice cloud')
        ax.set_xlabel('predicted ice cloud')
        fig.tight_layout()
        fig.savefig(join(output_dir, "ice-cloud-confusion-matrix.png"))

        fig, ax = plt.subplots(figsize=(12, 10))
        ax.barh(
            np.arange(len(self.ice_cloud.inputs)),
            self.ice_cloud.estimator.feature_importances_
        )
        ax.set_yticks(np.arange(len(self.ice_cloud.inputs)))
        ax.set_yticklabels(self.ice_cloud.inputs)
        ax.set_xlabel("Feature Importance")
        ax.set_ylabel("Feature")
        ax.set_title("Ice Cloud Classifier - Importance")
        fig.savefig(join(output_dir, "ice-cloud-feature-importance.png")) 
Example #19
Source File: evaluate.py    From rasa_wechat with Apache License 2.0
def run_story_evaluation(story_file, policy_model_path, nlu_model_path,
                         out_file, max_stories):
    """Run the evaluation of the stories, plots the results."""
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels

    test_y, preds = collect_story_predictions(story_file, policy_model_path,
                                              nlu_model_path, max_stories)

    log_evaluation_table(test_y, preds)
    cnf_matrix = confusion_matrix(test_y, preds)
    plot_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
                          title='Action Confusion matrix')

    fig = plt.gcf()
    fig.set_size_inches(int(20), int(20))
    fig.savefig(out_file, bbox_inches='tight') 
Example #20
Source File: autoencoder.py    From DeepLearningSmells with Apache License 2.0
def find_optimal(error_df):
    optimal_threshold = 1000
    max_f1 = 0
    max_pr = 0
    max_re = 0
    for threshold in range(1000, 400000, 5000):
        print("Threshold: " + str(threshold))
        y_pred = [1 if e > threshold else 0 for e in error_df.Reconstruction_error.values]
        conf_matrix = confusion_matrix(error_df.True_class, y_pred)
        precision, recall, f1 = compute_metrics(conf_matrix)
        if f1 > max_f1:
            max_f1 = f1
            optimal_threshold = threshold
            max_pr = precision
            max_re = recall
    return optimal_threshold, max_pr, max_re, max_f1 
Example #21
Source File: test_classification.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_confusion_matrix_binary():
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)

    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])

        tp, fp, fn, tn = cm.flatten()
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)

    test(y_true, y_pred)
    test([str(y) for y in y_true],
         [str(y) for y in y_pred]) 
Example #22
Source File: run.py    From fever-naacl-2018 with Apache License 2.0
def print_evaluation(model,data,ls,log=None):
    features,actual = data
    predictions = predict(model, features, 500).data.numpy().reshape(-1).tolist()

    labels = [ls.idx[i] for i, _ in enumerate(ls.idx)]

    actual = [labels[i] for i in actual]
    predictions = [labels[i] for i in predictions]

    print(accuracy_score(actual, predictions))
    print(classification_report(actual, predictions))
    print(confusion_matrix(actual, predictions))

    data = zip(actual,predictions)
    if log is not None:
        f = open(log, "w+")
        for a,p in data:
            f.write(json.dumps({"actual": a, "predicted": p}) + "\n")
        f.close() 
Example #23
Source File: metrics.py    From TCFPN-ISBA with MIT License
def macro_accuracy(P, Y, n_classes, bg_class=None, return_all=False, **kwargs):
    def macro_(P, Y, n_classes=None, bg_class=None, return_all=False):
        conf_matrix = sm.confusion_matrix(Y, P, labels=np.arange(n_classes))
        conf_matrix = conf_matrix / (conf_matrix.sum(0)[:, None] + 1e-5)
        conf_matrix = np.nan_to_num(conf_matrix)
        diag = conf_matrix.diagonal() * 100.

        # Remove background score
        if bg_class is not None:
            diag = np.array([diag[i] for i in range(n_classes) if i != bg_class])

        macro = diag.mean()
        if return_all:
            return macro, diag
        else:
            return macro

    if type(P) == list:
        out = [macro_(P[i], Y[i], n_classes=n_classes, bg_class=bg_class, return_all=return_all) for i in range(len(P))]
        if return_all:
            return (np.mean([o[0] for o in out]), np.mean([o[1] for o in out], 0))
        else:
            return np.mean(out)
    else:
        return macro_(P, Y, n_classes=n_classes, bg_class=bg_class, return_all=return_all) 
Example #24
Source File: loss.py    From sunets with MIT License
def prediction_stat_confusion_matrix(logits, annotation, n_classes):
    labels = range(n_classes)

    # First we do argmax on gpu and then transfer it to cpu
    logits = logits.data
    annotation = annotation.data
    _, prediction = logits.max(1)
    prediction = prediction.squeeze(1)

    prediction_np = prediction.cpu().numpy().flatten()
    annotation_np = annotation.cpu().numpy().flatten()

    # Mask-out value is ignored by default in the sklearn
    # read sources to see how that was handled
    current_confusion_matrix = confusion_matrix(y_true=annotation_np,
                                                y_pred=prediction_np,
                                                labels=labels)

    return current_confusion_matrix 
Example #25
Source File: utils.py    From Very-deep-cnn-pytorch with MIT License
def get_evaluation(y_true, y_prob, list_metrics):
    y_pred = np.argmax(y_prob, -1)
    output = {}
    if 'accuracy' in list_metrics:
        output['accuracy'] = metrics.accuracy_score(y_true, y_pred)
    if 'loss' in list_metrics:
        try:
            output['loss'] = metrics.log_loss(y_true, y_prob)
        except ValueError:
            output['loss'] = -1
    if 'confusion_matrix' in list_metrics:
        output['confusion_matrix'] = str(metrics.confusion_matrix(y_true, y_pred))
    return output 
Example #26
Source File: models.py    From HSLN-Joint-Sentence-Classification with MIT License
def run_evaluate(self, test, report=True):
        """Evaluates performance on test set

        Args:
            test: dataset that yields tuple of (sentences, tags)

        Returns:
            metrics: (dict) metrics["acc"] = 98.4, ...

        """
        accs = []
        labs = []
        labs_pred = []
        for words, labels in minibatches(test, self.config.batch_size):
            labels_pred, document_lengths = self.predict_batch(words)

            for lab, lab_pred, length in zip(labels, labels_pred,
                                             document_lengths):
                lab      = lab[:length]
                lab_pred = lab_pred[:length]
                accs    += [a==b for (a, b) in zip(lab, lab_pred)]

                labs.extend(lab)
                labs_pred.extend(lab_pred)

        labs = [self.idx_to_tag[lab].split('_')[0] for lab in labs]
        labs_pred = [self.idx_to_tag[lab_pred].split('_')[0] for lab_pred in labs_pred]
        _, _, macro_f1, _ = precision_recall_fscore_support(labs, labs_pred, average='macro')
        _, _, micro_f1, _ = precision_recall_fscore_support(labs, labs_pred, average='micro')
        _, _, weighted_f1, _ = precision_recall_fscore_support(labs, labs_pred, average='weighted')
        acc = np.mean(accs)

        # populated only when a report is requested; otherwise returned as None
        class_report, confusion = None, None
        if report:
            class_report = classification_report(labs, labs_pred, digits=4)
            print(class_report)
            confusion = confusion_matrix(labs, labs_pred)
            print(confusion)

        return {"acc": 100*acc, "macro-f1": 100*macro_f1, "micro-f1": 100*micro_f1, 
                "weighted-f1": 100*weighted_f1, "classification-report": class_report, 
                "confusion-matrix": confusion} 
Example #27
Source File: confusion_matrix.py    From LogClass with MIT License
def report(y, pred):
    return confusion_matrix(y, pred) 
Example #28
Source File: helper.py    From practicalDataAnalysisCookbook with GNU General Public License v2.0
def printModelSummary(actual, predicted):
    '''
        Method to print out model summaries
    '''
    print('Overall accuracy of the model is {0:.2f} percent'\
        .format(
            (actual == predicted).sum() / \
            len(actual) * 100))
    print('Classification report: \n', 
        mt.classification_report(actual, predicted))
    print('Confusion matrix: \n', 
        mt.confusion_matrix(actual, predicted))
    print('ROC: ', mt.roc_auc_score(actual, predicted)) 
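One note on the last line: roc_auc_score is usually given continuous scores rather than hard class predictions; when the classifier exposes probabilities, a sketch of the more common call (model and X_test are hypothetical names, not part of the example above) would be:

# 'model' and 'X_test' are hypothetical; any sklearn classifier with predict_proba works.
scores = model.predict_proba(X_test)[:, 1]    # probability of the positive class
print('ROC on scores: ', mt.roc_auc_score(actual, scores))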
Example #29
Source File: utils.py    From pretorched-x with MIT License
def class_accuracy(prediction, label):
    cf = confusion_matrix(prediction, label)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
    cls_acc = cls_hit / cls_cnt.astype(float)
    mean_cls_acc = cls_acc.mean()
    return cls_acc, mean_cls_acc 
Example #30
Source File: utils.py    From MCF-3D-CNN with MIT License
def cnf_roc(y_true, y_pred, classes, isPlot, save_tag = ''):
    # compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))    
    for i in range(len(y_true)): 
        y[i] = np.argmax(y_true[i,:])
        y_[i] = np.argmax(y_pred[i,:])
    cnf_mat = confusion_matrix(y, y_)
    print(cnf_mat)
    
    if isPlot:
        # # plot the confusion matrix
        plot_confusion_matrix(cnf_mat, range(classes), save_tag=save_tag)
        # # plot the ROC curve
        plot_roc_curve(y_true, y_pred, range(classes), save_tag)

    if classes > 2: 
        # compute multi-class evaluation metrics
        Sens = recall_score(y, y_, average='macro')
        Prec = precision_score(y, y_, average='macro')
        F1 = f1_score(y, y_, average='weighted') 
        Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
        print(Support)
        return Sens, Prec, F1, cnf_mat
    else:
        Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
        Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
        Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])
        # compute the AUC
        Auc = roc_auc_score(y_true[:,1], y_pred[:,1])
        return Acc, Sens, Spec, Auc