Python sklearn.metrics.multilabel_confusion_matrix() Examples
The following are 7 code examples of sklearn.metrics.multilabel_confusion_matrix(), each taken from an open-source project; the source file and license are noted above each example.
You may also want to check out all available functions and classes of the sklearn.metrics module.
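Before the project examples, a minimal standalone sketch of the API itself may help (toy inputs; the values in the comments are hand-computed):

from sklearn.metrics import multilabel_confusion_matrix

y_true = [0, 1, 2, 2]
y_pred = [0, 2, 2, 1]
mcm = multilabel_confusion_matrix(y_true, y_pred)
# mcm.shape == (3, 2, 2): one [[tn, fp], [fn, tp]] matrix per class
# mcm[0] == [[3, 0], [0, 1]]
# mcm[1] == [[2, 1], [1, 0]]
# mcm[2] == [[1, 1], [1, 1]]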
Example #1
Source File: _sklearn.py, from the qlik-py-tools project (MIT License)
import numpy as np
import pandas as pd
import sklearn.metrics as metrics

def _prep_confusion_matrix(self, y_test, y_pred, labels):
    """
    Calculate a confusion matrix and add it to the model as a data frame suitable for Qlik
    """
    # Calculate confusion matrix and flatten it to a simple array
    if len(y_test.shape) == 1:
        confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()

        # Structure into a DataFrame suitable for Qlik
        result = []
        i = 0
        for t in labels:
            for p in labels:
                result.append([str(t), str(p), confusion_array[i]])
                i = i + 1

        self.model.confusion_matrix = pd.DataFrame(result, columns=["true_label", "pred_label", "count"])
        self.model.confusion_matrix.insert(0, "model_name", self.model.name)
    # Handle confusion matrix format for multi-label classification
    else:
        confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)
        result = pd.DataFrame(confusion_array.reshape(-1, 4),
                              columns=["true_negative", "false_positive", "false_negative", "true_positive"])
        self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=["step"])
        self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)
        self.model.confusion_matrix.insert(0, "model_name", self.model.name)
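The reshape(-1, 4) above works because each per-label 2x2 matrix is laid out as [[tn, fp], [fn, tp]], so row-major flattening yields exactly the four columns named in the DataFrame. A minimal sketch with made-up inputs (not from qlik-py-tools):

import numpy as np
from sklearn.metrics import multilabel_confusion_matrix

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])
mcm = multilabel_confusion_matrix(y_true, y_pred)  # shape (3, 2, 2)
flat = mcm.reshape(-1, 4)                          # columns: tn, fp, fn, tp
# flat == [[1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 1, 0]]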
Example #2
Source File: metrics.py, from the AIF360 project (Apache License 2.0)
import warnings

from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import multilabel_confusion_matrix

def specificity_score(y_true, y_pred, pos_label=1, sample_weight=None):
    """Compute the specificity or true negative rate.

    Args:
        y_true (array-like): Ground truth (correct) target values.
        y_pred (array-like): Estimated targets as returned by a classifier.
        pos_label (scalar, optional): The label of the positive class.
        sample_weight (array-like, optional): Sample weights.
    """
    MCM = multilabel_confusion_matrix(y_true, y_pred, labels=[pos_label],
                                      sample_weight=sample_weight)
    tn, fp, fn, tp = MCM.ravel()
    negs = tn + fp
    if negs == 0:
        warnings.warn('specificity_score is ill-defined and being set to 0.0 '
                      'due to no negative samples.', UndefinedMetricWarning)
        return 0.
    return tn / negs
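A quick usage sketch, assuming the specificity_score defined above is in scope (the value in the comment is hand-computed, not from AIF360's docs):

y_true = [0, 0, 1, 1]
y_pred = [0, 1, 1, 1]
print(specificity_score(y_true, y_pred))  # 0.5: one of the two negatives is correctly rejected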
Example #3
Source File: test_classification.py, from the Mastering-Elasticsearch-7.0 project (MIT License)
def test_multilabel_confusion_matrix_binary():
    # Test multilabel confusion matrix - binary classification case
    # (make_prediction and assert_array_equal are helpers imported elsewhere
    # in the original test module)
    y_true, y_pred, _ = make_prediction(binary=True)

    def test(y_true, y_pred):
        cm = multilabel_confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[[17, 8], [3, 22]],
                                [[22, 3], [8, 17]]])

    test(y_true, y_pred)
    test([str(y) for y in y_true], [str(y) for y in y_pred])
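Outside the test harness, the binary-case symmetry is easy to see directly; a minimal sketch (values hand-computed):

from sklearn.metrics import multilabel_confusion_matrix

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
cm = multilabel_confusion_matrix(y_true, y_pred)
# cm[1] treats class 1 as positive: [[2, 0], [1, 2]]
# cm[0] treats class 0 as positive, so tn/tp and fp/fn swap: [[2, 1], [0, 2]]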
Example #4
Source File: test_classification.py, from the Mastering-Elasticsearch-7.0 project (MIT License)
def test_multilabel_confusion_matrix_multiclass():
    # Test multilabel confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)

    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = multilabel_confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[[47, 4], [5, 19]],
                                [[38, 6], [28, 3]],
                                [[30, 25], [2, 18]]])

        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
        assert_array_equal(cm, [[[47, 4], [5, 19]],
                                [[30, 25], [2, 18]],
                                [[38, 6], [28, 3]]])

        # compute confusion matrix with super set of present labels
        labels = ['0', '2', '1', '3'] if string_type else [0, 2, 1, 3]
        cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
        assert_array_equal(cm, [[[47, 4], [5, 19]],
                                [[30, 25], [2, 18]],
                                [[38, 6], [28, 3]],
                                [[75, 0], [0, 0]]])

    test(y_true, y_pred)
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
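One detail worth isolating from this test: a label in labels that never occurs in the data produces a matrix whose only nonzero entry is tn, since every sample is a true negative for that class. A minimal sketch:

from sklearn.metrics import multilabel_confusion_matrix

cm = multilabel_confusion_matrix([0, 1, 1], [0, 1, 0], labels=[0, 1, 9])
# cm[2] (for the never-seen label 9) == [[3, 0], [0, 0]]: tn equals n_samples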
Example #5
Source File: test_classification.py, from the Mastering-Elasticsearch-7.0 project (MIT License)
def test_multilabel_confusion_matrix_errors():
    # (assert_raise_message is a helper from sklearn's test utilities)
    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])

    # Bad sample_weight
    assert_raise_message(ValueError, "inconsistent numbers of samples",
                         multilabel_confusion_matrix,
                         y_true, y_pred, sample_weight=[1, 2])
    assert_raise_message(ValueError, "bad input shape",
                         multilabel_confusion_matrix,
                         y_true, y_pred,
                         sample_weight=[[1, 2, 3], [2, 3, 4], [3, 4, 5]])

    # Bad labels
    assert_raise_message(ValueError, "All labels must be in [0, n labels)",
                         multilabel_confusion_matrix,
                         y_true, y_pred, labels=[-1])
    assert_raise_message(ValueError, "All labels must be in [0, n labels)",
                         multilabel_confusion_matrix,
                         y_true, y_pred, labels=[3])

    # Using samplewise outside multilabel
    assert_raise_message(ValueError, "Samplewise metrics",
                         multilabel_confusion_matrix,
                         [0, 1, 2], [1, 2, 0], samplewise=True)

    # Bad y_type
    assert_raise_message(ValueError, "multiclass-multioutput is not supported",
                         multilabel_confusion_matrix,
                         [[0, 1, 2], [2, 1, 0]],
                         [[1, 2, 0], [1, 0, 2]])
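The samplewise restriction is the one most likely to be hit in ordinary use; a small sketch of catching it (the exact message text may vary between scikit-learn versions):

from sklearn.metrics import multilabel_confusion_matrix

try:
    multilabel_confusion_matrix([0, 1, 2], [1, 2, 0], samplewise=True)
except ValueError as err:
    print(err)  # starts with "Samplewise metrics"; samplewise requires multilabel input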
Example #6
Source File: util.py, from the pyss3 project (MIT License)
import numpy as np

def multilabel_confusion_matrix(*args):
    """Dummy version of multilabel_confusion_matrix."""
    return np.array([])
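A stub like this usually acts as a fallback for scikit-learn versions older than 0.21, where multilabel_confusion_matrix does not exist. A hypothetical sketch of the usual guard pattern (not pyss3's exact code):

import numpy as np

try:
    # use the real implementation when available (scikit-learn >= 0.21)
    from sklearn.metrics import multilabel_confusion_matrix
except ImportError:
    # otherwise fall back to a dummy that callers can detect by its empty result
    def multilabel_confusion_matrix(*args):
        """Dummy version of multilabel_confusion_matrix."""
        return np.array([])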
Example #7
Source File: test_classification.py, from the Mastering-Elasticsearch-7.0 project (MIT License)
def test_multilabel_confusion_matrix_multilabel():
    # Test multilabel confusion matrix - multilabel-indicator case
    from scipy.sparse import csc_matrix, csr_matrix

    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
    y_true_csr = csr_matrix(y_true)
    y_pred_csr = csr_matrix(y_pred)
    y_true_csc = csc_matrix(y_true)
    y_pred_csc = csc_matrix(y_pred)

    # cross test different types
    sample_weight = np.array([2, 1, 3])
    real_cm = [[[1, 0], [1, 1]],
               [[1, 0], [1, 1]],
               [[0, 2], [1, 0]]]
    trues = [y_true, y_true_csr, y_true_csc]
    preds = [y_pred, y_pred_csr, y_pred_csc]

    for y_true_tmp in trues:
        for y_pred_tmp in preds:
            cm = multilabel_confusion_matrix(y_true_tmp, y_pred_tmp)
            assert_array_equal(cm, real_cm)

    # test support for samplewise
    cm = multilabel_confusion_matrix(y_true, y_pred, samplewise=True)
    assert_array_equal(cm, [[[1, 0], [1, 1]],
                            [[1, 1], [0, 1]],
                            [[0, 1], [2, 0]]])

    # test support for labels
    cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0])
    assert_array_equal(cm, [[[0, 2], [1, 0]],
                            [[1, 0], [1, 1]]])

    # test support for labels with samplewise
    cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0], samplewise=True)
    assert_array_equal(cm, [[[0, 0], [1, 1]],
                            [[1, 1], [0, 0]],
                            [[0, 1], [1, 0]]])

    # test support for sample_weight with samplewise
    cm = multilabel_confusion_matrix(y_true, y_pred,
                                     sample_weight=sample_weight,
                                     samplewise=True)
    assert_array_equal(cm, [[[2, 0], [2, 2]],
                            [[1, 1], [0, 1]],
                            [[0, 3], [6, 0]]])
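To make the samplewise switch concrete: by default the first axis indexes labels; with samplewise=True it indexes samples. A minimal sketch reusing the arrays from the test above:

import numpy as np
from sklearn.metrics import multilabel_confusion_matrix

y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])

per_label = multilabel_confusion_matrix(y_true, y_pred)
per_sample = multilabel_confusion_matrix(y_true, y_pred, samplewise=True)
# per_label.shape == (3, 2, 2) because there are 3 labels;
# per_sample.shape == (3, 2, 2) too, but only because there also happen to be 3 samples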