Python sklearn.metrics.fbeta_score() Examples
The following are 30 code examples of sklearn.metrics.fbeta_score(), collected from open source projects. The source file and originating project are noted above each example. You may also want to check out all other available functions and classes of the sklearn.metrics module.
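Before the project examples, here is a minimal, self-contained usage sketch of sklearn.metrics.fbeta_score; the label arrays and beta values below are made up purely for illustration and are not taken from any of the examples that follow.

# Minimal usage sketch for sklearn.metrics.fbeta_score (illustrative values only).
import numpy as np
from sklearn.metrics import fbeta_score

y_true = np.array([0, 1, 1, 0, 1, 1])   # hypothetical ground-truth labels
y_pred = np.array([0, 1, 0, 0, 1, 1])   # hypothetical predictions

# beta > 1 weights recall more heavily than precision; beta < 1 favours precision.
f2 = fbeta_score(y_true, y_pred, beta=2)                             # binary task, positive class = 1
f05_macro = fbeta_score(y_true, y_pred, beta=0.5, average='macro')   # per-class scores, then unweighted mean

print(f2, f05_macro)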
Example #1
Source File: test_classification.py From twitter-stock-recommendation with MIT License
def test_fscore_warnings():
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')

        for score in [f1_score, partial(fbeta_score, beta=2)]:
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
Example #2
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_no_labels(beta, average):
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=average, beta=beta)
    assert_almost_equal(p, 0)
    assert_almost_equal(r, 0)
    assert_almost_equal(f, 0)
    assert_equal(s, None)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=average)
    assert_almost_equal(fbeta, 0)
Example #3
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_no_labels_average_none():
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    beta = 1

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=None, beta=beta)
    assert_array_almost_equal(p, [0, 0, 0], 2)
    assert_array_almost_equal(r, [0, 0, 0], 2)
    assert_array_almost_equal(f, [0, 0, 0], 2)
    assert_array_almost_equal(s, [0, 0, 0], 2)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=None)
    assert_array_almost_equal(fbeta, [0, 0, 0], 2)
Example #4
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_prf_average_binary_data_non_binary():
    # Error if user does not explicitly set non-binary average mode
    y_true_mc = [1, 2, 3, 3]
    y_pred_mc = [1, 2, 3, 1]
    msg_mc = ("Target is multiclass but average='binary'. Please "
              "choose another average setting, one of ["
              "None, 'micro', 'macro', 'weighted'].")
    y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
    y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    msg_ind = ("Target is multilabel-indicator but average='binary'. Please "
               "choose another average setting, one of ["
               "None, 'micro', 'macro', 'weighted', 'samples'].")

    for y_true, y_pred, msg in [
        (y_true_mc, y_pred_mc, msg_mc),
        (y_true_ind, y_pred_ind, msg_ind),
    ]:
        for metric in [precision_score, recall_score, f1_score,
                       partial(fbeta_score, beta=2)]:
            assert_raise_message(ValueError, msg, metric, y_true, y_pred)
Example #5
Source File: f4_score.py From driverlessai-recipes with Apache License 2.0
def score(self,
          actual: np.array,
          predicted: np.array,
          sample_weight: typing.Optional[np.array] = None,
          labels: typing.Optional[np.array] = None,
          **kwargs) -> float:
    lb = LabelEncoder()
    labels = lb.fit_transform(labels)
    actual = lb.transform(actual)

    method = "binary"
    if len(labels) > 2:
        predicted = np.argmax(predicted, axis=1)
        method = "micro"
    else:
        predicted = (predicted > self._threshold)

    f4_score = fbeta_score(actual, predicted, labels=labels, average=method,
                           sample_weight=sample_weight, beta=4)
    return f4_score
Example #6
Source File: f3_score.py From driverlessai-recipes with Apache License 2.0
def score(self,
          actual: np.array,
          predicted: np.array,
          sample_weight: typing.Optional[np.array] = None,
          labels: typing.Optional[np.array] = None,
          **kwargs) -> float:
    lb = LabelEncoder()
    labels = lb.fit_transform(labels)
    actual = lb.transform(actual)

    method = "binary"
    if len(labels) > 2:
        predicted = np.argmax(predicted, axis=1)
        method = "micro"
    else:
        predicted = (predicted > self._threshold)

    f3_score = fbeta_score(actual, predicted, labels=labels, average=method,
                           sample_weight=sample_weight, beta=3)
    return f3_score
Example #7
Source File: test_fbeta.py From tf_metrics with Apache License 2.0
def test_fbeta_op(generator_fn, y_true_all, y_pred_all, pos_indices,
                  average, beta):
    # Precision on the whole dataset
    pr_sk = fbeta_score(
        y_true_all, y_pred_all, beta, pos_indices, average=average)

    # Create Tensorflow graph
    ds = tf.data.Dataset.from_generator(
        generator_fn, (tf.int32, tf.int32), ([None], [None]))
    y_true, y_pred = ds.make_one_shot_iterator().get_next()
    pr_tf = tf_metrics.fbeta(y_true, y_pred, 4, pos_indices,
                             average=average, beta=beta)

    with tf.Session() as sess:
        # Initialize and run the update op on each batch
        sess.run(tf.local_variables_initializer())
        while True:
            try:
                sess.run(pr_tf[1])
            except OutOfRangeError as e:
                break

        # Check final value
        assert np.allclose(sess.run(pr_tf[0]), pr_sk)
Example #8
Source File: find_best_threshold.py From KagglePlanetPytorch with MIT License
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(17):
            # np.int in the original; the alias was removed in NumPy >= 1.24, plain int works everywhere
            p2[:, i] = (p[:, i] > x[i]).astype(int)
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * 17
    for i in range(17):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)

    return x
Example #9
Source File: app.py From edge_detection_framework with MIT License
def f2_score(y_pred, y_true, average='samples'):
    # fbeta_score throws a confusing error if inputs are not numpy arrays
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    # We need to use average='samples' here, any other average method will generate bogus results
    return fbeta_score(y_true, y_pred, beta=2, average=average)
Example #10
Source File: custom_scores_HO.py From Auto_ViML with Apache License 2.0
def f2_measure(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=2)
### keep all Regression Scorers greater_is_better True since it leaves them as is and minimizes them
Example #11
Source File: test_metrics.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_make_scorer(self):
    df = pdml.ModelFrame([])
    result = df.metrics.make_scorer(metrics.fbeta_score, beta=2)
    expected = metrics.make_scorer(metrics.fbeta_score, beta=2)

    self.assertEqual(result._kwargs, expected._kwargs)
    self.assertEqual(result._sign, expected._sign)
    self.assertEqual(result._score_func, expected._score_func)
Example #12
Source File: test_metrics.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_make_scorer(self):
    df = pdml.ModelFrame([])
    result = df.metrics.make_scorer(metrics.fbeta_score, beta=2)
    expected = metrics.make_scorer(metrics.fbeta_score, beta=2)

    self.assertEqual(result._kwargs, expected._kwargs)
    self.assertEqual(result._sign, expected._sign)
    self.assertEqual(result._score_func, expected._score_func)
Example #13
Source File: test_metrics.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_fbeta_score_averaging(self, average):
    result = self.df.metrics.fbeta_score(beta=0.5, average=average)
    expected = metrics.fbeta_score(self.target, self.pred, beta=0.5,
                                   average=average)
    self.assertEqual(result, expected)
Example #14
Source File: test_metrics.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_fbeta_score(self):
    result = self.df.metrics.fbeta_score(beta=0.5, average=None)
    expected = metrics.fbeta_score(self.target, self.pred, beta=0.5,
                                   average=None)
    self.assertTrue(isinstance(result, pdml.ModelSeries))
    self.assert_numpy_array_almost_equal(result.values, expected)
Example #15
Source File: find_best_threshold.py From KagglePlanetPytorch with MIT License
def fbeta(true_label, prediction):
    return fbeta_score(true_label, prediction, beta=2, average='samples')
Example #16
Source File: emotion_recognition.py From emotion-recognition-using-speech with MIT License
def train_fbeta_score(self, beta):
    y_pred = self.model.predict(self.X_train)
    return fbeta_score(self.y_train, y_pred, beta, average='micro')
Example #17
Source File: emotion_recognition.py From emotion-recognition-using-speech with MIT License
def test_fbeta_score(self, beta):
    y_pred = self.model.predict(self.X_test)
    return fbeta_score(self.y_test, y_pred, beta, average='micro')
Example #18
Source File: test_classification.py From twitter-stock-recommendation with MIT License
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)

        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)

        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)

        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred,
                                      beta=2, **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
Example #19
Source File: test_classification.py From twitter-stock-recommendation with MIT License
def test_precision_recall_f1_no_labels():
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    for beta in [1]:
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)

        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)

        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)

            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
Example #20
Source File: keras.py From kaggle-tools with MIT License
def _score_per_threshold(self, predictions, targets, threshold):
    """ Compute the Fbeta score per threshold. """
    # Notice that here I am using the sklearn fbeta_score function.
    # You can read more about it here:
    # http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html
    thresholded_predictions = (predictions > threshold).astype(int)
    return fbeta_score(targets, thresholded_predictions, beta=self.beta)
Example #21
Source File: validate.py From pytorch-planet-amazon with Apache License 2.0
def f2_score(output, target, threshold):
    output = (output > threshold)
    return fbeta_score(target, output, beta=2, average='samples')
Example #22
Source File: metrics_utils.py From ludwig with Apache License 2.0
def f1_score(self, idx):
    return self.fbeta_score(1, idx)
Example #23
Source File: metrics_utils.py From ludwig with Apache License 2.0
def avg_fbeta_score(self, beta, average='macro'):
    return metrics.fbeta_score(self.conditions, self.predictions,
                               beta=beta, average=average)
Example #24
Source File: test_classification.py From Mastering-Elasticsearch-7.0 with MIT License
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)

        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)

        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)

        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred,
                                      beta=2, **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
Example #25
Source File: test_fbeta.py From tf_metrics with Apache License 2.0
def test_fbeta(generator_fn, pos_indices, average, beta):
    for y_true, y_pred in generator_fn():
        pr_tf = tf_metrics.fbeta(
            y_true, y_pred, 4, pos_indices, average=average, beta=beta)
        pr_sk = fbeta_score(
            y_true, y_pred, beta, pos_indices, average=average)
        with tf.Session() as sess:
            sess.run(tf.local_variables_initializer())
            assert np.allclose(sess.run(pr_tf[1]), pr_sk)
Example #26
Source File: evaluator.py From DeepADoTS with MIT License
def get_accuracy_precision_recall_fscore(y_true: list, y_pred: list):
    accuracy = accuracy_score(y_true, y_pred)
    # warn_for=() avoids log warnings for any result being zero
    precision, recall, f_score, _ = prf(y_true, y_pred, average='binary',
                                        warn_for=())
    if precision == 0 and recall == 0:
        f01_score = 0
    else:
        f01_score = fbeta_score(y_true, y_pred, average='binary', beta=0.1)
    return accuracy, precision, recall, f_score, f01_score
Example #27
Source File: validate.py From pytorch-planet-amazon with Apache License 2.0
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    """ Find optimal threshold values for f2 score. Thanks Anokas
    https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/discussion/32475
    """
    size = y.shape[1]

    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(size):
            # np.int in the original; the alias was removed in NumPy >= 1.24, plain int works everywhere
            p2[:, i] = (p[:, i] > x[i]).astype(int)
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * size
    for i in range(size):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)

    return x, best_score
Example #28
Source File: train.py From pytorch-planet-amazon with Apache License 2.0
def f2_score(output, target, threshold):
    output = (output > threshold)
    return fbeta_score(target, output, beta=2, average='samples')
Example #29
Source File: train.py From pytorch-planet-amazon with Apache License 2.0
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    """ Find optimal threshold values for f2 score. Thanks Anokas
    https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/discussion/32475
    """
    size = y.shape[1]

    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(size):
            # np.int in the original; the alias was removed in NumPy >= 1.24, plain int works everywhere
            p2[:, i] = (p[:, i] > x[i]).astype(int)
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * size
    for i in range(size):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)

    return x, best_score
Example #30
Source File: custom_scores.py From Auto_ViML with Apache License 2.0
def f2_measure(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=2)