Python sklearn.metrics.label_ranking_average_precision_score() Examples
The following are 9 code examples of sklearn.metrics.label_ranking_average_precision_score(), collected from open-source projects. Each example notes the original project and source file it was taken from. You may also want to check out the other functions and classes available in the sklearn.metrics module.
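Before the project examples, here is a minimal sketch of how the metric is typically called. The toy arrays below are the ones used in the scikit-learn documentation, not taken from any of the projects listed here.

    import numpy as np
    from sklearn.metrics import label_ranking_average_precision_score

    # Toy multilabel problem: rows are samples, columns are labels.
    y_true = np.array([[1, 0, 0],
                       [0, 0, 1]])
    # Higher score means the label is ranked closer to the top for that sample.
    y_score = np.array([[0.75, 0.50, 1.00],
                        [1.00, 0.20, 0.10]])

    # Averages, over samples and their true labels, the fraction of
    # higher-ranked labels that are also true; 1.0 is a perfect ranking.
    print(label_ranking_average_precision_score(y_true, y_score))  # ~0.416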
Example #1
Source File: test_ranking.py, from Mastering-Elasticsearch-7.0 (MIT License) | 6 votes
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)

    # Score with ties
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)

    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)

    # Uniform score
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
Example #2
Source File: test_ranking.py, from Mastering-Elasticsearch-7.0 (MIT License) | 6 votes
def test_lrap_sample_weighting_zero_labels():
    # Degenerate sample labeling (e.g., zero labels for a sample) is a valid
    # special case for lrap (the sample is considered to achieve perfect
    # precision), but this case is not tested in test_common.
    # For these test samples, the APs are 0.5, 0.75, and 1.0 (default for zero
    # labels).
    y_true = np.array([[1, 0, 0, 0], [1, 0, 0, 1], [0, 0, 0, 0]],
                      dtype=bool)
    y_score = np.array([[0.3, 0.4, 0.2, 0.1], [0.1, 0.2, 0.3, 0.4],
                        [0.4, 0.3, 0.2, 0.1]])
    samplewise_lraps = np.array([0.5, 0.75, 1.0])
    sample_weight = np.array([1.0, 1.0, 0.0])

    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score,
                                              sample_weight=sample_weight),
        np.sum(sample_weight * samplewise_lraps) / np.sum(sample_weight))
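A small sketch of the convention this test relies on, assuming a recent scikit-learn (the toy arrays are made up): a sample with no relevant labels is scored as perfectly ranked, which is why the test uses sample_weight to control its contribution.

    import numpy as np
    from sklearn.metrics import label_ranking_average_precision_score

    # Degenerate sample: no relevant labels at all.
    y_true = np.array([[0, 0, 0, 0]])
    y_score = np.array([[0.4, 0.3, 0.2, 0.1]])

    # By convention the sample counts as perfectly ranked, so the score is 1.0.
    print(label_ranking_average_precision_score(y_true, y_score))  # 1.0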
Example #3
Source File: test_ranking.py, from twitter-stock-recommendation (MIT License) | 6 votes
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)

    # Score with ties
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)

    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)

    # Uniform score
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
Example #4
Source File: utils.py, from freesound-classification (Apache License 2.0) | 5 votes
def lwlrap(truth, scores):
    """Calculate the overall lwlrap using sklearn.metrics.lrap."""
    # sklearn doesn't correctly apply weighting to samples with no labels,
    # so just skip them.
    sample_weight = np.sum(truth > 0, axis=1)
    nonzero_weight_sample_indices = np.flatnonzero(sample_weight > 0)
    overall_lwlrap = label_ranking_average_precision_score(
        truth[nonzero_weight_sample_indices, :] > 0,
        scores[nonzero_weight_sample_indices, :],
        sample_weight=sample_weight[nonzero_weight_sample_indices])
    return overall_lwlrap
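A possible call of the helper above, with hypothetical truth and score matrices; a clip with no positive labels is skipped inside lwlrap() itself.

    import numpy as np

    # Hypothetical binary truth matrix and score matrix: 4 clips, 3 classes.
    truth = np.array([[1, 0, 0],
                      [0, 1, 1],
                      [0, 0, 0],   # no labels: excluded by lwlrap()
                      [1, 0, 1]])
    scores = np.random.rand(4, 3)

    print(lwlrap(truth, scores))  # label-weighted LRAP over the 3 labeled clips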
Example #5
Source File: test_ranking.py, from Mastering-Elasticsearch-7.0 (MIT License) | 5 votes
def test_lrap_error_raised():
    check_lrap_error_raised(label_ranking_average_precision_score)
Example #6
Source File: test_ranking.py, from Mastering-Elasticsearch-7.0 (MIT License) | 5 votes
def test_alternative_lrap_implementation(n_samples, n_classes, random_state):
    check_alternative_lrap_implementation(
        label_ranking_average_precision_score,
        n_classes, n_samples, random_state)
Example #7
Source File: metrics.py, from BirdCLEF-Baseline (MIT License) | 5 votes
def lrap(prediction, target):
    # Calculate the label ranking average precision (LRAP) for every sample
    return label_ranking_average_precision_score(target, prediction)
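A minimal call of the wrapper above with made-up model outputs and one-hot targets; the per-sample values are worked out by hand.

    import numpy as np

    prediction = np.array([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]])
    target = np.array([[0, 1, 0],
                       [0, 1, 0]])

    # Sample 1: its true label has the top score          -> per-sample LRAP 1.0
    # Sample 2: its true label is ranked second of three  -> per-sample LRAP 0.5
    print(lrap(prediction, target))  # (1.0 + 0.5) / 2 = 0.75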
Example #8
Source File: test_ranking.py, from twitter-stock-recommendation (MIT License) | 5 votes
def test_label_ranking_avp():
    for fn in [label_ranking_average_precision_score, _my_lrap]:
        yield check_lrap_toy, fn
        yield check_lrap_without_tie_and_increasing_score, fn
        yield check_lrap_only_ties, fn
        yield check_zero_or_all_relevant_labels, fn

    yield check_lrap_error_raised, label_ranking_average_precision_score

    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state)
Example #9
Source File: evaluate.py, from bird-species-classification (MIT License) | 4 votes
def evaluate(experiment_path, meta_data=False, xml_dir="", train_dir="",
             submission_file=""):
    pickle_path = os.path.join(experiment_path, "predictions.pkl")
    with open(pickle_path, 'rb') as input:
        y_trues = pickle.load(input)
        y_scores = pickle.load(input)
        training_segments = pickle.load(input)

    if meta_data:
        elevation_scores = compute_elevation_scores(training_segments,
                                                    xml_dir, train_dir)
        ## Combine the scores using Bayes Thm.
        normalize = np.array([np.sum(y_s * e_s) for y_s, e_s
                              in zip(y_scores, elevation_scores)])
        y_scores = y_scores * elevation_scores / normalize[:, None]

    if submission_file:
        write_to_submission_file(submission_file, y_scores,
                                 training_segments, train_dir)
        return

    map_score = mean_average_precision(y_trues, y_scores)
    auroc_score = area_under_roc_curve(y_trues, y_scores)

    # coverage error
    coverage_error = metrics.coverage_error(y_trues, y_scores)
    # label ranking average precision
    lrap = metrics.label_ranking_average_precision_score(y_trues, y_scores)
    # ranking loss
    ranking_loss = metrics.label_ranking_loss(y_trues, y_scores)

    print("")
    print("- Top 1:", top_n(y_trues, y_scores, 1))
    print("- Top 2:", top_n(y_trues, y_scores, 2))
    print("- Top 3:", top_n(y_trues, y_scores, 3))
    print("- Top 4:", top_n(y_trues, y_scores, 4))
    print("- Top 5:", top_n(y_trues, y_scores, 5))
    print("")
    print("Mean Average Precision: ", map_score)
    print("Area Under ROC Curve: ", auroc_score)
    print("Coverage Error: ", coverage_error)
    print("Label Ranking Average Precision: ", lrap)
    print("Ranking Loss: ", ranking_loss)
    print("Total predictions: ", len(y_scores))

    return {
        "map": map_score,
        "auroc": auroc_score,
        "coverage_error": coverage_error,
        "lrap": lrap,
        "ranking_loss": ranking_loss,
        "top_1": top_n(y_trues, y_scores, 1),
        "top_5": top_n(y_trues, y_scores, 5),
    }
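For reference, the three sklearn ranking metrics used in evaluate() share the same (y_true, y_score) call signature and can be compared on a toy example; the arrays below are made up and the printed values are omitted.

    import numpy as np
    from sklearn import metrics

    y_trues = np.array([[1, 1, 0],
                        [0, 0, 1]])
    y_scores = np.array([[0.9, 0.6, 0.3],
                         [0.2, 0.8, 0.4]])

    # Average depth of the ranking needed to cover all true labels (lower is better).
    print(metrics.coverage_error(y_trues, y_scores))
    # The metric this page covers: 1.0 means every true label outranks every false one.
    print(metrics.label_ranking_average_precision_score(y_trues, y_scores))
    # Fraction of mis-ordered (true, false) label pairs (lower is better).
    print(metrics.label_ranking_loss(y_trues, y_scores))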