Python statistics.harmonic_mean() Examples
The following are 8 code examples of statistics.harmonic_mean(), drawn from open-source projects. The originating project, source file, and license are noted above each example.
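For quick reference, statistics.harmonic_mean() was added in Python 3.6 and returns the reciprocal of the arithmetic mean of the reciprocals of the data. A minimal sketch:

import statistics

# 2 / (1/10 + 1/30) = 15.0
print(statistics.harmonic_mean([10, 30]))  # 15.0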
Example #1
Source File: QAAnalysis_dataframe.py From QUANTAXIS with MIT License
def mean_harmonic(self):
    # Harmonic mean of the price series
    return statistics.harmonic_mean(self.price)
Example #2
Source File: base_datastruct.py From QUANTAXIS with MIT License
def mean_harmonic(self):
    'Return the harmonic mean of DataStruct.price'
    res = self.price.groupby(level=1).apply(lambda x: statistics.harmonic_mean(x))
    res.name = 'mean_harmonic'
    return res
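For context, a minimal standalone sketch of the same groupby pattern; the index levels and prices below are hypothetical, but mirror the two-level (date, code) index that DataStruct uses, so level=1 groups by code:

import statistics
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('2020-01-01', 'AAA'), ('2020-01-02', 'AAA'),
     ('2020-01-01', 'BBB'), ('2020-01-02', 'BBB')],
    names=['date', 'code'])
price = pd.Series([10.0, 30.0, 20.0, 20.0], index=idx)

# Harmonic mean per code
res = price.groupby(level=1).apply(lambda x: statistics.harmonic_mean(x))
print(res)  # AAA -> 15.0, BBB -> 20.0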
Example #3
Source File: heavy_hitters_utils.py From federated with Apache License 2.0
def f1_score(ground_truth, signal, k):
    """Computes the F1 score for the top k words between frequency dicts.

    Args:
      ground_truth: The ground truth dict.
      signal: The obtained heavy hitters dict.
      k: The number of top items that are considered heavy hitters.

    Returns:
      F1 score of the signal in detecting a top k item.
    """
    prec = precision(ground_truth, signal, k)
    rec = recall(ground_truth, signal, k)
    return statistics.harmonic_mean([prec, rec])
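F1 is, by definition, the harmonic mean of precision and recall, so the final line is equivalent to the textbook 2·P·R / (P + R). A quick check with made-up numbers:

import statistics

prec, rec = 0.5, 1.0
print(statistics.harmonic_mean([prec, rec]))  # 0.6666...
print(2 * prec * rec / (prec + rec))          # 0.6666... (same value)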
Example #4
Source File: statistic_functions.py From jhTAlib with GNU General Public License v3.0
def HARMONIC_MEAN(df, n, price='Close'):
    """ Harmonic mean of data
    Returns: list of floats = jhta.HARMONIC_MEAN(df, n, price='Close')
    """
    harmonic_mean_list = []
    if n == len(df[price]):
        # Expanding window over the whole series
        start = None
        for i in range(len(df[price])):
            if df[price][i] != df[price][i]:
                # NaN is the only value that is not equal to itself
                harmonic_mean = float('NaN')
            else:
                if start is None:
                    start = i
                end = i + 1
                harmonic_mean = statistics.harmonic_mean(df[price][start:end])
            harmonic_mean_list.append(harmonic_mean)
    else:
        # Rolling window of length n; the first n-1 entries are NaN
        for i in range(len(df[price])):
            if i + 1 < n:
                harmonic_mean = float('NaN')
            else:
                start = i + 1 - n
                end = i + 1
                harmonic_mean = statistics.harmonic_mean(df[price][start:end])
            harmonic_mean_list.append(harmonic_mean)
    return harmonic_mean_list
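As a quick illustration of the rolling branch, a hypothetical call with a plain dict of lists as df (the function only indexes and slices df[price], so any dict of sequences works; the prices are made up):

df = {'Close': [10.0, 30.0, 20.0, 20.0]}
print(HARMONIC_MEAN(df, 2))  # [nan, 15.0, 24.0, 20.0]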
Example #5
Source File: msmarco_eval.py From sigir19-neural-ir with Apache License 2.0
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages, MaxMRRRank=10):
    """Compute MRR metric

    Args:
      qids_to_relevant_passageids (dict): dictionary of query-passage mapping
        as read in with load_reference or load_reference_from_stream
      qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates

    Returns:
      dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0
    ranking = []
    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # Record the rank of the first relevant passage within the top MaxMRRRank
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    MRR += 1 / (i + 1)
                    ranking.pop()
                    ranking.append(i + 1)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    MRR = MRR / len(ranking)
    all_scores['MRR'] = MRR
    all_scores['QueriesRanked'] = len(ranking)
    all_scores['QueriesWithNoRelevant'] = sum(1 for x in ranking if x == 0)
    all_scores['QueriesWithRelevant'] = sum(1 for x in ranking if x > 0)
    all_scores['AverageRankGoldLabel@' + str(MaxMRRRank)] = statistics.mean(x for x in ranking if x > 0)
    all_scores['MedianRankGoldLabel@' + str(MaxMRRRank)] = statistics.median(x for x in ranking if x > 0)
    all_scores['AverageRankGoldLabel'] = statistics.mean(ranking)
    all_scores['MedianRankGoldLabel'] = statistics.median(ranking)
    all_scores['HarmonicMeanRankingGoldLabel'] = statistics.harmonic_mean(ranking)
    return all_scores
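MRR is the mean of the reciprocal of the rank at which each query's first relevant passage appears (0 if none appears in the top MaxMRRRank). A quick check with two hypothetical queries, using made-up qids and passage ids:

# qid 0's relevant passage (11) shows up at rank 2, qid 1's (22) at rank 1,
# so MRR = (1/2 + 1/1) / 2 = 0.75
scores = compute_metrics({0: [11], 1: [22]}, {0: [33, 11], 1: [22, 44]})
print(scores['MRR'])  # 0.75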
Example #6
Source File: stats.py From Turing with MIT License
def harmonic_mean(args):
    # statistics.harmonic_mean only exists on Python >= 3.6; otherwise fall
    # back to the textbook definition (n divided by the sum of reciprocals)
    if "harmonic_mean" not in dir(statistics):
        return builtins.len(args) / sum([1 / x for x in args])
    return statistics.harmonic_mean(args)
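For positive data, the fallback branch matches the standard-library result; a quick equivalence check (values illustrative):

import statistics

data = [10, 30]
print(len(data) / sum(1 / x for x in data))  # 15.0
print(statistics.harmonic_mean(data))        # 15.0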
Example #7
Source File: msmarco_eval.py From transformer-kernel-ranking with Apache License 2.0
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages, MaxMRRRank=10):
    """Compute MRR metric

    Args:
      qids_to_relevant_passageids (dict): dictionary of query-passage mapping
        as read in with load_reference or load_reference_from_stream
      qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates

    Returns:
      dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0
    ranking = []
    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # Record the rank of the first relevant passage within the top MaxMRRRank
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    MRR += 1 / (i + 1)
                    ranking.pop()
                    ranking.append(i + 1)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    MRR = MRR / len(ranking)
    all_scores['MRR'] = MRR
    all_scores['QueriesRanked'] = len(ranking)
    all_scores['QueriesWithNoRelevant'] = sum(1 for x in ranking if x == 0)
    all_scores['QueriesWithRelevant'] = sum(1 for x in ranking if x > 0)
    all_scores['AverageRankGoldLabel@' + str(MaxMRRRank)] = statistics.mean(x for x in ranking if x > 0)
    all_scores['MedianRankGoldLabel@' + str(MaxMRRRank)] = statistics.median(x for x in ranking if x > 0)
    all_scores['AverageRankGoldLabel'] = statistics.mean(ranking)
    all_scores['MedianRankGoldLabel'] = statistics.median(ranking)
    all_scores['HarmonicMeanRankingGoldLabel'] = statistics.harmonic_mean(ranking)
    return all_scores
Example #8
Source File: __init__.py From tbot with GNU General Public License v3.0
def time_testcase_statistics(
    testcase: typing.Callable,
    *args: typing.Any,
    runs: int = 10,
    sleep: float = 0,
    **kwargs: typing.Any,
) -> None:
    """
    Take multiple measurements of a testcase's run-time and display statistics.

    :param testcase: Testcase to call.
    :param args,\\ kwargs: Arguments to pass to the testcase.
    :param int runs: How many samples to take.
    :param float sleep: How long to sleep between runs.

        Example use: maybe the board does not discharge quickly enough, which
        can cause trouble when the subsequent run tries to boot the board again.
    """
    elapsed_times = []
    for n in range(runs):
        elapsed_time, _ = time_testcase(testcase, *args, **kwargs)
        elapsed_times.append(elapsed_time)
        time.sleep(sleep)
    results = TimingResults(
        statistics.mean(elapsed_times),
        statistics.harmonic_mean(elapsed_times),
        statistics.median(elapsed_times),
        statistics.pvariance(elapsed_times),
        statistics.pstdev(elapsed_times),
    )
    tbot.log.message(
        f"""\
Timing Results:
    {tbot.log.c('mean').green}: {results.mean}
    {tbot.log.c('harmonic mean').green}: {results.harmonic_mean}
    {tbot.log.c('median').green}: {results.median}
    {tbot.log.c('variance').green}: {results.pvariance}
    {tbot.log.c('standard deviation').green}: {results.pstdev}
"""
    )
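For context, a sketch of how this helper might be called; boot_board is a hypothetical testcase, and time_testcase and TimingResults are defined elsewhere in tbot's __init__.py:

@tbot.testcase
def boot_board() -> None:
    ...  # power the board, wait for the login prompt, power off

# Ten timed runs, sleeping two seconds between them so the board can discharge
time_testcase_statistics(boot_board, runs=10, sleep=2.0)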