Python eval.QGEvalCap() Examples
The following are 6 code examples of eval.QGEvalCap().
The originating project, source file, and license are noted above each example. You may also want to check out the other functions and classes available in the eval module.
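Across all of the examples below, QGEvalCap is driven the same way: build two dictionaries keyed by the tokenized source sentence, where gts maps each key to a list of reference questions and res maps it to a single-element list holding the model's prediction, then call evaluate(). The sketch below condenses that shared pattern; the sample sentences and the comment about what evaluate() reports are illustrative, not taken from any one project.

# Minimal sketch of the shared pattern, assuming eval.py (with QGEvalCap) is importable.
from collections import defaultdict
from eval import QGEvalCap

gts = defaultdict(list)  # key -> list of reference questions
res = defaultdict(list)  # key -> single-element list with the predicted question

key = "the eiffel tower is in paris ."  # illustrative source sentence
gts[key].append("where is the eiffel tower ?".encode('utf-8'))
res[key] = ["where is the eiffel tower located ?".encode('utf-8')]

QGEval = QGEvalCap(gts, res)
scores = QGEval.evaluate()  # in these projects this reports reference-based metric scores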
Example #1
Source File: eval.py From unilm with MIT License
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    # Read the tokenized source sentences; these become the keys of the eval dicts.
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1].strip().lower()
            pairs.append(pair)

    # Attach the reference question to each source sentence, aligned by line number.
    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1].strip()
            cnt += 1

    # Read the model predictions; fix_tokenization is defined elsewhere in this source file.
    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = fix_tokenization(line[:-1].strip()).lower()
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    # eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        # gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
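For reference, this helper is meant to be called with three aligned plain-text files, one example per line; the file names below are placeholders, not paths from the unilm repository.

# Hypothetical invocation; each file holds one line per example, aligned by line number.
scores = eval(out_file="predictions.txt",  # model outputs, one generated question per line
              src_file="src.txt",          # tokenized source sentences (used as dict keys)
              tgt_file="tgt.txt")          # tokenized reference questions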
Example #2
Source File: eval_on_unilm_tokenized_ref.py From unilm with MIT License
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1].strip().lower()
            pairs.append(pair)

    # detokenize is a helper defined elsewhere in this source file.
    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = " ".join(
                detokenize(line[:-1].strip().split())).lower()
            cnt += 1

    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1].strip().lower()
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    # eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        # gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
Example #3
Source File: eval.py From NQG_ASs2s with MIT License
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1]
            pairs.append(pair)

    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1]
            cnt += 1

    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1]
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    ## eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        ## gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
Example #4
Source File: eval.py From RL-based-Graph2Seq-for-NQG with Apache License 2.0
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1]
            pairs.append(pair)

    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1]
            cnt += 1

    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1]
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    ## eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        ## gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
Example #5
Source File: eval.py From neural-question-generation with MIT License
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1]
            pairs.append(pair)

    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1]
            cnt += 1

    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1]
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    ## eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        ## gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()
Example #6
Source File: eval.py From QG-Net with MIT License
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Given a filename, calculate the metric scores for that prediction file
    isDin: boolean value to check whether input file is DirectIn.txt
    """
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1]
            pairs.append(pair)

    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1]
            cnt += 1

    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1]
            output.append(line)

    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]

    ## eval
    from eval import QGEvalCap
    from collections import defaultdict  # imported at module level in the original file
    from pdb import set_trace            # assumed import for the bare set_trace() calls below
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    set_trace()  # debugger breakpoint kept from the original source
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]

        ## gts
        gts[key].append(pair['tokenized_question'].encode('utf-8'))

    set_trace()  # debugger breakpoint kept from the original source
    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()