Python cider.cider.Cider() Examples
The following are 19 code examples of cider.cider.Cider(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cider.cider, or try the search function.
Example #1
Source File: eval.py From video-caption-openNMT.pytorch with MIT License | 6 votes |
def evaluate(self):
    """Score self.res candidate captions against the self.gts references.

    Runs the CIDEr and CIDEr-D scorers, both seeded with the document
    frequency corpus self.df.

    Returns:
        dict: metric name ("CIDEr" / "CIDErD") -> list of per-sample scores.
    """
    # =================================================
    # Set up scorers
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls so the
    # method is valid under Python 3 as well.
    print('setting up scorers...')
    scorers = [
        (Cider(df=self.df), "CIDEr"),
        (CiderD(df=self.df), "CIDErD"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    metric_scores = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(self.gts, self.res)
        print("Mean %s score: %0.3f" % (method, score))
        metric_scores[method] = list(scores)
    return metric_scores
Example #2
Source File: eval.py From video-caption.pytorch with MIT License | 6 votes |
def evaluate(self):
    """Compute CIDEr and CIDEr-D scores of self.res against self.gts.

    Both scorers are initialized with the document-frequency data self.df.

    Returns:
        dict: metric name -> list of per-sample scores from that scorer.
    """
    # =================================================
    # Set up scorers
    # =================================================
    # Fixed: Python 2 print statements replaced with print() calls for
    # Python 3 compatibility.
    print('setting up scorers...')
    scorers = [
        (Cider(df=self.df), "CIDEr"),
        (CiderD(df=self.df), "CIDErD"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    metric_scores = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(self.gts, self.res)
        print("Mean %s score: %0.3f" % (method, score))
        metric_scores[method] = list(scores)
    return metric_scores
Example #3
Source File: eval.py From NeuralBabyTalk with MIT License | 6 votes |
def evaluate(self):
    """Compute CIDEr and CIDEr-D for self.res against self.gts.

    Returns:
        dict: metric name -> list of per-sample scores from that scorer.
    """
    # Build the (scorer, metric-name) pairs, both driven by self.df.
    print('setting up scorers...')
    named_scorers = [
        (Cider(df=self.df), "CIDEr"),
        (CiderD(df=self.df), "CIDErD"),
    ]

    # Run each scorer and collect its per-sample scores under the metric name.
    results = {}
    for scorer, name in named_scorers:
        print('computing %s score...' % (scorer.method()))
        mean_score, sample_scores = scorer.compute_score(self.gts, self.res)
        print("Mean %s score: %0.3f" % (name, mean_score))
        results[name] = list(sample_scores)
    return results
Example #4
Source File: eval.py From video-caption.pytorch with MIT License | 6 votes |
def evaluate(self):
    """Score self.res against self.gts with CIDEr and CIDEr-D.

    Scorers are seeded with the document-frequency corpus self.df.

    Returns:
        dict: metric name -> list of per-sample scores.
    """
    # =================================================
    # Set up scorers
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls.
    print('setting up scorers...')
    scorers = [
        (Cider(df=self.df), "CIDEr"),
        (CiderD(df=self.df), "CIDErD"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    metric_scores = {}
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(self.gts, self.res)
        print("Mean %s score: %0.3f" % (method, score))
        metric_scores[method] = list(scores)
    return metric_scores
Example #5
Source File: eval.py From unilm with MIT License | 6 votes |
def evaluate(self):
    """Score self.res against self.gts with BLEU-1..4, METEOR and ROUGE-L.

    The CIDEr scorer is intentionally disabled below.

    Returns:
        list: scalar scores in scorer order (four BLEU values, then
        METEOR, then ROUGE_L).
    """
    output = []
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        # (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        score, scores = scorer.compute_score(self.gts, self.res)
        # Bleu reports one score per n-gram order, so `method` is a list there.
        # Fixed: isinstance() instead of the `type(x) == list` anti-idiom.
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.5f" % (m, sc))
                output.append(sc)
        else:
            print("%s: %0.5f" % (method, score))
            output.append(score)
    return output
Example #6
Source File: eval_on_unilm_tokenized_ref.py From unilm with MIT License | 6 votes |
def evaluate(self):
    """Score self.res against self.gts with BLEU-1..4, METEOR and ROUGE-L.

    The CIDEr scorer is intentionally disabled below.

    Returns:
        list: scalar scores in scorer order (four BLEU values, then
        METEOR, then ROUGE_L).
    """
    output = []
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        # (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        score, scores = scorer.compute_score(self.gts, self.res)
        # Bleu yields a list of names (one per n-gram order); other scorers
        # a single name. Fixed: isinstance() over `type(x) == list`.
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.5f" % (m, sc))
                output.append(sc)
        else:
            print("%s: %0.5f" % (method, score))
            output.append(score)
    return output
Example #7
Source File: eval.py From NQG_ASs2s with MIT License | 6 votes |
def evaluate(self):
    """Score self.res against self.gts with BLEU-1..4, METEOR and ROUGE-L.

    The CIDEr scorer is intentionally disabled below.

    Returns:
        list: scalar scores in scorer order.
    """
    output = []
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        # (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; isinstance()
    # replaces the `type(x) == list` anti-idiom.
    for scorer, method in scorers:
        score, scores = scorer.compute_score(self.gts, self.res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.5f" % (m, sc))
                output.append(sc)
        else:
            print("%s: %0.5f" % (method, score))
            output.append(score)
    return output
Example #8
Source File: eval.py From neural-question-generation with MIT License | 6 votes |
def evaluate(self):
    """Score self.res against self.gts with BLEU-1..4 only.

    The METEOR, ROUGE-L and CIDEr scorers are intentionally disabled below.

    Returns:
        list: the four BLEU scalar scores.
    """
    output = []
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        # (Meteor(),"METEOR"),
        # (Rouge(), "ROUGE_L"),
        # (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; isinstance()
    # replaces the `type(x) == list` anti-idiom.
    for scorer, method in scorers:
        score, scores = scorer.compute_score(self.gts, self.res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.5f" % (m, sc))
                output.append(sc)
        else:
            print("%s: %0.5f" % (method, score))
            output.append(score)
    return output
Example #9
Source File: eval.py From Zeroshot-QuestionGeneration with MIT License | 5 votes |
def evaluate(self):
    """Tokenize and score generated questions with BLEU, METEOR, ROUGE-L, CIDEr.

    Results are stored via self.setEval / self.set_textid_to_eval and
    finalized with self.set_eval().
    """
    # imgIds = self.coco.getImgIds()
    # NOTE(review): gts is built from predicted_list and res from label_list —
    # the names look swapped; confirm against the callers before relying on them.
    gts = dict(zip(range(0, len(self.predicted_list)), self.predicted_list))
    res = dict(zip(range(0, len(self.label_list)), self.label_list))
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.set_textid_to_eval(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.set_textid_to_eval(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.set_eval()
Example #10
Source File: eval.py From QG-Net with MIT License | 5 votes |
def evaluate(self):
    """Score self.res against self.gts with BLEU-1..4, METEOR and ROUGE-L.

    The CIDEr scorer is intentionally disabled below.

    Returns:
        list: scalar scores in scorer order.
    """
    output = []
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        # (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; isinstance()
    # replaces the `type(x) == list` anti-idiom.
    for scorer, method in scorers:
        score, scores = scorer.compute_score(self.gts, self.res)
        # set_trace()
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.5f" % (m, sc))
                output.append(sc)
        else:
            print("%s: %0.5f" % (method, score))
            output.append(score)
    return output
Example #11
Source File: album_eval.py From AREL with MIT License | 5 votes |
def evaluate(self, album_to_Gts, album_to_Res):
    """Score album_to_Res against album_to_Gts with BLEU, METEOR, ROUGE-L, CIDEr.

    Results are stored via self.setEval / self.setAlbumToEval and
    finalized with self.setEvalAlbums().

    Args:
        album_to_Gts: mapping album id -> reference captions.
        album_to_Res: mapping album id -> candidate captions.
    """
    self.album_to_Res = album_to_Res
    self.album_to_Gts = album_to_Gts
    # =================================================
    # Set up scorers
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; removed the dead
    # `scorers = []` assignment that was immediately overwritten.
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")  # df='VIST/VIST-train-words'
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score ...' % (scorer.method()))
        score, scores = scorer.compute_score(self.album_to_Gts, self.album_to_Res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.setAlbumToEval(scs, list(self.album_to_Gts.keys()), m)
                print('%s: %.3f' % (m, sc))
        else:
            self.setEval(score, method)
            self.setAlbumToEval(scores, list(self.album_to_Gts.keys()), method)
            print('%s: %.3f' % (method, score))
    self.setEvalAlbums()
Example #12
Source File: eval.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 4 votes |
def evaluate(self):
    """COCO-style caption evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores results via self.setEval /
    self.setImgToEvalImgs, and finalizes with self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.setImgToEvalImgs(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #13
Source File: eval.py From DialoGPT with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores results via self.setEval /
    self.setImgToEvalImgs, and finalizes with self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        # Fixed: isinstance() replaces the `type(x) == list` anti-idiom.
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) materializes the dict view before handing it on.
                self.setImgToEvalImgs(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #14
Source File: eval.py From captionGAN with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L, CIDEr and SPICE.

    Gathers annotations per image id (keyed by self.imgidStr) from
    self.coco / self.cocoRes, tokenizes both sides, stores results via
    self.setEval / self.setImgToEvalImgs, and finalizes with
    self.setEvalImgs().
    """
    imgIds = self.params[self.imgidStr]
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; removed the dead
    # local `eval = {}` which was never used and shadowed the builtin.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
        (Spice(), "SPICE"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.setImgToEvalImgs(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #15
Source File: eval.py From TGIF-Release with BSD 3-Clause "New" or "Revised" License | 4 votes |
def main():
    """Evaluate predicted TGIF captions (path in argv[1]) against ground truth.

    Loads the ground-truth TSV and the test split, keeps exactly one
    predicted sentence per test key (empty string when missing), converts
    both sides to COCO format, tokenizes, and prints BLEU-1..4, METEOR,
    ROUGE-L and CIDEr scores.
    """
    import sys
    res_path = sys.argv[1]
    gt_path = osp.join(this_dir, 'tgif-v1.0.tsv')
    test_list_path = osp.join(this_dir, 'splits', 'test.txt')
    test_keys = load_list(test_list_path)
    all_sents = load_sentences(gt_path)
    res = load_sentences(res_path)
    # make sure res has and only has single sentence
    # for all testing keys
    gts = {}
    for key in test_keys:
        gts[key] = all_sents[key]
        if key in res:
            res[key] = [res[key][0]]
        else:
            res[key] = [""]
    # =================================================
    # Convert to COCO format
    # =================================================
    # list(...) keeps Python 2's list-of-keys behavior under Python 3.
    gts = to_coco(gts, list(res.keys()))
    res = to_coco(res, list(res.keys()))
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; removed the dead
    # local `eval = {}` which was never used and shadowed the builtin.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                print("%s: %0.3f" % (m, sc))
        else:
            print("%s: %0.3f" % (method, score))
Example #16
Source File: eval.py From neural-image-captioning with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores per-image results (keyed by imgIds) via
    self.setEval / self.setImgToEvalImgs, and finalizes with
    self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; removed the dead
    # local `eval = {}` which was never used and shadowed the builtin.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                self.setImgToEvalImgs(scs, imgIds, m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, imgIds, method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #17
Source File: eval.py From densecap-tensorflow with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores per-image results (keyed by imgIds) via
    self.setEval / self.setImgToEvalImgs, and finalizes with
    self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements -> print() calls; dropped the dead
    # local `eval = {}` which shadowed the builtin and was never used.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                self.setImgToEvalImgs(scs, imgIds, m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, imgIds, method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #18
Source File: eval.py From CommonSenseMultiHopQA with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores results via self.setEval /
    self.setImgToEvalImgs, and finalizes with self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.setImgToEvalImgs(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()
Example #19
Source File: eval.py From image_captioning with MIT License | 4 votes |
def evaluate(self):
    """COCO-style evaluation with BLEU, METEOR, ROUGE-L and CIDEr.

    Gathers annotations per image id from self.coco / self.cocoRes,
    tokenizes both sides, stores results via self.setEval /
    self.setImgToEvalImgs, and finalizes with self.setEvalImgs().
    """
    imgIds = self.params['image_id']
    # imgIds = self.coco.getImgIds()
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = self.coco.imgToAnns[imgId]
        res[imgId] = self.cocoRes.imgToAnns[imgId]
    # =================================================
    # Tokenize
    # =================================================
    # Fixed: Python 2 print statements converted to print() calls.
    print('tokenization...')
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    # =================================================
    # Compute scores
    # =================================================
    for scorer, method in scorers:
        print('computing %s score...' % (scorer.method()))
        score, scores = scorer.compute_score(gts, res)
        if isinstance(method, list):
            for sc, scs, m in zip(score, scores, method):
                self.setEval(sc, m)
                # list(...) keeps Python 2's list-of-keys behavior under Python 3.
                self.setImgToEvalImgs(scs, list(gts.keys()), m)
                print("%s: %0.3f" % (m, sc))
        else:
            self.setEval(score, method)
            self.setImgToEvalImgs(scores, list(gts.keys()), method)
            print("%s: %0.3f" % (method, score))
    self.setEvalImgs()