Python evaluator.evaluate() Examples

The following are 8 code examples of evaluator.evaluate(), drawn from several open-source projects; the project and source file are noted above each example. Note that evaluator is a project-local module in every case, not a single shared library, so the signature and meaning of evaluate() differ from project to project.
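As a quick orientation, these are the four call shapes that appear below. The snippet is a cheat sheet, not runnable code; the variable names are taken from the call sites in the examples, not from any shared API:

# knowledge-net (Example #1): compare a predicted dataset against gold
confusionMatrix, analysis = evaluator.evaluate(gold, prediction, eval_type, goldProperties)

# mc3 (Examples #3 and #4): score a VCF file against a truth VCF
scores = evaluator.evaluate(vcf_file, truth_file, vtype=vtype, truthmask=False)

# kimi (Examples #2, #5, #6, #7): evaluate an expression in an environment
result = ev.evaluate(expression, env)

# models / TensorFlow object detection (Example #8): run an evaluation loop
evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                   FLAGS.checkpoint_dir, FLAGS.eval_dir)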
Example #1
Source File: evaluate.py    From knowledge-net with MIT License
def print_evaluation(eval_type):
  gold = copy.deepcopy(gold_dataset)
  prediction = copy.deepcopy(dataset)
  if eval_type == "uri":
    gold, goldProperties = evaluator.filterForURIEvaluation(gold)
    prediction, _ = evaluator.filterForURIEvaluation(prediction)
  else:
    goldProperties = properties
  confusionMatrix, analysis = evaluator.evaluate(gold, prediction, eval_type, goldProperties)

  # Print results
  print("RESULTS FOR",eval_type)

  evals = evaluator.microEvaluation(confusionMatrix, True)
  evals.extend(evaluator.macroEvaluation(confusionMatrix))
  
  evaluator.writeAnalysisFile(analysis, 'tmp', eval_type)
  evaluator.writeHtmlFile(analysis, 'tmp', eval_type, goldProperties) 
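A minimal driver for print_evaluation, assuming the module-level gold_dataset, dataset, and properties it reads are already loaded. Only "uri" is known from the code above; the second eval-type name is a hypothetical placeholder:

for eval_type in ("uri", "mention"):  # "mention" is an illustrative name
    print_evaluation(eval_type)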
Example #2
Source File: special_forms.py    From kimi with MIT License
def lamb(args, env):
    if len(args) < 2:
        throw_error("syntax", "Incorrect use of (lambda ...): must take at least two arguments (at least one variable and a body).")
    largs = args[:-1]   # all but the last argument are parameter symbols
    lbody = args[-1]    # the last argument is the function body
    for l in largs:
        assert_or_throw(l['type'] == 'symbol', "syntax", "Incorrect use of (lambda ...): the anonymous function's variables must be symbols.")
    largs = tuple(la['value'] for la in largs)
    def anonymous(*arguments):
        if len(arguments) != len(largs):
            throw_error("syntax", "This function takes " + str(len(largs)) + " arguments (" + str(len(arguments)) + " provided).")
        lenv = Environment(name="anon_fn", outer=env, variables=largs, values=arguments)
        return ev.evaluate(lbody, lenv)
    return anonymous
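Stripped of kimi's AST node dicts and Environment class, the closure that lamb returns behaves like this plain-Python sketch; the dict-based scope here is an assumption standing in for kimi's Environment:

def make_closure(param_names, body, defining_env, evaluate):
    def anonymous(*arguments):
        if len(arguments) != len(param_names):
            raise TypeError("This function takes %d arguments (%d provided)."
                            % (len(param_names), len(arguments)))
        local_env = dict(defining_env)                 # child scope of the definition env
        local_env.update(zip(param_names, arguments))  # bind parameters to arguments
        return evaluate(body, local_env)
    return anonymous

# Toy evaluate that just doubles the bound variable:
double = make_closure(("x",), None, {}, lambda body, env: env["x"] * 2)
print(double(21))  # 42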
Example #3
Source File: test_wf_gen.py    From mc3 with Apache License 2.0
def run_stats(args):
    import evaluator
    rev_map = {}
    for k, v in fake_metadata.items():
        rev_map[v['participant_id']] = k
    
    basedir = os.path.dirname( os.path.dirname(__file__) )
    exome_dir = os.path.join(basedir, "testexomes")
    
    out_scores = {}
    for donor_dir in glob(os.path.join(args.out_dir, "*")):
        donor = os.path.basename(donor_dir)
        if rev_map[donor] not in out_scores:
            out_scores[rev_map[donor]] = {}
        for vcf_file in glob( os.path.join(donor_dir, "*.vcf")):
            method = os.path.basename(vcf_file).replace(".vcf", "")
            vtype = None
            if method in SNP_METHOD:
                vtype = "SNV"
            if method in INDEL_METHOD:
                vtype = "INDEL"
            truth_file = os.path.join(exome_dir, "testexome" + rev_map[donor][-1:] + ".truth.vcf.gz")
            scores = evaluator.evaluate(vcf_file, truth_file, vtype=vtype, truthmask=False)
            out_scores[rev_map[donor]][method] = scores
    print(out_scores)
    
    totals = {}
    for v in out_scores.values():
        for method, values in v.items():
            if method not in totals:
                totals[method] = []
            totals[method].append( values )
    for method, values in totals.items():
        out = []
        for i in range(3):
            out.append("%s" % (sum(j[i] for j in values) / float(len(values))))
        print(method, "\t".join(out))
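The final loop averages each position of the per-method score tuples; the code assumes evaluator.evaluate returns a 3-tuple (which metrics those are is defined by mc3's evaluator and not shown on this page). The averaging step in isolation:

values = [(0.9, 0.8, 0.85), (0.7, 0.6, 0.65)]   # illustrative score tuples
means = [sum(v[i] for v in values) / float(len(values)) for i in range(3)]
print(means)  # -> [0.8, 0.7, 0.75] (up to float rounding)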
Example #4
Source File: mc3_mut.py    From mc3 with Apache License 2.0
def run_stats(args):
    import evaluator
    rev_map = {}
    for k, v in fake_metadata.items():
        rev_map[v['participant_id']] = k
    
    basedir = os.path.dirname( os.path.dirname(__file__) )
    exome_dir = os.path.join(basedir, "testexomes")
    
    out_scores = {}
    for donor_dir in glob(os.path.join(args.out_dir, "*")):
        donor = os.path.basename(donor_dir)
        if rev_map[donor] not in out_scores:
            out_scores[rev_map[donor]] = {}
        for vcf_file in glob( os.path.join(donor_dir, "*.vcf")):
            method = os.path.basename(vcf_file).replace(".vcf", "")
            vtype = None
            if method in SNP_METHOD:
                vtype = "SNV"
            if method in INDEL_METHOD:
                vtype = "INDEL"
            truth_file = os.path.join(exome_dir, "testexome" + rev_map[donor][-1:] + ".truth.vcf.gz")
            scores = evaluator.evaluate(vcf_file, truth_file, vtype=vtype, truthmask=False)
            out_scores[rev_map[donor]][method] = scores
    print(out_scores)
    
    totals = {}
    for v in out_scores.values():
        for method, values in v.items():
            if method not in totals:
                totals[method] = []
            totals[method].append( values )
    for method, values in totals.items():
        out = []
        for i in range(3):
            out.append("%s" % (sum(j[i] for j in values) / float(len(values))))
        print(method, "\t".join(out))
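Both mc3 examples begin by inverting fake_metadata so a donor directory name can be mapped back to its metadata key; a toy run of that inversion, with the metadata shape inferred from the loop above:

fake_metadata = {"sample1": {"participant_id": "donor-A"}}  # illustrative shape
rev_map = {v["participant_id"]: k for k, v in fake_metadata.items()}
print(rev_map)  # {'donor-A': 'sample1'}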
Example #5
Source File: special_forms.py    From kimi with MIT License
def do(args, env):
    do_env = Environment(name="do", outer=env)
    if len(args) == 0:
        throw_error("syntax", "Incorrect use of (do ...): must take at least one argument.")
    result = None
    for a in args:
        result = ev.evaluate(a, do_env)
    return result 
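do evaluates each form in order inside a fresh child environment and returns only the value of the last one. A standalone sketch of that sequencing, using thunks in place of unevaluated kimi expressions:

def do_py(*thunks):
    result = None
    for t in thunks:   # evaluate each form in order
        result = t()
    return result      # only the last value survives

print(do_py(lambda: 1, lambda: 2, lambda: 3))  # 3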
Example #6
Source File: special_forms.py    From kimi with MIT License
def define(args, env):
    if len(args) != 2:
        throw_error("syntax", "Incorrect use of (define ...): must take exactly two arguments.")
    assert_or_throw(args[0]['type'] == 'symbol', "type", "Incorrect use of (define ...): the variable must be a symbol.")
    variable = args[0]['value']
    value = ev.evaluate(args[1], env)
    env.set(variable, value)
    return value 
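define evaluates its second argument first, binds the result under the symbol's name via env.set, and returns the bound value. With a plain dict standing in for kimi's Environment (an assumption), that is:

env = {}

def define_py(name, value, env):
    env[name] = value   # like env.set(variable, value)
    return value        # define returns the value it bound

print(define_py("x", 42, env), env)  # 42 {'x': 42}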
Example #7
Source File: special_forms.py    From kimi with MIT License
def cond(args, env):
    if len(args) != 3:
        throw_error("syntax", "Incorrect use of (if ...): must take exactly three arguments (a test, a pass case, and a fail case).")
    test = ev.evaluate(args[0], env)
    if type(test) != bool:
        throw_error("type", "Incorrect use of (if ...): the test must evaluate to a boolean.")
    if test:
        return ev.evaluate(args[1], env)
    else:
        return ev.evaluate(args[2], env) 
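Note the strict boolean check: a truthy non-boolean such as 1 is rejected with a type error rather than treated as true. A plain-Python analogue (unlike the real cond, which evaluates only the chosen branch, this sketch takes already-computed values):

def if_strict(test, pass_case, fail_case):
    if type(test) != bool:
        raise TypeError("the test must evaluate to a boolean")
    return pass_case if test else fail_case

print(if_strict(True, "yes", "no"))  # yes
# if_strict(1, "yes", "no")          # raises TypeError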
Example #8
Source File: eval.py    From models with Apache License 2.0
def main(unused_argv):
  if FLAGS.omp > 0:
    if not os.environ.get("OMP_NUM_THREADS"):
      logging.info('OMP_NUM_THREADS value= %d', FLAGS.omp)
      os.environ["OMP_NUM_THREADS"] = str(FLAGS.omp)
    if not os.environ.get("KMP_BLOCKTIME"):
      logging.info('KMP_BLOCKTIME value= %d', FLAGS.blocktime)
      os.environ["KMP_BLOCKTIME"] = str(FLAGS.blocktime)
    if not os.environ.get("KMP_SETTINGS"):
      os.environ["KMP_SETTINGS"] = "1"
    # os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.io.gfile.makedirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.io.gfile.copy(FLAGS.pipeline_config_path,
                     os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                     overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.io.gfile.copy(config,
                       os.path.join(FLAGS.eval_dir, name),
                       overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return tf.compat.v1.data.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir,
                     intra_op=FLAGS.intra_op, inter_op=FLAGS.inter_op)
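main() reads a number of FLAGS that are defined elsewhere in eval.py; a partial sketch of those definitions in absl-style flags (the names match the code above, but the defaults and help strings are illustrative):

from absl import flags

flags.DEFINE_string('checkpoint_dir', None, 'Directory holding model checkpoints.')
flags.DEFINE_string('eval_dir', None, 'Directory to write evaluation results to.')
flags.DEFINE_string('pipeline_config_path', '', 'Path to a pipeline.config file.')
flags.DEFINE_boolean('eval_training_data', False, 'Evaluate on training data instead.')
flags.DEFINE_boolean('run_once', False, 'Run a single evaluation pass and exit.')
flags.DEFINE_integer('omp', 0, 'Value for OMP_NUM_THREADS.')
flags.DEFINE_integer('blocktime', 0, 'Value for KMP_BLOCKTIME.')
FLAGS = flags.FLAGS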