Python utils.evaluate() Examples
The following are 3 code examples of utils.evaluate(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
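Because utils is project-specific, evaluate() has a different signature and meaning in each example below. In Example #1 it parses a logged Python literal back into a value; a minimal hypothetical sketch of that behavior, built on the standard library's ast.literal_eval (the real IMF implementation may differ), could look like this:

import ast

def evaluate(s):
    # Hypothetical sketch: safely parse a string such as
    # "['open', ('/tmp/a', 0)]" back into Python objects.
    return ast.literal_eval(s)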
Example #1
Source File: apifuzz.py From IMF with MIT License
def load_apilog(self, log_fname, limit):
    # Read the log and drop the trailing empty chunk left by the final newline.
    with open(log_fname, 'rb') as f:
        data = f.read().split('\n')[:-1]
    # Entries come in IN/OUT pairs; drop a dangling unpaired line.
    if len(data) % 2 != 0:
        data = data[:-1]
    idx = 0
    apilogs = []
    while idx < len(data) and idx < limit * 2:
        if data[idx][:2] == 'IN':
            il = utils.evaluate(data[idx][2:])
        else:
            utils.error('load_apilog: parse IN error')
        if data[idx + 1][:3] == 'OUT':
            ol = utils.evaluate(data[idx + 1][3:])
        else:
            utils.error('load_apilog: parse OUT error')
        apilog = log.ApiLog(self.apis[il[0]], il, ol)
        apilogs.append(apilog)
        idx += 2
    return apilogs
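For context, load_apilog assumes the log alternates between an input line prefixed with IN and an output line prefixed with OUT, one pair per API call. The payload format below is hypothetical; only the IN/OUT pairing is implied by the code above:

# Hypothetical log layout consumed by load_apilog: one IN/OUT pair per call.
sample_log = (
    "IN['open', ('/tmp/a', 0)]\n"
    "OUT[3]\n"
)
with open('api.log', 'w') as f:
    f.write(sample_log)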
Example #2
Source File: test.py From next-prediction with Apache License 2.0
def main(args):
    """Run testing."""
    test_data = utils.read_data(args, "test")
    print("total test samples:%s" % test_data.num_examples)
    if args.random_other:
        print("warning, testing mode with 'random_other' will result in "
              "different results every run...")

    model = models.get_model(args, gpuid=args.gpuid)
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    tfconfig.gpu_options.visible_device_list = "%s" % (
        ",".join(["%s" % i for i in [args.gpuid]]))
    with tf.Session(config=tfconfig) as sess:
        utils.initialize(load=True, load_best=args.load_best,
                         args=args, sess=sess)  # load the graph and variables
        tester = models.Tester(model, args, sess)
        perf = utils.evaluate(test_data, args, sess, tester)

    # Print each metric, then a header row and a matching value row.
    print("performance:")
    numbers = []
    for k in sorted(perf.keys()):
        print("%s, %s" % (k, perf[k]))
        numbers.append("%s" % perf[k])
    print(" ".join(sorted(perf.keys())))
    print(" ".join(numbers))
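Here utils.evaluate returns a dict mapping metric names to values, and the code prints one line per metric followed by a header row and a value row. A sketch with hypothetical metric names and values:

# Hypothetical result dict; the real metric names depend on the project.
perf = {"ade": 18.7, "fde": 39.2}
for k in sorted(perf.keys()):
    print("%s, %s" % (k, perf[k]))      # e.g. "ade, 18.7"
print(" ".join(sorted(perf.keys())))    # "ade fde"
print(" ".join("%s" % perf[k] for k in sorted(perf)))  # "18.7 39.2"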
Example #3
Source File: train.py From RecNet with MIT License
def main():
    args = parse_args()
    C = importlib.import_module(args.config).TrainConfig
    print("MODEL ID: {}".format(C.model_id))

    summary_writer = SummaryWriter(C.log_dpath)

    train_iter, val_iter, test_iter, vocab = build_loaders(C)

    model = build_model(C, vocab)
    optimizer = torch.optim.Adam(model.parameters(), lr=C.lr,
                                 weight_decay=C.weight_decay, amsgrad=True)
    lr_scheduler = ReduceLROnPlateau(optimizer, mode='min',
                                     factor=C.lr_decay_gamma,
                                     patience=C.lr_decay_patience, verbose=True)

    best_val_scores = {'CIDEr': 0.}
    best_epoch = 0
    best_ckpt_fpath = None
    for e in range(1, C.epochs + 1):
        ckpt_fpath = C.ckpt_fpath_tpl.format(e)

        """ Train """
        print("\n")
        train_loss = train(e, model, optimizer, train_iter, vocab,
                           C.decoder.rnn_teacher_forcing_ratio, C.reg_lambda,
                           C.recon_lambda, C.gradient_clip)
        log_train(C, summary_writer, e, train_loss, get_lr(optimizer))

        """ Validation """
        val_loss = test(model, val_iter, vocab, C.reg_lambda, C.recon_lambda)
        val_scores = evaluate(val_iter, model, model.vocab)
        log_val(C, summary_writer, e, val_loss, val_scores)

        if e >= C.save_from and e % C.save_every == 0:
            print("Saving checkpoint at epoch={} to {}".format(e, ckpt_fpath))
            save_checkpoint(e, model, ckpt_fpath, C)

        if e >= C.lr_decay_start_from:
            lr_scheduler.step(val_loss['total'])

        # Track the checkpoint with the best validation CIDEr score.
        if e == 1 or val_scores['CIDEr'] > best_val_scores['CIDEr']:
            best_epoch = e
            best_val_scores = val_scores
            best_ckpt_fpath = ckpt_fpath

    """ Test with Best Model """
    print("\n\n\n[BEST]")
    best_model = load_checkpoint(model, best_ckpt_fpath)
    test_scores = evaluate(test_iter, best_model, best_model.vocab)
    log_test(C, summary_writer, best_epoch, test_scores)
    save_checkpoint(best_epoch, best_model, C.ckpt_fpath_tpl.format("best"), C)
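In this example evaluate() returns a dict of captioning scores keyed by metric name, and the training loop keys best-checkpoint selection on 'CIDEr'. A minimal sketch of that selection pattern with hypothetical scores:

# Hypothetical per-epoch scores; only the 'CIDEr' key drives selection.
best_val_scores = {'CIDEr': 0.}
for epoch, val_scores in enumerate([{'CIDEr': 0.31}, {'CIDEr': 0.42}], start=1):
    if epoch == 1 or val_scores['CIDEr'] > best_val_scores['CIDEr']:
        best_epoch, best_val_scores = epoch, val_scores
print(best_epoch, best_val_scores)  # 2 {'CIDEr': 0.42}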