Python data_utils.test_set() Examples
The following are 3 code examples of data_utils.test_set().
The source file and license are noted above each example.
Example #1
Source File: neural_gpu_trainer.py From Action_Recognition_Zoo with MIT License
def evaluate():
  """Evaluate an existing model."""
  batch_size = FLAGS.batch_size
  tasks = FLAGS.task.split("-")
  with tf.Session() as sess:
    model, min_length, max_length, _, _, ensemble = initialize(sess)
    bound = data.bins[-1] + 1
    for t in tasks:
      l = min_length
      while l < max_length + EXTRA_EVAL and l < bound:
        _, seq_err, _ = single_test(l, model, sess, t, FLAGS.nprint,
                                    batch_size, ensemble=ensemble)
        l += 1
        while l < bound + 1 and not data.test_set[t][l]:
          l += 1
      # Animate.
      if FLAGS.animate:
        anim_size = 2
        _, _, test_data = single_test(l, model, sess, t, 0, anim_size,
                                      get_steps=True)
        animate(l, test_data, anim_size)
      # More tests.
      _, seq_err = multi_test(data.forward_max, model, sess, t, FLAGS.nprint,
                              batch_size * 4, ensemble=ensemble)
    if seq_err < 0.01:  # Super-test if we're very good and in large-test mode.
      if data.forward_max > 4000 and len(tasks) == 1:
        multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
                   batch_size * 64, 0, ensemble=ensemble)
Example #2
Source File: neural_gpu_trainer.py From ECO-pytorch with BSD 2-Clause "Simplified" License
The code is identical, line for line, to Example #1 above.
Example #3
Source File: neural_gpu_trainer.py From AI_Reader with Apache License 2.0
The code is identical, line for line, to Example #1 above.
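In all three examples, the inner while loop advances past sequence lengths for which no test data exists, which implies that data.test_set maps each task name to a per-length collection whose empty entries are falsy. The sketch below illustrates that assumed layout; the task name, lengths, and sample pair are invented for illustration and are not part of data_utils.

# Illustrative sketch only: assumes test_set is a dict mapping a task name
# to a list indexed by sequence length, where an empty list means "no test
# data for this length". All values here are made up.
test_set = {"rev": [[] for _ in range(43)]}    # task "rev", lengths 0..42
test_set["rev"][8] = [([1, 2, 3], [3, 2, 1])]  # one (input, target) pair

bound = 42  # stands in for data.bins[-1] + 1
l = 5
# Same idiom as in evaluate(): skip lengths that have no test data.
while l < bound + 1 and not test_set["rev"][l]:
  l += 1
print(l)  # prints 8, the first length that has test examples

Because an empty list is falsy in Python, not data.test_set[t][l] is a compact way to test whether any test examples exist for task t at length l.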