Python data_utils.bins() Examples

The following code examples show how data_utils.bins is used in practice. All of them come from the TensorFlow Neural GPU implementation (neural_gpu_trainer.py and neural_gpu.py), which many public projects vendor verbatim; the source project and license are noted above each example. Note that neural_gpu_trainer.py imports data_utils under the alias data, so data.bins in the trainer examples is the same object as data_utils.bins. You may also want to check out the other available functions and classes of the data_utils module.
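For context, data_utils.bins is a module-level list of bucket lengths: every training or test sequence is padded up to the nearest bin length so that the model graph only has to be built for a handful of fixed shapes. The sketch below illustrates the idea; the bin values and the pad_to_bin helper are illustrative stand-ins, not the project's actual configuration.

import numpy as np

bins = [8, 12, 16, 24, 32, 48, 64, 128]  # illustrative bin lengths, not the real defaults

def pad_to_bin(seq, pad_id=0):
  """Pad a token sequence with pad_id up to the nearest bin length."""
  target = next((b for b in bins if b >= len(seq)), bins[-1])
  return np.array(seq + [pad_id] * (target - len(seq)))

print(pad_to_bin([3, 1, 4, 1, 5]))  # padded to length 8: [3 1 4 1 5 0 0 0]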
Example #1
Source File: neural_gpu_trainer.py    From DOTA_models with Apache License 2.0
def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set):
  """Get a random bucket id."""
  # Choose a bucket according to data distribution. Pick a random number
  # in [0, 1) and use the corresponding interval in train_buckets_scale.
  random_number_01 = np.random.random_sample()
  bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
                   if train_buckets_scale_c[i] > random_number_01])
  while bucket_id > 0 and not data_set[bucket_id]:
    bucket_id -= 1
  for _ in xrange(10 if np.random.random_sample() < 0.9 else 1):
    if data.bins[bucket_id] > max_cur_length:
      random_number_01 = min(random_number_01, np.random.random_sample())
      bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
                       if train_buckets_scale_c[i] > random_number_01])
      while bucket_id > 0 and not data_set[bucket_id]:
        bucket_id -= 1
  return bucket_id 
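The train_buckets_scale_c argument is a cumulative distribution over buckets: entry i holds the fraction of training data in buckets 0 through i, so drawing a uniform number in [0, 1) and taking the first entry above it samples buckets in proportion to their data, while the re-sampling loop biases training toward bins no longer than max_cur_length. A self-contained sketch of building such a scale, with made-up bucket sizes:

bucket_sizes = [100, 300, 400, 200]  # made-up number of examples per bucket
total = float(sum(bucket_sizes))
train_buckets_scale = [sum(bucket_sizes[:i + 1]) / total
                       for i in range(len(bucket_sizes))]
print(train_buckets_scale)  # [0.1, 0.4, 0.8, 1.0]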
Example #2
Source File: neural_gpu_trainer.py    From HumanRecognition with MIT License
def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True,
                offset=None, beam_model=None):
  """Test model on test data of length l using the given session."""
  if not dev[p][bin_id]:
    data.print_out("  bin %d (%d)\t%s\tppl NA errors NA seq-errors NA"
                   % (bin_id, data.bins[bin_id], p))
    return 1.0, 1.0, 0.0
  inpt, target = data.get_batch(
      bin_id, batch_size, dev[p], FLAGS.height, offset)
  if FLAGS.beam_size > 1 and beam_model:
    loss, res, new_tgt, scores = m_step(
        model, beam_model, sess, batch_size, inpt, target, bin_id,
        FLAGS.eval_beam_steps, p)
    score_avgs = [sum(s) / float(len(s)) for s in scores]
    score_maxs = [max(s) for s in scores]
    score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i])
                 for i in xrange(FLAGS.eval_beam_steps)]
    data.print_out("  == scores (avg, max): %s" % "; ".join(score_str))
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint, new_tgt, scores[-1])
  else:
    loss, res, _, _ = model.step(sess, inpt, target, False)
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint)
  seq_err = float(seq_err) / batch_size
  if total > 0:
    errors = float(errors) / total
  if print_out:
    data.print_out("  bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f"
                   % (bin_id, data.bins[bin_id], p, data.safe_exp(loss),
                      100 * errors, 100 * seq_err))
  return (errors, seq_err, loss) 
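A hedged example of invoking single_test; the dev_set value and the concrete argument values below are assumptions for illustration, not taken from the trainer:

# dev_set is assumed to map a problem name to per-bin lists of examples,
# as data.get_batch expects; bin 2 of problem "rev" is tested here.
errors, seq_err, loss = single_test(2, model, sess, nprint=0, batch_size=32,
                                    dev=dev_set, p="rev", print_out=True)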
Example #3
Source File: neural_gpu_trainer.py    From multilabel-image-classification-tensorflow with MIT License
def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size,
                  bucket, history, p, test_mode=False):
  """Run beam_model, score beams, and return the best as target and in input."""
  _, output_logits, _, _ = beam_model.step(
      sess, inp, target, None, beam_size=FLAGS.beam_size)
  new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp)
  for b in xrange(batch_size):
    outputs = []
    history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])]
                 for h in history]
    for beam_idx in xrange(beam_size):
      outputs.append([int(o[beam_idx * batch_size + b])
                      for o in output_logits])
    target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])]
    best, best_score = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode)
    scores.append(best_score)
    if 1 in best:  # Only until _EOS.
      best = best[:best.index(1) + 1]
    best += [0 for _ in xrange(len(target_t) - len(best))]
    new_targets.append([best])
    first, _ = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=True)
    if 1 in first:  # Only until _EOS.
      first = first[:first.index(1) + 1]
    first += [0 for _ in xrange(len(target_t) - len(first))]
    new_inp[b, 0, :] = np.array(first, dtype=np.int32)
    new_firsts.append([first])
  # Change target if we found a great answer.
  new_target = np.array(new_targets, dtype=np.int32)
  for b in xrange(batch_size):
    if scores[b] >= 10.0:
      target[b, 0, :] = new_target[b, 0, :]
  new_first = np.array(new_firsts, dtype=np.int32)
  return new_target, new_first, new_inp, scores 
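The truncate-at-_EOS-then-pad idiom that appears twice above is the core of the bookkeeping: per the source comments, token id 1 is _EOS and id 0 is padding. The same two lines on concrete data:

best = [5, 7, 1, 3, 9]  # decoded ids; 1 is _EOS, so 3 and 9 are garbage
if 1 in best:  # Only until _EOS.
  best = best[:best.index(1) + 1]
best += [0 for _ in range(8 - len(best))]  # pad to the bin length (8 here)
print(best)  # [5, 7, 1, 0, 0, 0, 0, 0]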
Example #4
Source File: neural_gpu_trainer.py    From AI_Reader with Apache License 2.0
def evaluate():
  """Evaluate an existing model."""
  batch_size = FLAGS.batch_size
  tasks = FLAGS.task.split("-")
  with tf.Session() as sess:
    model, min_length, max_length, _, _, ensemble = initialize(sess)
    bound = data.bins[-1] + 1
    for t in tasks:
      l = min_length
      while l < max_length + EXTRA_EVAL and l < bound:
        _, seq_err, _ = single_test(l, model, sess, t, FLAGS.nprint,
                                    batch_size, ensemble=ensemble)
        l += 1
        while l < bound + 1 and not data.test_set[t][l]:
          l += 1
      # Animate.
      if FLAGS.animate:
        anim_size = 2
        _, _, test_data = single_test(l, model, sess, t, 0, anim_size,
                                      get_steps=True)
        animate(l, test_data, anim_size)
      # More tests.
      _, seq_err = multi_test(data.forward_max, model, sess, t, FLAGS.nprint,
                              batch_size * 4, ensemble=ensemble)
    if seq_err < 0.01:  # Super-test if we're very good and in large-test mode.
      if data.forward_max > 4000 and len(tasks) == 1:
        multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
                   batch_size * 64, 0, ensemble=ensemble) 
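The inner while loop is what lets the evaluation sweep skip lengths with no test data while staying within the largest bin. A self-contained sketch of just that sweep, omitting the max_length + EXTRA_EVAL cap and using made-up bins and availability:

bins = [8, 12, 16]  # stand-in for data.bins
test_set = {l: (l != 9) for l in range(20)}  # pretend length 9 has no data
bound = bins[-1] + 1  # never evaluate beyond the largest bin
l, tested = 8, []
while l < bound:
  tested.append(l)  # the real loop calls single_test(l, ...) here
  l += 1
  while l < bound + 1 and not test_set.get(l, False):
    l += 1
print(tested)  # [8, 10, 11, 12, 13, 14, 15, 16]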
Example #5
Source File: neural_gpu.py    From AI_Reader with Apache License 2.0
def step(self, sess, inp, target, do_backward, noise_param=None,
           get_steps=False):
    """Run a step of the network."""
    assert len(inp) == len(target)
    length = len(target)
    feed_in = {}
    feed_in[self.noise_param.name] = noise_param if noise_param else 0.0
    feed_in[self.do_training.name] = 1.0 if do_backward else 0.0
    feed_out = []
    index = len(data_utils.bins)
    if length < data_utils.bins[-1] + 1:
      index = data_utils.bins.index(length)
    if do_backward:
      feed_out.append(self.updates[index])
      feed_out.append(self.grad_norms[index])
    feed_out.append(self.losses[index])
    for l in xrange(length):
      feed_in[self.input[l].name] = inp[l]
    for l in xrange(length):
      feed_in[self.target[l].name] = target[l]
      feed_out.append(self.outputs[index][l])
    if get_steps:
      for l in xrange(length+1):
        feed_out.append(self.steps[index][l])
    res = sess.run(feed_out, feed_in)
    offset = 0
    norm = None
    if do_backward:
      offset = 2
      norm = res[1]
    outputs = res[offset + 1:offset + 1 + length]
    steps = res[offset + 1 + length:] if get_steps else None
    return res[offset], outputs, norm, steps 
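Note that step assumes inp and target are already padded to an exact bin length: it locates the model to run with data_utils.bins.index(length), and lengths beyond the largest bin fall through to the extra model at index len(data_utils.bins). A hedged call, reusing the names from the snippet above:

# One training step on a batch padded to a bin length; 0.05 is an
# arbitrary illustrative noise level, not the project's default.
loss, outputs, grad_norm, _ = model.step(sess, inp, target,
                                         do_backward=True, noise_param=0.05)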