Python utils.merge() Examples

The following are 9 code examples of utils.merge(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
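
The exact behavior of utils.merge() depends on the project that defines it: the data_loader.py examples below use it to combine dictionaries of batch data, while the run_lib.py examples use it to overlay an override config onto a base config. For reference only, a minimal dict-merge sketch (hypothetical; not the code from any of the projects below) could look like:

def merge(a, b):
    """Return a new dict with the keys of both a and b; values from b take precedence."""
    result = dict(a)
    result.update(b)
    return result

# merge({"x": 1}, {"y": 2})  -> {"x": 1, "y": 2}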
Example #1
Source File: shape.py    From visual-analogy-tensorflow with MIT License
def test(self, name="test", options=None, fixed=False):
    if options is None:
      options = self.options

    t = strfnow()

    for option in options:
      if fixed:
        a, b, c, d = self.loader.tests[option]
      else:
        a, b, c, d = self.loader.next(set_option=option)

      feed = {self.a: a,
              self.b: b,
              self.c: c,
              self.d: d}

      fname = "%s/%s_option:%s_time:%s.png" % (self.sample_dir, name, option, t)
      g_img, g2_img, g3_img = self.sess.run([self.g1_img, self.g2_img, self.g3_img], feed_dict=feed)

      imsave(fname, merge(a, b, c, d, g_img, g2_img, g3_img)) 
Example #2
Source File: data_loader.py    From kaggle-heart with MIT License
def generate_train_batch(required_input_keys, required_output_keys):
    """Creates an iterator that returns train batches."""

    sunny_chunk_size = _config().sunny_batch_size * _config().batches_per_chunk
    chunk_size = _config().batch_size * _config().batches_per_chunk

    while True:
        result = {}
        input_keys_to_do = list(required_input_keys)  # clone
        output_keys_to_do = list(required_output_keys)  # clone
        if "sunny" in input_keys_to_do or "segmentation" in output_keys_to_do:
            indices = _config().rng.randint(0, len(sunny_train_images), sunny_chunk_size)
            sunny_patient_data = get_sunny_patient_data(indices, set="train")
            result = utils.merge(result, sunny_patient_data)
            input_keys_to_do.remove("sunny")
            output_keys_to_do.remove("segmentation")

        indices = _config().rng.randint(0, len(train_patient_folders), chunk_size)
        kaggle_data = get_patient_data(indices, input_keys_to_do, output_keys_to_do, set="train",
                                       preprocess_function=_config().preprocess_train)

        result = utils.merge(result, kaggle_data)

        yield result 
Example #3
Source File: run_lib.py    From training_results_v0.5 with Apache License 2.0
def evaluate(override_cfg, model_dir, continuous=True):
  """Run training and evaluation."""
  tf.logging.info("model_dir = " + model_dir)
  try:
    cfg = _load_config(model_dir)
  except tf.errors.NotFoundError:
    tf.logging.info("Model directory does not exist yet. Creating new config.")
    cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)
  tf.logging.info(cfg)
  tf.logging.info(override_cfg)
  cfg = utils.merge(cfg, override_cfg)

  cfg.tpu.enable = False
  cfg.dataset.max_length = None

  # Construct inputs and estimator
  _, eval_input = data.build_dataset(cfg.dataset, is_tpu=cfg.tpu.enable)
  estimator = model.get_estimator(**cfg)
  if continuous:
    checkpoints_iterator = tf.contrib.training.checkpoints_iterator(
        cfg.model_dir)
    eval_metrics = None
    for ckpt_path in checkpoints_iterator:
      eval_metrics = estimator.evaluate(
          input_fn=eval_input, checkpoint_path=ckpt_path)
      tf.logging.info(pprint.pformat(eval_metrics))
    return eval_metrics
  else:
    eval_metrics = estimator.evaluate(input_fn=eval_input)
    return eval_metrics 
Example #4
Source File: run_lib.py    From training_results_v0.5 with Apache License 2.0
def predict(override_cfg, model_dir):
  """Run model over a dataset and dump predictions to json file."""
  assert FLAGS.predict_path
  cfg = _load_config(model_dir)
  cfg = utils.merge(cfg, override_cfg)
  input_fn = data.get_input_fn(
      split=cfg.dataset.eval_split,
      max_length=None,
      repeat=False,
      shuffle=False,
      cache=False,
      limit=None,
      data_path=cfg.dataset.data_path,
      vocab_path=cfg.dataset.vocab_path,
      is_tpu=False,
      use_generator=True,
      is_training=False)
  estimator = model.get_estimator(**cfg)
  predictions = dict()
  for i, prediction in enumerate(estimator.predict(input_fn)):
    predictions[prediction["id"]] = prediction["answer"]
    if i % 100 == 0:
      tf.logging.info("Prediction %s | %s: %s" % (i, prediction["id"],
                                                  prediction["answer"]))

  # Dump results to a file
  with tf.gfile.GFile(FLAGS.predict_path, "w") as f:
    json.dump(predictions, f) 
Example #5
Source File: run_lib.py    From tpu_models with Apache License 2.0
def evaluate(override_cfg, model_dir, continuous=True):
  """Run training and evaluation."""
  tf.logging.info("model_dir = " + model_dir)
  try:
    cfg = _load_config(model_dir)
  except tf.errors.NotFoundError:
    tf.logging.info("Model directory does not exist yet. Creating new config.")
    cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)
  tf.logging.info(cfg)
  tf.logging.info(override_cfg)
  cfg = utils.merge(cfg, override_cfg)

  cfg.tpu.enable = False
  cfg.dataset.max_length = None

  # Construct inputs and estimator
  _, eval_input = data.build_dataset(cfg.dataset, is_tpu=cfg.tpu.enable)
  estimator = model.get_estimator(**cfg)
  if continuous:
    checkpoints_iterator = tf.contrib.training.checkpoints_iterator(
        cfg.model_dir)
    eval_metrics = None
    for ckpt_path in checkpoints_iterator:
      eval_metrics = estimator.evaluate(
          input_fn=eval_input, checkpoint_path=ckpt_path)
      tf.logging.info(pprint.pformat(eval_metrics))
    return eval_metrics
  else:
    eval_metrics = estimator.evaluate(input_fn=eval_input)
    return eval_metrics 
Example #6
Source File: run_lib.py    From tpu_models with Apache License 2.0
def predict(override_cfg, model_dir):
  """Run model over a dataset and dump predictions to json file."""
  assert FLAGS.predict_path
  cfg = _load_config(model_dir)
  cfg = utils.merge(cfg, override_cfg)
  input_fn = data.get_input_fn(
      split=cfg.dataset.eval_split,
      max_length=None,
      repeat=False,
      shuffle=False,
      cache=False,
      limit=None,
      data_path=cfg.dataset.data_path,
      vocab_path=cfg.dataset.vocab_path,
      is_tpu=False,
      use_generator=True,
      is_training=False)
  estimator = model.get_estimator(**cfg)
  predictions = dict()
  for i, prediction in enumerate(estimator.predict(input_fn)):
    predictions[prediction["id"]] = prediction["answer"]
    if i % 100 == 0:
      tf.logging.info("Prediction %s | %s: %s" % (i, prediction["id"],
                                                  prediction["answer"]))

  # Dump results to a file
  with tf.gfile.GFile(FLAGS.predict_path, "w") as f:
    json.dump(predictions, f) 
Example #7
Source File: run_lib.py    From class-balanced-loss with MIT License
def evaluate(override_cfg, model_dir, continuous=True):
  """Run training and evaluation."""
  tf.logging.info("model_dir = " + model_dir)
  try:
    cfg = _load_config(model_dir)
  except tf.errors.NotFoundError:
    tf.logging.info("Model directory does not exist yet. Creating new config.")
    cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)
  tf.logging.info(cfg)
  tf.logging.info(override_cfg)
  cfg = utils.merge(cfg, override_cfg)

  cfg.tpu.enable = False
  cfg.dataset.max_length = None

  # Construct inputs and estimator
  _, eval_input = data.build_dataset(cfg.dataset, is_tpu=cfg.tpu.enable)
  estimator = model.get_estimator(**cfg)
  if continuous:
    checkpoints_iterator = tf.contrib.training.checkpoints_iterator(
        cfg.model_dir)
    eval_metrics = None
    for ckpt_path in checkpoints_iterator:
      eval_metrics = estimator.evaluate(
          input_fn=eval_input, checkpoint_path=ckpt_path)
      tf.logging.info(pprint.pformat(eval_metrics))
    return eval_metrics
  else:
    eval_metrics = estimator.evaluate(input_fn=eval_input)
    return eval_metrics 
Example #8
Source File: run_lib.py    From class-balanced-loss with MIT License
def predict(override_cfg, model_dir):
  """Run model over a dataset and dump predictions to json file."""
  assert FLAGS.predict_path
  cfg = _load_config(model_dir)
  cfg = utils.merge(cfg, override_cfg)
  input_fn = data.get_input_fn(
      split=cfg.dataset.eval_split,
      max_length=None,
      repeat=False,
      shuffle=False,
      cache=False,
      limit=None,
      data_path=cfg.dataset.data_path,
      vocab_path=cfg.dataset.vocab_path,
      is_tpu=False,
      use_generator=True,
      is_training=False)
  estimator = model.get_estimator(**cfg)
  predictions = dict()
  for i, prediction in enumerate(estimator.predict(input_fn)):
    predictions[prediction["id"]] = prediction["answer"]
    if i % 100 == 0:
      tf.logging.info("Prediction %s | %s: %s" % (i, prediction["id"],
                                                  prediction["answer"]))

  # Dump results to a file
  with tf.gfile.GFile(FLAGS.predict_path, "w") as f:
    json.dump(predictions, f) 
Example #9
Source File: data_loader.py    From kaggle-heart with MIT License
def generate_validation_batch(required_input_keys, required_output_keys, set="validation"):
    # generate sunny data
    sunny_length = get_lenght_of_set(name="sunny", set=set)
    regular_length = get_lenght_of_set(name="regular", set=set)

    sunny_batches = int(np.ceil(sunny_length / float(_config().sunny_batch_size)))
    regular_batches = int(np.ceil(regular_length / float(_config().batch_size)))

    if "sunny" in required_input_keys or "segmentation" in required_output_keys:
        num_batches = max(sunny_batches, regular_batches)
    else:
        num_batches = regular_batches

    num_chunks = int(np.ceil(num_batches / float(_config().batches_per_chunk)))

    sunny_chunk_size = _config().batches_per_chunk * _config().sunny_batch_size
    regular_chunk_size = _config().batches_per_chunk * _config().batch_size

    for n in range(num_chunks):

        result = {}
        input_keys_to_do  = list(required_input_keys)  # clone
        output_keys_to_do = list(required_output_keys) # clone

        if "sunny" in input_keys_to_do or "segmentation" in output_keys_to_do:

            indices = range(n*sunny_chunk_size, (n+1)*sunny_chunk_size)

            sunny_patient_data = get_sunny_patient_data(indices, set="train")
            result = utils.merge(result, sunny_patient_data)
            input_keys_to_do.remove("sunny")
            output_keys_to_do.remove("segmentation")

        indices = range(n*regular_chunk_size, (n+1)*regular_chunk_size)
        kaggle_data = get_patient_data(indices, input_keys_to_do, output_keys_to_do, set=set,
                                       preprocess_function=_config().preprocess_validation)

        result = utils.merge(result, kaggle_data)

        yield result