Python tensorflow.all_variables() Examples

The following are 30 code examples of tensorflow.all_variables(), collected from open-source projects; the project and source file for each example are noted above it. You may also want to check out all available functions and classes of the module tensorflow. Note that tf.all_variables() was deprecated in favor of tf.global_variables() and is no longer available in TensorFlow 2.x, where the closest equivalent is tf.compat.v1.global_variables().
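Before the examples, here is a minimal sketch of what the function returns (assuming a TensorFlow 1.x release old enough to still ship tf.all_variables(); the variable names are illustrative):

import tensorflow as tf

# tf.Variable adds each new variable to the GraphKeys.GLOBAL_VARIABLES
# collection of the default graph, whether or not it is trainable.
weights = tf.Variable(tf.zeros([10, 10]), name="weights")
step = tf.Variable(0, name="step", trainable=False)

# tf.all_variables() returns every variable in that collection.
for v in tf.all_variables():
    print(v.name, v.get_shape())

# tf.all_variables() is a deprecated alias of tf.global_variables(), so on
# releases that ship both symbols the two calls return the same variables.
assert set(tf.all_variables()) == set(tf.global_variables())

Most of the snippets below port to later 1.x releases by substituting tf.global_variables() for tf.all_variables() and tf.global_variables_initializer() for tf.initialize_all_variables().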
Example #1
Source File: train.py    From tensorflow with BSD 2-Clause "Simplified" License
def train_neural_network():
	logits, last_state, _, _, _ = neural_network()
	targets = tf.reshape(output_targets, [-1])
	loss = tf.nn.seq2seq.sequence_loss_by_example([logits], [targets], [tf.ones_like(targets, dtype=tf.float32)], len(words))
	cost = tf.reduce_mean(loss)
	learning_rate = tf.Variable(0.0, trainable=False)
	tvars = tf.trainable_variables()
	grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
	optimizer = tf.train.AdamOptimizer(learning_rate)
	train_op = optimizer.apply_gradients(zip(grads, tvars))

	with tf.Session() as sess:
		sess.run(tf.initialize_all_variables())
		saver = tf.train.Saver(tf.all_variables())

		for epoch in range(50):
			sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
			for batch in range(n_chunk):
				train_loss, _, _ = sess.run(
					[cost, last_state, train_op],
					feed_dict={input_data: x_batches[batch], output_targets: y_batches[batch]})
				print(epoch, batch, train_loss)
			if epoch % 7 == 0:
				saver.save(sess, 'poetry.module', global_step=epoch) 
Example #2
Source File: ssd_meta_arch.py    From garbage-object-detection-tensorflow with MIT License
def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.all_variables():
      if variable.op.name.startswith(self._extract_features_scope):
        var_name = variable.op.name
        if not from_detection_checkpoint:
          var_name = (re.split('^' + self._extract_features_scope + '/',
                               var_name)[-1])
        variables_to_restore[var_name] = variable
    return variables_to_restore 
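The dict returned by restore_map() can be handed directly to tf.train.Saver, which restores each graph variable from the checkpoint entry named by its key. A minimal sketch of that usage (the model instance and checkpoint path are hypothetical, not part of the original file):

variables_to_restore = model.restore_map(from_detection_checkpoint=False)
saver = tf.train.Saver(variables_to_restore)  # dict: checkpoint name -> graph variable
with tf.Session() as sess:
    saver.restore(sess, '/path/to/classification_checkpoint.ckpt')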
Example #3
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testPrepareSessionWithReadyNotReadyForLocal(self):
    with tf.Graph().as_default():
      v = tf.Variable(1, name="v")
      w = tf.Variable(
          v,
          trainable=False,
          collections=[tf.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.test_session():
        self.assertEqual(False, tf.is_variable_initialized(v).eval())
        self.assertEqual(False, tf.is_variable_initialized(w).eval())
      sm2 = tf.train.SessionManager(
          ready_op=tf.report_uninitialized_variables(),
          ready_for_local_init_op=tf.report_uninitialized_variables(
              tf.all_variables()),
          local_init_op=w.initializer)
      with self.assertRaisesRegexp(
          RuntimeError,
          "Init operations did not make model ready for local_init"):
        sm2.prepare_session("", init_op=None) 
Example #4
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope
    #          as an argument; the scope can be None, a string,
    #          or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variable names start with the proper scope.
      tf.global_variables_initializer()
      all_vars = tf.all_variables()
      prefix = prefix or "StackRNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("StackRNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #5
Source File: optimizers_test.py    From deep_image_model with Apache License 2.0
def testAdaptiveGradientClip(self):
    with self.test_session() as session:
      x, var, loss, global_step = _setup_model()
      clip_gradients = tf.contrib.layers.adaptive_clipping_fn()
      train = tf.contrib.layers.optimize_loss(loss,
                                              global_step,
                                              learning_rate=0.1,
                                              optimizer="SGD",
                                              clip_gradients=clip_gradients)
      tf.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      var_value, global_step_value = session.run([var, global_step])
      self.assertAlmostEqual(var_value, 9.8916, 4)
      self.assertEqual(global_step_value, 1)
      var_count = 0
      for var in tf.all_variables():
        if var.name.startswith("OptimizeLoss/AdaptiveMaxNorm"):
          var_count += 1
      self.assertEqual(2, var_count) 
Example #6
Source File: supervisor_test.py    From deep_image_model with Apache License 2.0
def testLocalInitOpForNonChief(self):
    logdir = _test_dir("default_local_init_op_non_chief")
    with tf.Graph().as_default():
      with tf.device("/job:localhost"):
        # A local variable.
        v = tf.Variable([1.0, 2.0, 3.0],
                        trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES])
        # This shouldn't add a variable to the VARIABLES collection responsible
        # for variables that are saved/restored from checkpoints.
        self.assertEqual(len(tf.all_variables()), 0)

      # Suppress normal variable inits to make sure the local one is
      # initialized via local_init_op.
      sv = tf.train.Supervisor(logdir=logdir, init_op=None, is_chief=False)
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      sv.stop() 
Example #7
Source File: supervisor_test.py    From deep_image_model with Apache License 2.0
def testLocalInitOp(self):
    logdir = _test_dir("default_local_init_op")
    with tf.Graph().as_default():
      # A local variable.
      v = tf.Variable([1.0, 2.0, 3.0],
                      trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES])

      # An entity which is initialized through a TABLE_INITIALIZER.
      w = tf.Variable([4, 5, 6], trainable=False, collections=[])
      tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, w.initializer)

      # This shouldn't add a variable to the VARIABLES collection responsible
      # for variables that are saved/restored from checkpoints.
      self.assertEqual(len(tf.all_variables()), 0)

      # Suppress normal variable inits to make sure the local one is
      # initialized via local_init_op.
      sv = tf.train.Supervisor(logdir=logdir, init_op=None)
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      self.assertAllClose([4, 5, 6], sess.run(w))
      sv.stop() 
Example #8
Source File: coldStart.py    From neural-el with Apache License 2.0
def typeAndWikiDescBasedColdEmbExp(self, ckptName="FigerModel-20001"):
        ''' Train cold embeddings using wiki desc loss
        '''
        saver = tf.train.Saver(var_list=tf.all_variables())

        print("Loading Model ... ")
        if ckptName is None:
            print("No CKPT name given")
            sys.exit()
        else:
            load_status = self.fm.loadSpecificCKPT(
              saver=saver, checkpoint_dir=self.fm.checkpoint_dir,
              ckptName=ckptName, attrs=self.fm._attrs)
        if not load_status:
            print("No model to load. Exiting")
            sys.exit(0)

        self._makeDescLossGraph()
        self.fm.sess.run(tf.initialize_variables(self.allcoldvars))
        self._trainColdEmbFromTypesAndDesc(epochsToTrain=5)

        self.runEval()

    # EVALUATION FOR COLD START WHEN INITIALIZING COLD EMB FROM WIKI DESC ENCODING 
Example #9
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        tf.global_variables_initializer()

      # check that all the variable names start
      # with the proper scope.
      all_vars = tf.all_variables()
      prefix = prefix or "RNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #10
Source File: stochastic_variables_test.py    From deep_image_model with Apache License 2.0
def testStochasticVariablesWithConstantInitializer(self):
    shape = (10, 20)
    with tf.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusSigma,
            dist_kwargs={"validate_args": True},
            param_initializers={
                "mu": np.ones(shape) * 4.,
                "sigma": np.ones(shape) * 2.
            })):
      v = tf.get_variable("sv")

    for var in tf.all_variables():
      if "mu" in var.name:
        mu_var = var
      if "sigma" in var.name:
        sigma_var = var

    v = tf.convert_to_tensor(v)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
      self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
      self.assertEqual(shape, sess.run(v).shape) 
Example #11
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        tf.global_variables_initializer()

      # check that all the variable names start
      # with the proper scope.
      all_vars = tf.all_variables()
      prefix = prefix or "RNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #12
Source File: run_summarization.py    From MAX-Text-Summarizer with Apache License 2.0
def restore_best_model():
    """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory"""
    tf.logging.info("Restoring best model for training...")

    # Initialize all vars in the model
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from eval dir
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to train dir and quit
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % new_fname)
    new_saver = tf.train.Saver()  # this saver saves all variables that now exist, including Adagrad variables
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit() 
Example #13
Source File: ssd_meta_arch.py    From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License
def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.all_variables():
      if variable.op.name.startswith(self._extract_features_scope):
        var_name = variable.op.name
        if not from_detection_checkpoint:
          var_name = (re.split('^' + self._extract_features_scope + '/',
                               var_name)[-1])
        variables_to_restore[var_name] = variable
    return variables_to_restore 
Example #14
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope
    #          as an argument; the scope can be None, a string,
    #          or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variable names start
      # with the proper scope.
      tf.global_variables_initializer()
      all_vars = tf.all_variables()
      prefix = prefix or "BiRNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("BiRNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #15
Source File: ssd_meta_arch.py    From tensorflow with BSD 2-Clause "Simplified" License
def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.all_variables():
      if variable.op.name.startswith(self._extract_features_scope):
        var_name = variable.op.name
        if not from_detection_checkpoint:
          var_name = (re.split('^' + self._extract_features_scope + '/',
                               var_name)[-1])
        variables_to_restore[var_name] = variable
    return variables_to_restore 
Example #16
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        tf.global_variables_initializer()

      # check that all the variable names start
      # with the proper scope.
      all_vars = tf.all_variables()
      prefix = prefix or "RNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #17
Source File: run_summarization.py    From RLSeq2Seq with MIT License
def restore_best_model(self):
    """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory"""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars in the model
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from eval dir
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to train dir and quit
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit() 
Example #18
Source File: trainer.py    From StackGAN with MIT License
def build_model(self, sess):
        self.init_opt()

        sess.run(tf.initialize_all_variables())
        if len(self.model_path) > 0:
            print("Reading model parameters from %s" % self.model_path)
            all_vars = tf.trainable_variables()
            # all_vars = tf.all_variables()
            restore_vars = []
            for var in all_vars:
                if var.name.startswith('g_') or var.name.startswith('d_'):
                    restore_vars.append(var)
                    # print(var.name)
            saver = tf.train.Saver(restore_vars)
            saver.restore(sess, self.model_path)

            istart = self.model_path.rfind('_') + 1
            iend = self.model_path.rfind('.')
            counter = self.model_path[istart:iend]
            counter = int(counter)
        else:
            print("Created model with fresh parameters.")
            counter = 0
        return counter 
Example #19
Source File: utils.py    From variational-continual-learning with Apache License 2.0
def load_params(sess, filename, checkpoint, init_all=True):
    params = tf.trainable_variables()
    filename = filename + '_' + str(checkpoint)
    f = open(filename + '.pkl', 'rb')  # pickle files must be opened in binary mode
    param_dict = cPickle.load(f)  # cPickle per the original project (Python 2); use pickle on Python 3
    print('param loaded', len(param_dict))
    f.close()
    ops = []
    for v in params:
        if v.name in param_dict:
            ops.append(tf.assign(v, param_dict[v.name]))
    sess.run(ops)
    # init uninitialised params
    if init_all:
        all_var = tf.all_variables()
        var = [v for v in all_var if v not in params]
        sess.run(tf.initialize_variables(var))
    print('loaded parameters from ' + filename + '.pkl')
Example #20
Source File: trainer.py    From StackGAN with MIT License
def build_model(self, sess):
        self.init_opt()
        sess.run(tf.initialize_all_variables())

        if len(self.model_path) > 0:
            print("Reading model parameters from %s" % self.model_path)
            restore_vars = tf.all_variables()
            # all_vars = tf.all_variables()
            # restore_vars = [var for var in all_vars if
            #                 var.name.startswith('g_') or
            #                 var.name.startswith('d_')]
            saver = tf.train.Saver(restore_vars)
            saver.restore(sess, self.model_path)

            istart = self.model_path.rfind('_') + 1
            iend = self.model_path.rfind('.')
            counter = self.model_path[istart:iend]
            counter = int(counter)
        else:
            print("Created model with fresh parameters.")
            counter = 0
        return counter 
Example #21
Source File: birds_skip_thought_demo.py    From StackGAN with MIT License
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE/cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if '.ckpt' in ckt_path:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images 
Example #22
Source File: demo.py    From StackGAN with MIT License
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE/cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if '.ckpt' in ckt_path:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images 
Example #23
Source File: rnn_test.py    From deep_image_model with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variable names start
      # with the proper scope.
      tf.global_variables_initializer()
      all_vars = tf.all_variables()
      prefix = prefix or "RNN"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #24
Source File: run_summarization.py    From TransferRL with MIT License
def restore_best_model(self):
    """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory"""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars in the model
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from eval dir
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to train dir and quit
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit() 
Example #25
Source File: utils.py    From variational-continual-learning with Apache License 2.0
def init_variables(sess, old_var_list=set()):
    all_var_list = set(tf.all_variables())
    init = tf.initialize_variables(var_list=all_var_list - old_var_list)
    sess.run(init)
    return all_var_list
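The helper above is meant to be called repeatedly as the graph grows, initializing only the variables created since the previous call. A hedged sketch of that call pattern (build_model_for_task is a hypothetical graph-building function):

old_vars = set()
build_model_for_task(0)                    # hypothetical: adds task-0 variables
old_vars = init_variables(sess, old_vars)  # initializes everything created so far
build_model_for_task(1)                    # hypothetical: adds task-1 variables
old_vars = init_variables(sess, old_vars)  # initializes only the new variables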
Example #26
Source File: param.py    From VDAIC2017 with MIT License
def setup_graph(self):
        all_vars = tf.all_variables()
        for v in all_vars:
            if v.name == self.var_name:
                self.var = v
                break
        else:
            raise ValueError("{} is not a VARIABLE in the graph!".format(self.var_name))

        self.val_holder = tf.placeholder(tf.float32, shape=self.shape,
                                         name=self._readable_name + '_feed')
        self.assign_op = self.var.assign(self.val_holder) 
Example #27
Source File: model_utils.py    From embedding with MIT License
def avg_checkpoints(model_dir, output_model_dir, last_k):
  tf.reset_default_graph()

  checkpoint_state = tf.train.get_checkpoint_state(model_dir)
  checkpoints = checkpoint_state.all_model_checkpoint_paths[-last_k:]
  var_list = tf.contrib.framework.list_variables(checkpoints[0])
  var_values, var_dtypes = {}, {}
  for (name, shape) in var_list:
    if not name.startswith("global_step"):
      var_values[name] = np.zeros(shape)
  for checkpoint in checkpoints:
    reader = tf.contrib.framework.load_checkpoint(checkpoint)
    for name in var_values:
      tensor = reader.get_tensor(name)
      var_dtypes[name] = tensor.dtype
      var_values[name] += tensor
    tf.logging.info("Read from checkpoint %s", checkpoint)
  for name in var_values:  # Average.
    var_values[name] /= len(checkpoints)

  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    tf_vars = [
        tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
        for v in var_values
    ]
  placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
  assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
  global_step = tf.Variable(
      0, name="global_step", trainable=False, dtype=tf.int64)
  saver = tf.train.Saver(tf.all_variables())

  # Build a model consisting only of variables, set them to the average values.
  with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for p, assign_op, (name, value) in zip(placeholders, assign_ops,
                                           six.iteritems(var_values)):
      sess.run(assign_op, {p: value})
    # Use the built saver to save the averaged checkpoint.
    saver.save(sess, join(output_model_dir, "model.ckpt"),
        global_step=global_step) 
Example #28
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testInitWithNoneLocalInitOpError(self):
    # Creating a SessionManager with a None local_init_op but
    # non-None ready_for_local_init_op raises ValueError
    with self.assertRaisesRegexp(ValueError,
                                 "If you pass a ready_for_local_init_op "
                                 "you must also pass a local_init_op "):
      tf.train.SessionManager(
          ready_for_local_init_op=tf.report_uninitialized_variables(
              tf.all_variables()),
          local_init_op=None) 
Example #29
Source File: EncoderNet.py    From DeepSim with MIT License
def __init__(self, trainable=True):
        self.original_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='original_image')
        self.image = subtract_mean(crop(self.original_image, cfg.RESIZED_SIZE, cfg.IMAGE_SIZE))  # crop to a fixed size and subtract the pixel mean
        self.classes = tf.placeholder(tf.float32, shape=[None, cfg.N_CLASSES])

        h = conv(self.image, 3, 3, 64, 1, 1, name='conv1_1', trainable=False)
        h = conv(h, 3, 3, 64, 1, 1, name='conv1_2', trainable=False)
        h = max_pool(h, 2, 2, 2, 2, pad='VALID', name='pool1')
        h = conv(h, 3, 3, 128, 1, 1, name='conv2_1', trainable=False)
        h = conv(h, 3, 3, 128, 1, 1, name='conv2_2', trainable=False)
        h = max_pool(h, 2, 2, 2, 2, pad='VALID', name='pool2')
        h = conv(h, 3, 3, 256, 1, 1, name='conv3_1', trainable=trainable)
        h = conv(h, 3, 3, 256, 1, 1, name='conv3_2', trainable=trainable)
        h = conv(h, 3, 3, 256, 1, 1, name='conv3_3', trainable=trainable)
        h = max_pool(h, 2, 2, 2, 2, pad='VALID', name='pool3')
        h = conv(h, 3, 3, 512, 1, 1, name='conv4_1', trainable=trainable)
        h = conv(h, 3, 3, 512, 1, 1, name='conv4_2', trainable=trainable)
        h = conv(h, 3, 3, 512, 1, 1, name='conv4_3', trainable=trainable)
        h = max_pool(h, 2, 2, 2, 2, pad='VALID', name='pool4')
        h = conv(h, 3, 3, 512, 1, 1, name='conv5_1', trainable=trainable)
        h = conv(h, 3, 3, 512, 1, 1, name='conv5_2', trainable=trainable)
        h = conv(h, 3, 3, 512, 1, 1, name='conv5_3', trainable=trainable)  # 14x14x512
        h = max_pool(h, 2, 2, 2, 2, pad='VALID', name='pool5') # 7x7x512
        h = tf.reshape(h, [-1, 7*7*512], name='reshape_pool5')
        if trainable:
            h = fc(h, 4096, name='fc6', trainable=trainable)
            h = tf.nn.dropout(h, 0.5, name='drop6')
            h = fc(h, 4096, name='fc7', trainable=trainable)
            h = tf.nn.dropout(h, 0.5, name='drop7')
        else:
            h = fc(h, 4096, name='fc6', trainable=trainable)
            h = fc(h, 4096, name='fc7', trainable=trainable)
        self.outputs = fc(h, cfg.N_CLASSES, activation='', name='cls_score', trainable=trainable)

        # Classification loss.
        self.cls_loss = tf.losses.sigmoid_cross_entropy(self.classes, self.outputs)
        self.outputs = tf.nn.sigmoid(self.outputs)

        # Variable collector.
        self.restore_variables = [var for var in tf.all_variables() if not var.name.startswith('cls_score')]
        self.trainable_variables = tf.trainable_variables() 
Example #30
Source File: model.py    From rl-attack-detection with MIT License
def restore(self, sess, ckpt, var_scope=None):
        # sess: tf session
        # ckpt: ckpt path (str)
        if var_scope is not None:
            all_vars = tf.all_variables()
            g_vars = [v for v in all_vars if v.name.startswith(var_scope)]
            # map checkpoint names to variables, stripping the scope prefix
            # (the [2:] slice assumes a two-character prefix such as 'G/')
            saver = tf.train.Saver({v.op.name[2:]: v for v in g_vars})
        else:
            saver = tf.train.Saver()

        saver.restore(sess, ckpt)