Python tensorflow.get_default_session() Examples

The following are 28 code examples of tensorflow.get_default_session(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
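At its core, tf.get_default_session() returns the innermost session on the default-session stack, i.e. the one entered via a with tf.Session(): or with sess.as_default(): block, and None when no session is active. A minimal sketch of that behavior (TensorFlow 1.x; under TensorFlow 2.x the same call lives at tf.compat.v1.get_default_session()):

import tensorflow as tf

assert tf.get_default_session() is None  # no session has been entered yet

with tf.Session() as sess:
    # Inside the with-block this session is the default, so library code
    # can run ops without an explicit session argument.
    assert tf.get_default_session() is sess
    x = tf.constant(42)
    print(tf.get_default_session().run(x))  # 42

assert tf.get_default_session() is None  # the default is popped on exit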
Example #1
Source File: facenet.py    From TNT with GNU General Public License v3.0
def load_model(model, input_map=None):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, input_map=input_map, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)
        
        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
      
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file)) 
Example #2
Source File: pretrained.py    From tensornets with MIT License
def assign(scopes):
    if not isinstance(scopes, list):
        scopes = [scopes]
    for scope in scopes:
        model_name = parse_scopes(scope)[0]
        try:
            __load_dict__[model_name](scope)
        except KeyError:
            try:
                tf.get_default_session().run(scope.pretrained())
            except Exception:
                found = False
                for (key, fun) in __load_dict__.items():
                    if key in model_name.lower():
                        found = True
                        fun(scope)
                        break
                if not found:
                    warnings.warn('Random initialization will be performed '
                                  'because the pre-trained weights for ' +
                                  model_name + ' are not found.')
                    init(scope) 
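For context, a hedged usage sketch of the fallback chain above: in tensornets, pretrained weights are typically assigned while a session is active, since assign() resolves it via tf.get_default_session() (the model choice and input shape here are illustrative):

import tensorflow as tf
import tensornets as nets

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
model = nets.ResNet50(inputs)

with tf.Session() as sess:
    # model.pretrained() returns the weight-loading ops that assign() runs.
    sess.run(model.pretrained())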
Example #3
Source File: tf_util.py    From stable-baselines with MIT License
def __call__(self, *args, sess=None, **kwargs):
        assert len(args) <= len(self.inputs), "Too many arguments provided"
        if sess is None:
            sess = tf.get_default_session()
        feed_dict = {}
        # Update the args
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Update feed dict with givens.
        for inpt in self.givens:
            feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
        results = sess.run(self.outputs_update, feed_dict=feed_dict, **kwargs)[:-1]
        return results


# ================================================================
# Flat vectors
# ================================================================ 
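This __call__ implements stable-baselines' Theano-like function wrapper; a minimal usage sketch, adapted from the tf_util.function docstring (the import path is an assumption):

import tensorflow as tf
from stable_baselines.common import tf_util

x = tf.placeholder(tf.int32, (), name='x')
y = tf.placeholder(tf.int32, (), name='y')
z = 3 * x + 2 * y
lin = tf_util.function([x, y], z, givens={y: 0})

with tf.Session().as_default():
    print(lin(2))     # 6: y falls back to its given default of 0
    print(lin(2, 2))  # 10: both placeholders fed positionally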
Example #4
Source File: tf_util.py    From stable-baselines with MIT License
def initialize(sess=None):
    """
    Initialize all the uninitialized variables in the global scope.

    :param sess: (TensorFlow Session)
    """
    if sess is None:
        sess = tf.get_default_session()
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    sess.run(tf.variables_initializer(new_variables))
    ALREADY_INITIALIZED.update(new_variables)


# ================================================================
# Theano-like Function
# ================================================================ 
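A brief usage note (a sketch under the same stable-baselines assumptions): initialize() falls back to tf.get_default_session(), so it must run with a session active:

import tensorflow as tf
from stable_baselines.common import tf_util

v = tf.Variable(1.0)
with tf.Session().as_default():
    tf_util.initialize()  # runs variables_initializer via the default session
    print(v.eval())       # 1.0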
Example #5
Source File: adversary.py    From stable-baselines with MIT License
def get_reward(self, obs, actions):
        """
        Predict the reward using the observation and action

        :param obs: (tf.Tensor or np.ndarray) the observation
        :param actions: (tf.Tensor or np.ndarray) the action
        :return: (np.ndarray) the reward
        """
        sess = tf.get_default_session()
        if len(obs.shape) == 1:
            obs = np.expand_dims(obs, 0)
        if len(actions.shape) == 1:
            actions = np.expand_dims(actions, 0)
        elif len(actions.shape) == 0:
            # one discrete action
            actions = np.expand_dims(actions, 0)

        feed_dict = {self.generator_obs_ph: obs, self.generator_acs_ph: actions}
        reward = sess.run(self.reward_op, feed_dict)
        return reward 
Example #6
Source File: mpi_adam.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def test_MpiAdam():
    np.random.seed(0)
    tf.set_random_seed(0)

    a = tf.Variable(np.random.randn(3).astype('float32'))
    b = tf.Variable(np.random.randn(2, 5).astype('float32'))
    loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))

    stepsize = 1e-2
    update_op = tf.train.AdamOptimizer(stepsize).minimize(loss)
    do_update = U.function([], loss, updates=[update_op])

    tf.get_default_session().run(tf.global_variables_initializer())
    for i in range(10):
        print(i, do_update())

    tf.set_random_seed(0)
    tf.get_default_session().run(tf.global_variables_initializer())

    var_list = [a, b]
    lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)], updates=[update_op])
    adam = MpiAdam(var_list)

    for i in range(10):
        l, g = lossandgrad()
        adam.update(g, stepsize)
        print(i, l)
Example #7
Source File: adversary.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def get_reward(self, obs, acs):
        sess = tf.get_default_session()
        if len(obs.shape) == 1:
            obs = np.expand_dims(obs, 0)
        if len(acs.shape) == 1:
            acs = np.expand_dims(acs, 0)
        feed_dict = {self.generator_obs_ph: obs, self.generator_acs_ph: acs}
        reward = sess.run(self.reward_op, feed_dict)
        return reward 
Example #8
Source File: test_distri.py    From stable-baselines with MIT License
def validate_probtype(probtype, pdparam):
    """
    validate probability distribution types

    :param probtype: (ProbabilityDistributionType) the type to validate
    :param pdparam: ([float]) the flat probabilities to test
    """
    number_samples = 100000
    # Check to see if mean negative log likelihood == differential entropy
    mval = np.repeat(pdparam[None, :], number_samples, axis=0)
    mval_ph = probtype.param_placeholder([number_samples])
    xval_ph = probtype.sample_placeholder([number_samples])
    proba_distribution = probtype.proba_distribution_from_flat(mval_ph)
    calcloglik = tf_util.function([xval_ph, mval_ph], proba_distribution.logp(xval_ph))
    calcent = tf_util.function([mval_ph], proba_distribution.entropy())
    xval = tf.get_default_session().run(proba_distribution.sample(), feed_dict={mval_ph: mval})
    logliks = calcloglik(xval, mval)
    entval_ll = - logliks.mean()
    entval_ll_stderr = logliks.std() / np.sqrt(number_samples)
    entval = calcent(mval).mean()
    assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr  # within 3 sigmas

    # Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
    mval2_ph = probtype.param_placeholder([number_samples])
    pd2 = probtype.proba_distribution_from_flat(mval2_ph)
    tmp = pdparam + np.random.randn(pdparam.size) * 0.1
    mval2 = np.repeat(tmp[None, :], number_samples, axis=0)
    calckl = tf_util.function([mval_ph, mval2_ph], proba_distribution.kl(pd2))
    klval = calckl(mval, mval2).mean()
    logliks = calcloglik(xval, mval2)
    klval_ll = - entval - logliks.mean()
    klval_ll_stderr = logliks.std() / np.sqrt(number_samples)
    assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr  # within 3 sigmas
    print('ok on', probtype, pdparam) 
Example #9
Source File: tf_util.py    From stable-baselines with MIT License
def __call__(self, theta):
        if self.sess is None:
            return tf.get_default_session().run(self.operation, feed_dict={self.theta: theta})
        else:
            return self.sess.run(self.operation, feed_dict={self.theta: theta}) 
Example #10
Source File: tf_util.py    From stable-baselines with MIT License
def __call__(self):
        if self.sess is None:
            return tf.get_default_session().run(self.operation)
        else:
            return self.sess.run(self.operation)


# ================================================================
# retrieving variables
# ================================================================ 
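The two __call__ methods above appear to belong to the SetFromFlat and GetFlat helpers in stable_baselines.common.tf_util (class names assumed from that module); a hedged round-trip sketch:

import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util

v = tf.Variable([1.0, 2.0, 3.0])
get_flat = tf_util.GetFlat([v])           # reads variables as one flat vector
set_from_flat = tf_util.SetFromFlat([v])  # writes a flat vector back

with tf.Session().as_default():
    tf_util.initialize()
    set_from_flat(np.array([4.0, 5.0, 6.0], dtype=np.float32))
    print(get_flat())  # [4. 5. 6.]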
Example #11
Source File: mpi_adam.py    From stable-baselines with MIT License
def test_mpi_adam():
    """
    tests the MpiAdam object's functionality
    """
    np.random.seed(0)
    tf.set_random_seed(0)

    a_var = tf.Variable(np.random.randn(3).astype('float32'))
    b_var = tf.Variable(np.random.randn(2, 5).astype('float32'))
    loss = tf.reduce_sum(tf.square(a_var)) + tf.reduce_sum(tf.sin(b_var))

    learning_rate = 1e-2
    update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    do_update = tf_utils.function([], loss, updates=[update_op])

    tf.get_default_session().run(tf.global_variables_initializer())
    for step in range(10):
        print(step, do_update())

    tf.set_random_seed(0)
    tf.get_default_session().run(tf.global_variables_initializer())

    var_list = [a_var, b_var]
    lossandgrad = tf_utils.function([], [loss, tf_utils.flatgrad(loss, var_list)], updates=[update_op])
    adam = MpiAdam(var_list)

    for step in range(10):
        loss, grad = lossandgrad()
        adam.update(grad, learning_rate)
        print(step, loss) 
Example #12
Source File: gpu_utils.py    From XLnet-gen with MIT License
def load_from_checkpoint(saver, logdir):
    sess = tf.get_default_session()
    ckpt = tf.train.get_checkpoint_state(logdir)
    if ckpt and ckpt.model_checkpoint_path:
        if os.path.isabs(ckpt.model_checkpoint_path):
            # Restores from checkpoint with absolute path.
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # Restores from checkpoint with relative path.
            saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
        return True
    return False 
Example #13
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def run(*args, **kwargs): # Run the specified ops in the default session.
    return tf.get_default_session().run(*args, **kwargs) 
Example #14
Source File: bc.py    From imitation with MIT License
def reconstruct_policy(
        policy_path: str, sess: Optional[tf.Session] = None,
    ) -> BasePolicy:
        """Reconstruct a saved policy.

        Args:
            policy_path: path to a policy produced by `.save_policy()`.
            sess: optional session to construct policy under,
              if not the default session.

        Returns:
            policy: policy with reloaded weights.
        """
        if sess is None:
            sess = tf.get_default_session()
            assert sess is not None, "must supply session via kwarg or context mgr"

        # re-read data from dict
        with open(policy_path, "rb") as fp:
            loaded_pickle = cloudpickle.load(fp)

        # construct the policy class
        klass = loaded_pickle["class"]
        kwargs = loaded_pickle["kwargs"]
        with tf.variable_scope("reconstructed_policy"):
            rv_pol = klass(sess=sess, **kwargs)
            inner_scope = tf.get_variable_scope().name

        # set values for the new policy's parameters
        param_values = loaded_pickle["params"]
        set_tf_vars(values=param_values, scope=inner_scope, sess=sess)

        return rv_pol 
Example #15
Source File: freeze_graph.py    From tf_ctpn with MIT License
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.ckpt_dir)
            meta_file, ckpt_file = get_model_filenames(args.ckpt_dir)

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)

            saver = tf.train.import_meta_graph(meta_file, clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(sess, ckpt_file)

            input_graph_def = tf.get_default_graph().as_graph_def()

            for node in input_graph_def.node:
                if node.name == "vgg_16_1/rpn_bbox_pred/Conv2D":
                    node.name = "RPN/rpn_bbox_pred/Conv2D"

                if node.name == "vgg_16_1/rpn_cls_score_reshape":
                    node.name = "RPN/rpn_cls_score_reshape"

            output_node_names = ['RPN/rpn_bbox_pred/Conv2D', 'RPN/rpn_cls_prob_reshape']

            # We use a built-in TF helper to export variables to constants
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                input_graph_def,  # The graph_def is used to retrieve the nodes
                output_node_names  # The output node names are used to select the useful nodes
            )

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
                pb_file_size = f.size() / 1024. / 1024.
            print("%d ops in the final graph: %s, size: %d mb" %
                  (len(output_graph_def.node), args.output_file, pb_file_size)) 
Example #16
Source File: utils.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def save_state(fname):
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    saver = tf.train.Saver()
    saver.save(tf.get_default_session(), fname)

# ================================================================
# Placeholders
# ================================================================ 
Example #17
Source File: utils.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def load_state(fname):
    saver = tf.train.Saver()
    saver.restore(tf.get_default_session(), fname) 
Example #18
Source File: freeze_graph.py    From TNT with GNU General Public License v3.0
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)

            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))
            
            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()
            
            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, 'embeddings,label_batch')

        # Serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(args.output_file, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph: %s" % (len(output_graph_def.node), args.output_file)) 
Example #19
Source File: ops_test.py    From HyperGAN with MIT License
def test_lookup_activations(self):
        x = tf.constant(-1.0, shape=[2, 2])
        with self.test_session():
            activations = ['relu', 'prelu', 'selu', 'crelu']
            for activation in activations:
                activation = ops.lookup(activation)(x)
                tf.get_default_session().run(tf.global_variables_initializer())
                self.assertNotEqual(x.eval()[0][0], activation.eval()[0][0])
Example #20
Source File: pose_dataset.py    From tf-pose with Apache License 2.0
def start(self):
        self._sess = tf.get_default_session()
        super(DataFlowToQueue, self).start()
Example #21
Source File: rewards.py    From lm-human-preferences with MIT License
def set_reward_norm(self, *, old_mean, old_std, new_mean, new_std):
        """Given old_mean+-old_std of reward_model, change gain and bias to get N(new_mean,new_std)."""
        sess = tf.get_default_session()
        old_gain, old_bias = sess.run((self.reward_gain, self.reward_bias))
        assert old_gain == 1 and old_bias == 0,\
            f'set_reward_norm expects gain = 1 and bias = 0, not {old_gain}, {old_bias}'
        # gain * N(old_mean,old_std) + bias = N(gain * old_mean, gain * old_std) + bias
        #                                   = N(gain * old_mean + bias, gain * old_std)
        # gain * old_std = new_std, gain = new_std / old_std
        # gain * old_mean + bias = new_mean, bias = new_mean - gain * old_mean
        gain = new_std / old_std
        bias = new_mean - gain * old_mean
        sess.run(self._set_reward_norm, feed_dict={self._reward_gain_p: gain, self._reward_bias_p: bias}) 
Example #22
Source File: rewards.py    From lm-human-preferences with MIT License
def reset_reward_scale(self):
        sess = tf.get_default_session()
        sess.run(self._set_reward_norm, feed_dict={self._reward_gain_p: 1, self._reward_bias_p: 0}) 
Example #23
Source File: core.py    From lm-human-preferences with MIT License
def graph_function(**schemas: Schema):
    def decorate(make_op):
        def make_ph(path, schema):
            return tf.placeholder(name=f'arg_{make_op.__name__}_{path}', shape=schema.shape, dtype=schema.dtype)
        phs = nest.map_structure_with_paths(make_ph, schemas)
        op = make_op(**phs)
        sig = inspect.signature(make_op)
        @wraps(make_op)
        def run(*args, **kwargs):
            bound: inspect.BoundArguments = sig.bind(*args, **kwargs)
            bound.apply_defaults()

            arg_dict = bound.arguments
            for name, param in sig.parameters.items():
                if param.kind == inspect.Parameter.VAR_KEYWORD:
                    kwargs = arg_dict[name]
                    arg_dict.update(kwargs)
                    del arg_dict[name]
            flat_phs = nest.flatten(phs)
            flat_arguments = nest.flatten_up_to(phs, bound.arguments)
            feed = {ph: arg for ph, arg in zip(flat_phs, flat_arguments)}
            run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)

            return tf.get_default_session().run(op, feed_dict=feed, options=run_options, run_metadata=None)
        return run
    return decorate 
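A hypothetical usage sketch of the decorator (the Schema spec with dtype/shape fields is assumed from the surrounding code; the wrapped function must run while a session is active):

import numpy as np
import tensorflow as tf

@graph_function(x=Schema(dtype=tf.float32, shape=[None]))
def double(x):
    return 2.0 * x  # built once as a graph op over an auto-created placeholder

with tf.Session().as_default():
    print(double(np.array([1.0, 2.0], dtype=np.float32)))  # [2. 4.]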
Example #24
Source File: train_policy.py    From lm-human-preferences with MIT License
def step(self):
        step_started_at = time.time()

        queries = self.sample_queries()
        rollouts = self.policy.respond(queries, length=self.hparams.task.response_length)

        responses = rollouts['responses']
        logprobs = rollouts['logprobs']
        rollouts['queries'] = queries
        ref_logprobs = self.ref_policy.analyze_responses(queries, responses)['logprobs']
        scores, postprocessed_responses, score_stats = self.score_fn(queries, responses)

        rewards, non_score_reward, kl_coef = self.compute_rewards(
            scores=scores,
            logprobs=logprobs,
            ref_logprobs=ref_logprobs)
        rollouts['rewards'] = rewards

        train_stats = self.train(rollouts=rollouts)

        _, stats = self.record_step_stats(
            scores=scores, logprobs=logprobs, ref_logprobs=ref_logprobs, non_score_reward=non_score_reward,
            train_stats=train_stats, score_stats=score_stats, kl_coef=kl_coef)

        self.kl_ctl.update(stats['objective/kl'], self.hparams.ppo.batch_size)

        self.print_samples(queries=queries, responses=postprocessed_responses,
                           scores=scores, logprobs=logprobs, ref_logprobs=ref_logprobs)

        # Record profiles of the step times
        step = tf.get_default_session().run(tf.train.get_global_step())
        step_time = time.time() - step_started_at
        eps_per_second = float(self.hparams.ppo.batch_size) / step_time
        if self.comm.Get_rank() == 0:
            print(f"[ppo_step {step}] step_time={step_time:.2f}s, "
                  f"eps/s={eps_per_second:.2f}") 
Example #25
Source File: prediction.py    From aboleth with Apache License 2.0
def sample_model(graph=None, sess=None, feed_dict=None):
    """
    Sample the model parameters.

    This function returns a feed_dict containing values for the sample tensors
    in the model. This means that multiple calls to eval() will not change the
    model parameters as long as the output of this function is used as a
    feed_dict.

    Parameters
    ----------
    graph : tf.Graph
        The current graph. If none is provided, the default graph is used.
    sess : tf.Session
        The session to use for evaluating the tensors. If none is provided,
        the default session is used.
    feed_dict : dict
        An optional feed_dict to pass to the session.

    Returns
    -------
    collection : dict
        A feed_dict to use when evaluating the model.

    """
    if not graph:
        graph = tf.get_default_graph()
    if not sess:
        sess = tf.get_default_session()

    params = graph.get_collection('SampleTensors')
    param_values = sess.run(params, feed_dict=feed_dict)
    sample_feed_dict = dict(zip(params, param_values))
    return sample_feed_dict 
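A brief runnable note on why the returned feed_dict matters: feeding it back pins one draw of the sampled tensors across repeated evaluations (the toy 'SampleTensors' graph below is illustrative, not aboleth's API):

import tensorflow as tf

# Toy stand-in for a model parameter sample registered in 'SampleTensors'.
w = tf.random_normal([])
tf.add_to_collection('SampleTensors', w)
net_output = 2.0 * w

with tf.Session().as_default():
    fd = sample_model()                  # pin one draw of w
    y1 = net_output.eval(feed_dict=fd)   # both evaluations see the
    y2 = net_output.eval(feed_dict=fd)   # same sampled value of w
    assert y1 == y2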
Example #26
Source File: pretrained.py    From tensornets with MIT License
def _assign(scopes, values):
    sess = tf.get_default_session()
    assert sess is not None, 'The default session should be given.'

    scopes = parse_scopes(scopes)

    for scope in scopes:
        sess.run(pretrained_initializer(scope, values)) 
Example #27
Source File: utils.py    From tensornets with MIT License
def init(scopes, sess):
    if sess is None:
        sess = tf.get_default_session()
        assert sess is not None, 'The default session should be given.'

    if not isinstance(scopes, list):
        scopes = [scopes]

    for scope in scopes:
        sess.run(tf.variables_initializer(get_weights(scope))) 
Example #28
Source File: utils.py    From tensornets with MIT License
def save(model, weights_path, sess):
    if sess is None:
        sess = tf.get_default_session()
        assert sess is not None, 'The default session should be given.'

    weights = get_weights(model)
    names = [w.name for w in weights]
    values = sess.run(weights)
    np.savez(weights_path, names=names, values=values)