Python tensorflow.disable_v2_behavior() Examples
The following are 6 code examples of tensorflow.disable_v2_behavior(). Each example notes the original project, source file, and license it comes from. You may also want to check out the other available functions and classes of the tensorflow module.
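Most of the examples below follow the same basic pattern: detect that TensorFlow 2.x is installed, switch to the tensorflow.compat.v1 API, and call disable_v2_behavior() so that v1-style graph code (placeholders, sessions, collections) keeps working. The sketch below is not taken from any of the projects listed; it is a minimal illustration of that pattern.

    import tensorflow as tf

    # When running under TensorFlow 2.x, fall back to the v1 compatibility API
    # and turn off v2 behavior (eager execution, resource variables, ...).
    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()

    # v1-style graph code now works as it did under TensorFlow 1.x.
    x = tf.placeholder(tf.float32, shape=[None])
    total = tf.reduce_sum(x)
    with tf.Session() as sess:
        print(sess.run(total, feed_dict={x: [1.0, 2.0, 3.0]}))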
Example #1
Source File: helpers.py From arviz with Apache License 2.0
def tfp_schools_model(num_schools, treatment_stddevs):
    """Non-centered eight schools model for tfp."""
    import tensorflow_probability.python.edward2 as ed
    import tensorflow as tf

    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()

    avg_effect = ed.Normal(loc=0.0, scale=10.0, name="avg_effect")  # `mu`
    avg_stddev = ed.Normal(loc=5.0, scale=1.0, name="avg_stddev")  # `log(tau)`
    school_effects_standard = ed.Normal(
        loc=tf.zeros(num_schools), scale=tf.ones(num_schools), name="school_effects_standard"
    )  # `eta`
    school_effects = avg_effect + tf.exp(avg_stddev) * school_effects_standard  # `theta`
    treatment_effects = ed.Normal(
        loc=school_effects, scale=treatment_stddevs, name="treatment_effects"
    )  # `y`
    return treatment_effects
Example #2
Source File: test_data_tfp.py From arviz with Apache License 2.0
def get_inference_data3(self, data, eight_schools_params):
    """Read with observed Tensor var_names and dims."""
    import tensorflow as tf

    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()

    inference_data = from_tfp(
        data.obj,
        var_names=["mu", "tau", "eta"],
        model_fn=lambda: data.model(
            eight_schools_params["J"], eight_schools_params["sigma"].astype(np.float32)
        ),
        posterior_predictive_samples=100,
        posterior_predictive_size=3,
        observed=tf.convert_to_tensor(
            np.vstack(
                (
                    eight_schools_params["y"],
                    eight_schools_params["y"],
                    eight_schools_params["y"],
                )
            ).astype(np.float32),
            np.float32,
        ),
        coords={"school": np.arange(eight_schools_params["J"])},
        dims={"eta": ["school"], "obs": ["size_dim", "school"]},
    )
    return inference_data
Example #3
Source File: test_tensorboard_integration.py From jupyter_tensorboard with MIT License
def tf_logs(tmpdir_factory):

    import numpy as np
    try:
        import tensorflow.compat.v1 as tf
        tf.disable_v2_behavior()
    except ImportError:
        import tensorflow as tf

    x = np.random.rand(5)
    y = 3 * x + 1 + 0.05 * np.random.rand(5)

    a = tf.Variable(0.1)
    b = tf.Variable(0.)
    err = a * x + b - y

    loss = tf.norm(err)
    tf.summary.scalar("loss", loss)
    tf.summary.scalar("a", a)
    tf.summary.scalar("b", b)
    merged = tf.summary.merge_all()
    optimizor = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    with tf.Session() as sess:
        log_dir = tmpdir_factory.mktemp("logs", numbered=False)
        log_dir = str(log_dir)
        train_write = tf.summary.FileWriter(log_dir, sess.graph)

        tf.global_variables_initializer().run()
        for i in range(1000):
            _, merged_ = sess.run([optimizor, merged])
            train_write.add_summary(merged_, i)

    return log_dir
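As a hedged follow-up (not part of the jupyter_tensorboard test suite): once the fixture has written its event files, one way to confirm what was logged is to read them back with the v1 summary iterator. The log_dir value and the glob pattern below are assumptions about the directory returned above.

    import glob
    import os

    import tensorflow.compat.v1 as tf

    # Placeholder: point this at the directory returned by the tf_logs fixture above.
    log_dir = "logs"
    event_files = glob.glob(os.path.join(log_dir, "events.out.tfevents.*"))

    for event in tf.train.summary_iterator(event_files[0]):
        for value in event.summary.value:
            # Prints the step, tag ("loss", "a", "b"), and scalar value of each summary.
            print(event.step, value.tag, value.simple_value)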
Example #4
Source File: io_tfp.py From arviz with Apache License 2.0
def __init__(
    self,
    *,
    posterior,
    var_names=None,
    model_fn=None,
    feed_dict=None,
    posterior_predictive_samples=100,
    posterior_predictive_size=1,
    chain_dim=None,
    observed=None,
    coords=None,
    dims=None
):
    self.posterior = posterior
    if var_names is None:
        self.var_names = []
        for i in range(0, len(posterior)):
            self.var_names.append("var_{0}".format(i))
    else:
        self.var_names = var_names
    self.model_fn = model_fn
    self.feed_dict = feed_dict
    self.posterior_predictive_samples = posterior_predictive_samples
    self.posterior_predictive_size = posterior_predictive_size
    self.observed = observed
    self.chain_dim = chain_dim
    self.coords = coords
    self.dims = dims

    import tensorflow_probability as tfp
    import tensorflow as tf
    import tensorflow_probability.python.edward2 as ed

    self.tfp = tfp
    self.tf = tf  # pylint: disable=invalid-name
    self.ed = ed  # pylint: disable=invalid-name

    if int(self.tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()
        self.tf = tf  # pylint: disable=invalid-name
Example #5
Source File: helpers.py From arviz with Apache License 2.0
def tfp_noncentered_schools(data, draws, chains):
    """Non-centered eight schools implementation for tfp."""
    import tensorflow_probability as tfp
    import tensorflow_probability.python.edward2 as ed
    import tensorflow as tf

    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()

    del chains

    log_joint = ed.make_log_joint_fn(tfp_schools_model)

    def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
        """Unnormalized target density as a function of states."""
        return log_joint(
            num_schools=data["J"],
            treatment_stddevs=data["sigma"].astype(np.float32),
            avg_effect=avg_effect,
            avg_stddev=avg_stddev,
            school_effects_standard=school_effects_standard,
            treatment_effects=data["y"].astype(np.float32),
        )

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=draws,
        num_burnin_steps=500,
        current_state=[
            tf.zeros([], name="init_avg_effect"),
            tf.zeros([], name="init_avg_stddev"),
            tf.ones([data["J"]], name="init_school_effects_standard"),
        ],
        kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn, step_size=0.4, num_leapfrog_steps=3
        ),
    )

    with tf.Session() as sess:
        [states_, _] = sess.run([states, kernel_results])
    return tfp_schools_model, states_
Example #6
Source File: nn.py From acerta-abide with GNU General Public License v2.0
def run_nn(hdf5, experiment, code_size_1, code_size_2):
    # tf.disable_v2_behavior()
    exp_storage = hdf5["experiments"][experiment]

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test = load_fold(hdf5["patients"], exp_storage, fold)

        ae1_model_path = format_config("./data/models/{experiment}_autoencoder-1.ckpt", {
            "experiment": experiment_cv,
        })
        ae2_model_path = format_config("./data/models/{experiment}_autoencoder-2.ckpt", {
            "experiment": experiment_cv,
        })
        nn_model_path = format_config("./data/models/{experiment}_mlp.ckpt", {
            "experiment": experiment_cv,
        })

        reset()

        # Run first autoencoder
        run_autoencoder1(experiment_cv,
                         X_train, y_train, X_valid, y_valid, X_test, y_test,
                         model_path=ae1_model_path,
                         code_size=code_size_1)

        reset()

        # Run second autoencoder
        run_autoencoder2(experiment_cv,
                         X_train, y_train, X_valid, y_valid, X_test, y_test,
                         model_path=ae2_model_path,
                         prev_model_path=ae1_model_path,
                         prev_code_size=code_size_1,
                         code_size=code_size_2)

        reset()

        # Run multilayer NN with pre-trained autoencoders
        run_finetuning(experiment_cv,
                       X_train, y_train, X_valid, y_valid, X_test, y_test,
                       model_path=nn_model_path,
                       prev_model_1_path=ae1_model_path,
                       prev_model_2_path=ae2_model_path,
                       code_size_1=code_size_1,
                       code_size_2=code_size_2)