Python tensorflow.set_random_seed() Examples
The following are 30 code examples of tensorflow.set_random_seed().
You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
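Before the examples, here is a minimal sketch of the typical usage pattern (assuming TensorFlow 1.x graph mode, where tf.set_random_seed sets the graph-level seed; TensorFlow 2.x renames the call to tf.random.set_seed). The graph-level seed combines with per-op seeds, so rebuilding the same graph with the same seed reproduces the same random values.

# Minimal sketch, assuming TensorFlow 1.x graph mode.
import tensorflow as tf

def sample_with_seed(seed):
    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(seed)          # graph-level seed
        values = tf.random_uniform([3])   # op-level seed derived from graph seed
        with tf.Session(graph=graph) as sess:
            return sess.run(values)

# Two fresh graphs built with the same seed yield identical samples.
print(sample_with_seed(42))
print(sample_with_seed(42))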
Example #1
Source File: inputs_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
    string_placeholder = tf.placeholder(tf.string, shape=[])
    replaced_string = inputs._replace_empty_string_with_random_number(
        string_placeholder)

    empty_string = ''
    feed_dict = {string_placeholder: empty_string}

    tf.set_random_seed(0)

    with self.test_session() as sess:
        out_string = sess.run(replaced_string, feed_dict=feed_dict)

        # Test whether out_string is a string which represents an integer.
        int(out_string)  # throws an error if out_string is not castable to int.

        self.assertEqual(out_string, '2798129067578209328')
Example #2
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #3
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 2)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #4
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
Example #5
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, 'GPU:0')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
Example #6
Source File: test_method.py From mmvec with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_equalize_sv(self):
    np.random.seed(1)
    tf.reset_default_graph()
    tf.set_random_seed(0)
    latent_dim = 2

    res_ranks, res_biplot = paired_omics(
        self.microbes, self.metabolites,
        epochs=1000, latent_dim=latent_dim,
        min_feature_count=1, learning_rate=0.1,
        equalize_biplot=True
    )
    # make sure the biplot is of the correct dimensions
    npt.assert_allclose(
        res_biplot.samples.shape,
        np.array([self.microbes.shape[0], latent_dim]))
    npt.assert_allclose(
        res_biplot.features.shape,
        np.array([self.metabolites.shape[0], latent_dim]))

    # make sure that the biplot has the correct ordering
    self.assertGreater(res_biplot.proportion_explained[0],
                       res_biplot.proportion_explained[1])
    self.assertGreater(res_biplot.eigvals[0],
                       res_biplot.eigvals[1])
Example #7
Source File: model_deploy_test.py From ctw-baseline with MIT License | 6 votes |
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, 'GPU:0')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
Example #8
Source File: nn_model.py From mercari-price-suggestion with MIT License | 6 votes |
def __init__(self, train_df, word_count, batch_size, epochs):
    tf.set_random_seed(4)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=2,
                                  inter_op_parallelism_threads=8)
    backend.set_session(tf.Session(graph=tf.get_default_graph(),
                                   config=session_conf))

    self.batch_size = batch_size
    self.epochs = epochs

    self.max_name_seq = 10
    self.max_item_desc_seq = 75
    self.max_text = word_count + 1
    self.max_brand = np.max(train_df.brand_name.max()) + 1
    self.max_condition = np.max(train_df.item_condition_id.max()) + 1
    self.max_subcat0 = np.max(train_df.subcat_0.max()) + 1
    self.max_subcat1 = np.max(train_df.subcat_1.max()) + 1
    self.max_subcat2 = np.max(train_df.subcat_2.max()) + 1
Example #9
Source File: model_deploy_test.py From ctw-baseline with MIT License | 6 votes |
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
Example #10
Source File: misc_util.py From HardRLWithYoutube with MIT License | 6 votes |
def set_global_seeds(i):
    try:
        import MPI
        rank = MPI.COMM_WORLD.Get_rank()
    except ImportError:
        rank = 0
    myseed = i + 1000 * rank if i is not None else None
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(myseed)
    np.random.seed(myseed)
    random.seed(myseed)
Example #11
Source File: model_deploy_test.py From ctw-baseline with MIT License | 6 votes |
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 2)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #12
Source File: model_deploy_test.py From ctw-baseline with MIT License | 6 votes |
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #13
Source File: dcgan_test.py From DeepLab_v3 with MIT License | 6 votes |
def test_generator_graph(self):
    tf.set_random_seed(1234)
    # Check graph construction for a number of image size/depths and batch
    # sizes.
    for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
        tf.reset_default_graph()
        final_size = 2 ** i
        noise = tf.random_normal([batch_size, 64])
        image, end_points = dcgan.generator(
            noise, depth=32, final_size=final_size)

        self.assertAllEqual([batch_size, final_size, final_size, 3],
                            image.shape.as_list())

        expected_names = ['deconv%i' % j for j in xrange(1, i)] + ['logits']
        self.assertSetEqual(set(expected_names), set(end_points.keys()))

        # Check layer depths.
        for j in range(1, i):
            layer = end_points['deconv%i' % j]
            self.assertEqual(32 * 2**(i-j-1), layer.get_shape().as_list()[-1])
Example #14
Source File: dataset_builder_test.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def test_enable_shuffle(self):
    config = input_reader_pb2.InputReader()
    config.num_readers = 1
    config.shuffle = True

    tf.set_random_seed(1)  # Set graph level seed.
    data = self._get_dataset_next(
        [self._shuffle_path_template % '*'], config, batch_size=10)
    expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    with self.test_session() as sess:
        self.assertTrue(
            np.any(np.not_equal(sess.run(data), expected_non_shuffle_output)))
Example #15
Source File: model_deploy_test.py From ctw-baseline with MIT License | 5 votes |
def testCreateMulticlone(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        num_clones = 4
        deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(len(clones), num_clones)
        for i, clone in enumerate(clones):
            self.assertEqual(
                clone.outputs.op.name,
                'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
            self.assertEqual(len(update_ops), 2)
            self.assertEqual(clone.scope, 'clone_%d/' % i)
            self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
Example #16
Source File: model_deploy_test.py From ctw-baseline with MIT License | 5 votes |
def testCreateMulticloneCPU(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        model_args = (tf_inputs, tf_labels)
        num_clones = 4
        deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                      clone_on_cpu=True)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), num_clones * 2)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #17
Source File: srez_main.py From srez with MIT License | 5 votes |
def setup_tensorflow():
    # Create session
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=config)

    # Initialize rng with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    return sess, summary_writer
Example #18
Source File: resnet_v2_test.py From ctw-baseline with MIT License | 5 votes |
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with tf.Graph().as_default():
                with self.test_session() as sess:
                    tf.set_random_seed(0)
                    inputs = create_test_input(2, 81, 81, 3)
                    # Dense feature extraction followed by subsampling.
                    output, _ = self._resnet_small(inputs, None,
                                                   is_training=False,
                                                   global_pool=False,
                                                   output_stride=output_stride)
                    if output_stride is None:
                        factor = 1
                    else:
                        factor = nominal_stride // output_stride
                    output = resnet_utils.subsample(output, factor)
                    # Make the two networks use the same weights.
                    tf.get_variable_scope().reuse_variables()
                    # Feature extraction at the nominal network rate.
                    expected, _ = self._resnet_small(inputs, None,
                                                     is_training=False,
                                                     global_pool=False)
                    sess.run(tf.global_variables_initializer())
                    self.assertAllClose(output.eval(), expected.eval(),
                                        atol=1e-4, rtol=1e-4)
Example #19
Source File: resnet_v1_test.py From ctw-baseline with MIT License | 5 votes |
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with tf.Graph().as_default():
                with self.test_session() as sess:
                    tf.set_random_seed(0)
                    inputs = create_test_input(2, 81, 81, 3)
                    # Dense feature extraction followed by subsampling.
                    output, _ = self._resnet_small(inputs, None,
                                                   is_training=False,
                                                   global_pool=False,
                                                   output_stride=output_stride)
                    if output_stride is None:
                        factor = 1
                    else:
                        factor = nominal_stride // output_stride
                    output = resnet_utils.subsample(output, factor)
                    # Make the two networks use the same weights.
                    tf.get_variable_scope().reuse_variables()
                    # Feature extraction at the nominal network rate.
                    expected, _ = self._resnet_small(inputs, None,
                                                     is_training=False,
                                                     global_pool=False)
                    sess.run(tf.global_variables_initializer())
                    self.assertAllClose(output.eval(), expected.eval(),
                                        atol=1e-4, rtol=1e-4)
Example #20
Source File: misc_util.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 5 votes |
def set_global_seeds(i):
    try:
        import MPI
        rank = MPI.COMM_WORLD.Get_rank()
    except ImportError:
        rank = 0
    myseed = i + 1000 * rank if i is not None else None
    try:
        import tensorflow as tf
        tf.set_random_seed(myseed)
    except ImportError:
        pass
    np.random.seed(myseed)
    random.seed(myseed)
Example #21
Source File: model_deploy_test.py From ctw-baseline with MIT License | 5 votes |
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        model_fn = BatchNormClassifier
        model_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
Example #22
Source File: util.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 5 votes |
def simple_test(env_fn, learn_fn, min_reward_fraction, n_trials=N_TRIALS):
    np.random.seed(0)
    np_random.seed(0)
    env = DummyVecEnv([env_fn])

    with tf.Graph().as_default(), tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True)).as_default():
        tf.set_random_seed(0)
        model = learn_fn(env)
        sum_rew = 0
        done = True
        for i in range(n_trials):
            if done:
                obs = env.reset()
                state = model.initial_state
            if state is not None:
                a, v, state, _ = model.step(obs, S=state, M=[False])
            else:
                a, v, _, _ = model.step(obs)
            obs, rew, done, _ = env.step(a)
            sum_rew += float(rew)
        print("Reward in {} trials is {}".format(n_trials, sum_rew))
        assert sum_rew > min_reward_fraction * n_trials, \
            'sum of rewards {} is less than {} of the total number of trials {}'.format(
                sum_rew, min_reward_fraction, n_trials)
Example #23
Source File: test_method.py From mmvec with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_fit(self):
    np.random.seed(1)
    tf.reset_default_graph()
    tf.set_random_seed(0)
    latent_dim = 2

    res_ranks, res_biplot = paired_omics(
        self.microbes, self.metabolites,
        epochs=1000, latent_dim=latent_dim,
        min_feature_count=1, learning_rate=0.1
    )
    res_ranks = clr_inv(res_ranks.T)
    s_r, s_p = spearmanr(np.ravel(res_ranks), np.ravel(self.exp_ranks))

    self.assertGreater(s_r, 0.5)
    self.assertLess(s_p, 1e-2)

    # make sure the biplot is of the correct dimensions
    npt.assert_allclose(
        res_biplot.samples.shape,
        np.array([self.microbes.shape[0], latent_dim]))
    npt.assert_allclose(
        res_biplot.features.shape,
        np.array([self.metabolites.shape[0], latent_dim]))

    # make sure that the biplot has the correct ordering
    self.assertGreater(res_biplot.proportion_explained[0],
                       res_biplot.proportion_explained[1])
    self.assertGreater(res_biplot.eigvals[0],
                       res_biplot.eigvals[1])
Example #24
Source File: tf_nn_classification.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #25
Source File: insurance_tf_nn_classification_downsample.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #26
Source File: insurance_tf_nn_classification_upsample.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #27
Source File: tf_nn_classification_bad.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #28
Source File: tensorflow_cnn_from_scratch.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #29
Source File: iceberg_tensorflow_cnn.py From kaggle-code with MIT License | 5 votes |
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
Example #30
Source File: dcgan_test.py From DeepLab_v3 with MIT License | 5 votes |
def test_generator_run(self):
    tf.set_random_seed(1234)
    noise = tf.random_normal([100, 64])
    image, _ = dcgan.generator(noise)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        image.eval()