Python networks.discriminator() Examples
The following are 30 code examples of networks.discriminator().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
module networks, or try the search function.
Example #1
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def _get_optimizer(gen_lr, dis_lr):
  """Build the Adam optimizers for the generator and discriminator.

  Args:
    gen_lr: A scalar float `Tensor` or a Python number. The Generator
      learning rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
      learning rate.

  Returns:
    A `(generator_optimizer, discriminator_optimizer)` tuple.
  """
  # beta1=0.5 follows the reference CycleGAN implementation:
  # https://github.com/junyanz/CycleGAN/blob/master/options.lua
  def _adam(learning_rate):
    return tf.train.AdamOptimizer(learning_rate, beta1=0.5, use_locking=True)

  return _adam(gen_lr), _adam(dis_lr)
Example #2
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def _define_model(images_x, images_y):
  """Build a CycleGAN model mapping between the two image sets.

  Args:
    images_x: A 4D float `Tensor` in NHWC format. Images in set X.
    images_y: A 4D float `Tensor` in NHWC format. Images in set Y.

  Returns:
    A `CycleGANModel` namedtuple.
  """
  model = tfgan.cyclegan_model(
      generator_fn=networks.generator,
      discriminator_fn=networks.discriminator,
      data_x=images_x,
      data_y=images_y)
  # Attach image summaries so generated samples appear in TensorBoard.
  tfgan.eval.add_cyclegan_image_summaries(model)
  return model
Example #3
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def define_train_ops(gan_model, gan_loss, **kwargs):
  """Defines progressive GAN train ops.

  Args:
    gan_model: A `GANModel` namedtuple.
    gan_loss: A `GANLoss` namedtuple.
    **kwargs: A dictionary of
        'adam_beta1': A float of Adam optimizer beta1.
        'adam_beta2': A float of Adam optimizer beta2.
        'generator_learning_rate': A float of generator learning rate.
        'discriminator_learning_rate': A float of discriminator learning rate.

  Returns:
    A tuple of a `GANTrainOps` namedtuple and a list of variables tracking
    the state of the optimizers.
  """
  with tf.variable_scope('progressive_gan_train_ops') as var_scope:
    beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']
    gen_opt = tf.train.AdamOptimizer(
        kwargs['generator_learning_rate'], beta1, beta2)
    dis_opt = tf.train.AdamOptimizer(
        kwargs['discriminator_learning_rate'], beta1, beta2)
    gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)
  # Optimizer slot variables live under the scope opened above; return them
  # so callers can checkpoint/restore optimizer state.
  return gan_train_ops, tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)
Example #4
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def define_train_ops(gan_model, gan_loss, **kwargs):
  """Create progressive GAN train ops.

  Args:
    gan_model: A `GANModel` namedtuple.
    gan_loss: A `GANLoss` namedtuple.
    **kwargs: A dictionary holding 'adam_beta1', 'adam_beta2',
        'generator_learning_rate' and 'discriminator_learning_rate' floats.

  Returns:
    A tuple of a `GANTrainOps` namedtuple and a list of variables tracking
    the state of the optimizers.
  """
  with tf.variable_scope('progressive_gan_train_ops') as var_scope:
    b1 = kwargs['adam_beta1']
    b2 = kwargs['adam_beta2']
    gen_opt = tf.train.AdamOptimizer(
        kwargs['generator_learning_rate'], b1, b2)
    dis_opt = tf.train.AdamOptimizer(
        kwargs['discriminator_learning_rate'], b1, b2)
    train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)
  optimizer_state = tf.get_collection(
      tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)
  return train_ops, optimizer_state
Example #5
Source File: train.py From Gun-Detector with Apache License 2.0 | 6 votes |
def _define_model(images_x, images_y):
  """Build a CycleGAN model mapping between images_x and images_y.

  Args:
    images_x: A 4D float `Tensor` in NHWC format. Images in set X.
    images_y: A 4D float `Tensor` in NHWC format. Images in set Y.

  Returns:
    A `CycleGANModel` namedtuple.
  """
  model = tfgan.cyclegan_model(
      generator_fn=networks.generator,
      discriminator_fn=networks.discriminator,
      data_x=images_x,
      data_y=images_y)
  # Side-by-side real vs. generated comparisons for TensorBoard.
  tfgan.eval.add_image_comparison_summaries(
      model, num_comparisons=3, display_diffs=False)
  # Square grid of generated samples; edge length is sqrt(batch size).
  tfgan.eval.add_gan_model_image_summaries(
      model, grid_size=int(np.sqrt(FLAGS.batch_size)))
  return model
Example #6
Source File: train.py From Gun-Detector with Apache License 2.0 | 6 votes |
def _get_optimizer(gen_lr, dis_lr):
  """Return the generator and discriminator Adam optimizers.

  Args:
    gen_lr: A scalar float `Tensor` or a Python number. The Generator
      learning rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
      learning rate.

  Returns:
    A tuple of generator optimizer and discriminator optimizer.
  """
  # beta1 follows https://github.com/junyanz/CycleGAN/blob/master/options.lua
  shared_kwargs = {'beta1': 0.5, 'use_locking': True}
  generator_opt = tf.train.AdamOptimizer(gen_lr, **shared_kwargs)
  discriminator_opt = tf.train.AdamOptimizer(dis_lr, **shared_kwargs)
  return generator_opt, discriminator_opt
Example #7
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def _get_optimizer(gen_lr, dis_lr):
  """Construct Adam optimizers for both GAN sub-networks.

  Args:
    gen_lr: A scalar float `Tensor` or a Python number. The Generator
      learning rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
      learning rate.

  Returns:
    A tuple of generator optimizer and discriminator optimizer.
  """
  # beta1=0.5 mirrors the reference CycleGAN implementation:
  # https://github.com/junyanz/CycleGAN/blob/master/options.lua
  make_opt = lambda lr: tf.train.AdamOptimizer(lr, beta1=0.5, use_locking=True)
  return make_opt(gen_lr), make_opt(dis_lr)
Example #8
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def _define_model(images_x, images_y):
  """Define a CycleGAN model that maps between images_x and images_y.

  Args:
    images_x: A 4D float `Tensor` in NHWC format. Images in set X.
    images_y: A 4D float `Tensor` in NHWC format. Images in set Y.

  Returns:
    A `CycleGANModel` namedtuple.
  """
  cyclegan_model = tfgan.cyclegan_model(
      data_x=images_x,
      data_y=images_y,
      generator_fn=networks.generator,
      discriminator_fn=networks.discriminator)
  # Summaries for the generated images.
  tfgan.eval.add_cyclegan_image_summaries(cyclegan_model)
  return cyclegan_model
Example #9
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def _lr(gen_lr_base, dis_lr_base):
  """Return the generator and discriminator learning rates."""
  # The generator LR decays by 0.8 every 100k steps (staircase); the
  # discriminator LR stays at its base value.
  decayed_gen_lr = tf.train.exponential_decay(
      learning_rate=gen_lr_base,
      global_step=tf.train.get_or_create_global_step(),
      decay_steps=100000,
      decay_rate=0.8,
      staircase=True)
  return decayed_gen_lr, dis_lr_base
Example #10
Source File: networks_test.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def test_discriminator_run(self):
  """Smoke test: the discriminator builds and runs on a zero image batch."""
  images = tf.zeros([3, 70, 70, 3])
  logits = networks.discriminator(images)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(logits)
Example #11
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def define_loss(gan_model, **kwargs):
  """Defines progressive GAN losses.

  The generator and discriminator both use wasserstein loss. In addition,
  a small penalty term is added to the discriminator loss to prevent it
  getting too large.

  Args:
    gan_model: A `GANModel` namedtuple.
    **kwargs: A dictionary of
        'gradient_penalty_weight': A float of gradient penalty weight for
            wasserstein loss.
        'gradient_penalty_target': A float of gradient norm target for
            wasserstein loss.
        'real_score_penalty_weight': A float of additional penalty to keep
            the scores from drifting too far from zero.

  Returns:
    A `GANLoss` namedtuple.
  """
  gan_loss = tfgan.gan_loss(
      gan_model,
      generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
      discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
      gradient_penalty_weight=kwargs['gradient_penalty_weight'],
      gradient_penalty_target=kwargs['gradient_penalty_target'],
      gradient_penalty_epsilon=0.0)

  # Penalize the mean squared score on real data to anchor it near zero.
  real_score_penalty = tf.reduce_mean(
      tf.square(gan_model.discriminator_real_outputs))
  tf.summary.scalar('real_score_penalty', real_score_penalty)

  return gan_loss._replace(
      discriminator_loss=(
          gan_loss.discriminator_loss +
          kwargs['real_score_penalty_weight'] * real_score_penalty))
Example #12
Source File: networks_test.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def test_discriminator_run(self):
  """The discriminator output can be evaluated without errors."""
  disc_out = networks.discriminator(tf.zeros([3, 70, 70, 3]))
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(disc_out)
Example #13
Source File: networks_test.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def test_discriminator_invalid_input(self):
  """Malformed inputs raise shape errors at graph-construction time."""
  # A rank-3 tensor must be rejected; the network expects NHWC input.
  wrong_rank = tf.zeros([5, 32, 32])
  with self.assertRaisesRegexp(ValueError, 'Shape must be rank 4'):
    networks.discriminator(wrong_rank)
  # Spatial dimensions must be statically known.
  # NOTE(review): this branch calls networks.compression_model rather than
  # networks.discriminator -- confirm that is intentional.
  partial_shape = tf.placeholder(tf.float32, [3, None, 32, 3])
  with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
    networks.compression_model(partial_shape)
Example #14
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def _optimizer(gen_lr, dis_lr):
  """Return (generator_optimizer, discriminator_optimizer)."""
  # Both optimizers share the same Adam settings; only the LR differs.
  shared = {'epsilon': 1e-8, 'beta1': 0.5}
  gen_opt = tf.train.AdamOptimizer(gen_lr, **shared)
  dis_opt = tf.train.AdamOptimizer(dis_lr, **shared)
  return gen_opt, dis_opt
Example #15
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def _lr(gen_lr_base, dis_lr_base):
  """Return the generator and discriminator learning rates."""
  # Generator LR decays by 0.9 every 60k steps (staircase); the
  # discriminator LR is left at its base value.
  gen_lr = tf.train.exponential_decay(
      learning_rate=gen_lr_base,
      global_step=tf.train.get_or_create_global_step(),
      decay_steps=60000,
      decay_rate=0.9,
      staircase=True)
  return gen_lr, dis_lr_base
Example #16
Source File: train.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def _get_gan_model(generator_inputs, generated_data, real_data, generator_scope):
  """Manually construct and return a GANModel tuple."""
  # Collect the generator's trainable variables from its (caller-owned) scope.
  generator_vars = tf.contrib.framework.get_trainable_variables(generator_scope)
  discriminator_fn = networks.discriminator
  # Build the discriminator twice -- once on generated data, once on real
  # data -- sharing weights via variable-scope reuse.
  with tf.variable_scope('discriminator') as dis_scope:
    discriminator_gen_outputs = discriminator_fn(generated_data)
  with tf.variable_scope(dis_scope, reuse=True):
    discriminator_real_outputs = discriminator_fn(real_data)
  discriminator_vars = tf.contrib.framework.get_trainable_variables(
      dis_scope)
  # Manually construct GANModel tuple.
  gan_model = tfgan.GANModel(
      generator_inputs=generator_inputs,
      generated_data=generated_data,
      generator_variables=generator_vars,
      generator_scope=generator_scope,
      generator_fn=None,  # not necessary
      real_data=real_data,
      discriminator_real_outputs=discriminator_real_outputs,
      discriminator_gen_outputs=discriminator_gen_outputs,
      discriminator_variables=discriminator_vars,
      discriminator_scope=dis_scope,
      discriminator_fn=discriminator_fn)
  return gan_model
Example #17
Source File: train.py From DCGAN_WGAN_WGAN-GP_LSGAN_SNGAN_RSGAN_BEGAN_ACGAN_PGGAN_TensorFlow with MIT License | 5 votes |
def train():
    """Train an auxiliary-classifier GAN on the face dataset and periodically
    save sample images and checkpoints.

    NOTE(review): relies on module-level constants (H, W, NUMS_CLASS,
    EPSILON, BATCHSIZE) and helpers (generator, discriminator,
    read_face_data, get_batch_face) defined elsewhere in the file.
    """
    # Placeholders: real images, integer class labels, and the noise vector.
    real_img = tf.placeholder(tf.float32, [None, H, W, 3])
    label = tf.placeholder(tf.int32, [None])
    z = tf.placeholder(tf.float32, [None, 100])
    # Condition the generator by concatenating a one-hot label onto z.
    one_hot_label = tf.one_hot(label, NUMS_CLASS)
    labeled_z = tf.concat([z, one_hot_label], axis=1)
    G = generator("generator")
    D = discriminator("discriminator")
    fake_img = G(labeled_z)
    # The discriminator returns (class logits, adversarial output) pairs.
    class_fake_logits, adv_fake_logits = D(fake_img, NUMS_CLASS)
    class_real_logits, adv_real_logits = D(real_img, NUMS_CLASS)
    # Standard GAN log losses; EPSILON guards against log(0).
    loss_d_real = -tf.reduce_mean(tf.log(adv_real_logits + EPSILON))
    loss_d_fake = -tf.reduce_mean(tf.log(1 - adv_fake_logits + EPSILON))
    # Classification losses: log-probability of the true class.
    loss_cls_real = -tf.reduce_mean(tf.log(tf.reduce_sum(class_real_logits * one_hot_label, axis=1) + EPSILON))
    loss_cls_fake = -tf.reduce_mean(tf.log(tf.reduce_sum(class_fake_logits * one_hot_label, axis=1) + EPSILON))
    D_loss = loss_d_real + loss_d_fake + loss_cls_real
    G_loss = -tf.reduce_mean(tf.log(adv_fake_logits + EPSILON)) + loss_cls_fake
    # Separate optimizers so each update touches only its own variables.
    D_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(D_loss, var_list=D.var_list())
    G_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(G_loss, var_list=G.var_list())
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data, labels = read_face_data("./dataset/face_woman_man.mat")
    for i in range(50000):
        s = time.time()
        # One discriminator + generator update per iteration.
        for j in range(1):
            BATCH, LABELS, Z = get_batch_face(data, labels, BATCHSIZE)
            # Scale uint8 pixel values into [-1, 1].
            BATCH = BATCH / 127.5 - 1.0
            sess.run(D_opt, feed_dict={real_img: BATCH, label: LABELS, z: Z})
            sess.run(G_opt, feed_dict={real_img: BATCH, label: LABELS, z: Z})
        e = time.time()
        if i % 100 == 0:
            # Log losses and dump one generated sample (rescaled to [0, 255]).
            [D_LOSS, G_LOSS, FAKE_IMG] = sess.run([D_loss, G_loss, fake_img], feed_dict={real_img: BATCH, label: LABELS, z: Z})
            Image.fromarray(np.uint8((FAKE_IMG[0, :, :, :] + 1) * 127.5)).save("./results/" + str(i) + "_" + str(int(LABELS[0])) + ".jpg")
            print("Iteration: %d, D_loss: %f, G_loss: %f, update_time: %f" % (i, D_LOSS, G_LOSS, e - s))
        if i % 500 == 0:
            saver.save(sess, "./save_para/model.ckpt")
    pass
Example #18
Source File: networks_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_discriminator(self):
  """The discriminator maps a batch of images to one logit per example."""
  n = 5
  image = tf.random_uniform([n, 32, 32, 3], -1, 1)
  dis_output = networks.discriminator(image, None)
  with self.test_session(use_gpu=True) as sess:
    sess.run(tf.global_variables_initializer())
    self.assertAllEqual([n, 1], dis_output.eval().shape)
Example #19
Source File: networks_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_discriminator_run(self):
  """Building and evaluating the discriminator should not raise."""
  batch = tf.zeros([3, 70, 70, 3])
  output = networks.discriminator(batch)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(output)
Example #20
Source File: networks_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_discriminator_graph(self):
  """The graph builds for several image sizes and batch sizes."""
  for batch_size, patch_size in zip([3, 6], [70, 128]):
    tf.reset_default_graph()
    images = tf.ones([batch_size, patch_size, patch_size, 3])
    output = networks.discriminator(images)
    # Output is rank 2 with the batch dimension preserved.
    self.assertEqual(2, output.shape.ndims)
    self.assertEqual(batch_size, output.shape.as_list()[0])
Example #21
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def _lr(gen_lr_base, dis_lr_base):
  """Return the generator and discriminator learning rates."""
  decay_kwargs = {
      'decay_steps': 100000,
      'decay_rate': 0.8,
      'staircase': True,
  }
  # Only the generator LR decays; the discriminator LR is constant.
  gen_lr = tf.train.exponential_decay(
      learning_rate=gen_lr_base,
      global_step=tf.train.get_or_create_global_step(),
      **decay_kwargs)
  return gen_lr, dis_lr_base
Example #22
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def define_loss(gan_model, **kwargs):
  """Defines progressive GAN losses.

  The generator and discriminator both use wasserstein loss. In addition,
  a small penalty term is added to the discriminator loss to prevent it
  getting too large.

  Args:
    gan_model: A `GANModel` namedtuple.
    **kwargs: A dictionary of
        'gradient_penalty_weight': A float of gradient penalty weight for
            wasserstein loss.
        'gradient_penalty_target': A float of gradient norm target for
            wasserstein loss.
        'real_score_penalty_weight': A float of additional penalty to keep
            the scores from drifting too far from zero.

  Returns:
    A `GANLoss` namedtuple.
  """
  gan_loss = tfgan.gan_loss(
      gan_model,
      generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
      discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
      gradient_penalty_weight=kwargs['gradient_penalty_weight'],
      gradient_penalty_target=kwargs['gradient_penalty_target'],
      gradient_penalty_epsilon=0.0)

  # Keep real scores anchored near zero by penalizing their mean square.
  real_score_penalty = tf.reduce_mean(
      tf.square(gan_model.discriminator_real_outputs))
  tf.summary.scalar('real_score_penalty', real_score_penalty)

  return gan_loss._replace(
      discriminator_loss=(
          gan_loss.discriminator_loss +
          kwargs['real_score_penalty_weight'] * real_score_penalty))
Example #23
Source File: networks_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_discriminator_run(self):
  """Running the discriminator on a zero batch succeeds."""
  result = networks.discriminator(tf.zeros([3, 70, 70, 3]))
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(result)
Example #24
Source File: networks_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_discriminator_invalid_input(self):
  """Invalid input shapes raise ValueError while building the graph."""
  # Rank-3 input is rejected -- NHWC (rank 4) is required.
  with self.assertRaisesRegexp(ValueError, 'Shape must be rank 4'):
    networks.discriminator(tf.zeros([5, 32, 32]))
  # A shape with an unknown spatial dimension is rejected.
  # NOTE(review): this second check targets networks.compression_model,
  # not networks.discriminator -- verify against the original test suite.
  dynamic_height = tf.placeholder(tf.float32, [3, None, 32, 3])
  with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
    networks.compression_model(dynamic_height)
Example #25
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def _optimizer(gen_lr, dis_lr):
  """Return the generator and discriminator optimizers, in that order."""
  def _adam(lr):
    # Identical Adam hyper-parameters for both networks.
    return tf.train.AdamOptimizer(lr, epsilon=1e-8, beta1=0.5)

  return _adam(gen_lr), _adam(dis_lr)
Example #26
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def _lr(gen_lr_base, dis_lr_base):
  """Return the generator and discriminator learning rates."""
  # Staircase decay for the generator: x0.9 every 60k global steps.
  # The discriminator keeps its base learning rate.
  gen_lr = tf.train.exponential_decay(
      learning_rate=gen_lr_base,
      global_step=tf.train.get_or_create_global_step(),
      decay_steps=60000,
      decay_rate=0.9,
      staircase=True)
  return gen_lr, dis_lr_base
Example #27
Source File: train.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def _get_gan_model(generator_inputs, generated_data, real_data, generator_scope):
  """Manually construct and return a GANModel tuple."""
  # Gather the generator's trainable variables from the scope the caller
  # built it under.
  generator_vars = tf.contrib.framework.get_trainable_variables(generator_scope)
  discriminator_fn = networks.discriminator
  # Apply the discriminator to generated and real data with shared weights
  # (second application reuses the first scope's variables).
  with tf.variable_scope('discriminator') as dis_scope:
    discriminator_gen_outputs = discriminator_fn(generated_data)
  with tf.variable_scope(dis_scope, reuse=True):
    discriminator_real_outputs = discriminator_fn(real_data)
  discriminator_vars = tf.contrib.framework.get_trainable_variables(
      dis_scope)
  # Manually construct GANModel tuple.
  gan_model = tfgan.GANModel(
      generator_inputs=generator_inputs,
      generated_data=generated_data,
      generator_variables=generator_vars,
      generator_scope=generator_scope,
      generator_fn=None,  # not necessary
      real_data=real_data,
      discriminator_real_outputs=discriminator_real_outputs,
      discriminator_gen_outputs=discriminator_gen_outputs,
      discriminator_variables=discriminator_vars,
      discriminator_scope=dis_scope,
      discriminator_fn=discriminator_fn)
  return gan_model
Example #28
Source File: networks_test.py From Gun-Detector with Apache License 2.0 | 5 votes |
def test_discriminator_invalid_input(self):
  """Shape validation fires for rank-3 and partially-defined inputs."""
  bad_rank = tf.zeros([5, 32, 32])
  with self.assertRaisesRegexp(ValueError, 'Shape must be rank 4'):
    networks.discriminator(bad_rank)
  # NOTE(review): the fully-defined-shape check goes through
  # networks.compression_model rather than networks.discriminator --
  # confirm this is the intended target.
  unknown_dim = tf.placeholder(tf.float32, [3, None, 32, 3])
  with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
    networks.compression_model(unknown_dim)
Example #29
Source File: networks_test.py From yolo_v2 with Apache License 2.0 | 5 votes |
def test_discriminator_run(self):
  """Discriminator graph construction and evaluation both succeed."""
  zeros_batch = tf.zeros([3, 70, 70, 3])
  disc_tensor = networks.discriminator(zeros_batch)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(disc_tensor)
Example #30
Source File: networks_test.py From yolo_v2 with Apache License 2.0 | 5 votes |
def test_discriminator_graph(self):
  """Graph construction works across image size / batch size combinations."""
  for batch_size, patch_size in zip([3, 6], [70, 128]):
    tf.reset_default_graph()
    img = tf.ones([batch_size, patch_size, patch_size, 3])
    out = networks.discriminator(img)
    # Expect a rank-2 output whose leading dimension is the batch size.
    self.assertEqual(2, out.shape.ndims)
    self.assertEqual(batch_size, out.shape[0])