Python discriminator.Discriminator() Examples

The following are 13 code examples of discriminator.Discriminator(). Each snippet is taken from an open-source project; the source file and license are noted above it. You may also want to check out all available functions and classes of the discriminator module.
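Most of the examples below come from Keras-based projects whose Discriminator class wraps a compiled model in a .Discriminator attribute, while the TensorFlow examples (#8, #9, #13) construct the discriminator as a graph-building callable. As a rough orientation only, here is a minimal usage sketch in the style of the Keras examples; the constructor arguments mirror Examples #1-#3, but the class internals and the .predict call are assumptions about a typical wrapper, not code from any one project.

import numpy as np
from discriminator import Discriminator  # the module these examples import from

# Hypothetical usage sketch: build a discriminator for 28x28 grayscale images.
disc = Discriminator(height=28, width=28, channels=1)

# The wrappers below expose the compiled Keras model as .Discriminator,
# so scoring a batch of images looks like this (assumed, typical usage):
batch = np.random.rand(32, 28, 28, 1).astype("float32")
scores = disc.Discriminator.predict(batch)
print(scores.shape)  # e.g. (32, 1) for a binary real/fake output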
Example #1
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def __init__(self, width=28, height=28, channels=1, latent_size=100, epochs=50000, batch=32, checkpoint=50, model_type=-1):
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.model_type = model_type

        self.LATENT_SPACE_SIZE = latent_size

        self.generator = Generator(height=self.H, width=self.W, channels=self.C, latent_size=self.LATENT_SPACE_SIZE)
        self.discriminator = Discriminator(height=self.H, width=self.W, channels=self.C)
        self.gan = GAN(generator=self.generator.Generator, discriminator=self.discriminator.Discriminator)

        self.load_MNIST() 
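The constructor above ends by calling self.load_MNIST, which is not part of the excerpt. A minimal sketch of such a loader, assuming the standard keras.datasets.mnist loader and scaling to [-1, 1] (both assumptions commonly paired with a tanh generator output, not taken from the cookbook source), might look like this:

import numpy as np
from keras.datasets import mnist

def load_MNIST(self):
    # Hypothetical helper: load MNIST, scale pixels to [-1, 1], and add a
    # channel axis so images match the (W, H, C) reshape used in train().
    (X_train, _), (_, _) = mnist.load_data()
    self.X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    self.X_train = np.expand_dims(self.X_train, axis=3)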
Example #2
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def __init__(self, width=28, height=28, channels=1, latent_size=100, epochs=50000, batch=32, checkpoint=50, model_type=-1, data_path=''):
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.model_type = model_type

        self.LATENT_SPACE_SIZE = latent_size

        self.generator = Generator(height=self.H, width=self.W, channels=self.C, latent_size=self.LATENT_SPACE_SIZE, model_type='DCGAN')
        self.discriminator = Discriminator(height=self.H, width=self.W, channels=self.C, model_type='DCGAN')
        self.gan = GAN(generator=self.generator.Generator, discriminator=self.discriminator.Discriminator)

        #self.load_MNIST()
        self.load_npy(data_path) 
Example #3
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def __init__(self, side=16, latent_size=32, epochs=100, batch=32, checkpoint=50, data_dir=''):
        self.SIDE = side
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint

        self.load_3D_MNIST(data_dir)
        self.load_2D_encoded_MNIST()
        self.LATENT_SPACE_SIZE = latent_size
        self.LABELS = [1]

        self.generator = Generator(latent_size=self.LATENT_SPACE_SIZE)
        self.discriminator = Discriminator(side=self.SIDE)
        self.gan = GAN(generator=self.generator.Generator, discriminator=self.discriminator.Discriminator)
        
    # Translate data to color 
Example #4
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def train(self):
        for e in range(self.EPOCHS):
            # Train Discriminator
            # Make the training batch for this model be half real, half noise
            # Grab Real Images for this training batch
            count_real_images = int(self.BATCH/2)
            starting_index = randint(0, (len(self.X_train)-count_real_images))
            real_images_raw = self.X_train[ starting_index : (starting_index + count_real_images) ]
            x_real_images = real_images_raw.reshape( count_real_images, self.W, self.H, self.C )
            y_real_labels = np.ones([count_real_images,1])

            # Grab Generated Images for this training batch
            latent_space_samples = self.sample_latent_space(count_real_images)
            x_generated_images = self.generator.Generator.predict(latent_space_samples)
            y_generated_labels = np.zeros([self.BATCH-count_real_images,1])

            # Combine to train on the discriminator
            x_batch = np.concatenate( [x_real_images, x_generated_images] )
            y_batch = np.concatenate( [y_real_labels, y_generated_labels] )

            # Now, train the discriminator with this batch
            discriminator_loss = self.discriminator.Discriminator.train_on_batch(x_batch,y_batch)[0]
        
            # Generate Noise
            x_latent_space_samples = self.sample_latent_space(self.BATCH)
            y_generated_labels = np.ones([self.BATCH,1])
            generator_loss = self.gan.gan_model.train_on_batch(x_latent_space_samples,y_generated_labels)

            print ('Epoch: '+str(int(e))+', [Discriminator :: Loss: '+str(discriminator_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                        
            if e % self.CHECKPOINT == 0 :
                self.plot_checkpoint(e)
        return 
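The loop above relies on a self.sample_latent_space helper that the excerpt does not show. A minimal sketch is given below; the standard-normal distribution and the (instances, LATENT_SPACE_SIZE) shape are inferred from how the result is passed to Generator.predict, so treat them as assumptions rather than the cookbook's actual implementation.

import numpy as np

def sample_latent_space(self, instances):
    # Hypothetical helper: draw `instances` latent vectors shaped to match
    # the generator's input. The normal distribution is an assumption.
    return np.random.normal(0, 1, (instances, self.LATENT_SPACE_SIZE))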
Example #5
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def __init__(self, height=64, width=64, epochs=50000, batch=32, checkpoint=50, train_data_path_A='', train_data_path_B='', test_data_path_A='', test_data_path_B='', lambda_cycle=10.0, lambda_id=1.0):
        self.EPOCHS = epochs
        self.BATCH = batch
        self.RESIZE_HEIGHT = height
        self.RESIZE_WIDTH = width
        self.CHECKPOINT = checkpoint

        self.X_train_A, self.H_A, self.W_A, self.C_A = self.load_data(train_data_path_A)
        self.X_train_B, self.H_B, self.W_B, self.C_B  = self.load_data(train_data_path_B)
        self.X_test_A, self.H_A_test, self.W_A_test, self.C_A_test = self.load_data(test_data_path_A)
        self.X_test_B, self.H_B_test, self.W_B_test, self.C_B_test  = self.load_data(test_data_path_B)

        self.generator_A_to_B = Generator(height=self.H_A, width=self.W_A, channels=self.C_A)
        self.generator_B_to_A = Generator(height=self.H_B, width=self.W_B, channels=self.C_B)

        self.orig_A = Input(shape=(self.W_A, self.H_A, self.C_A))
        self.orig_B = Input(shape=(self.W_B, self.H_B, self.C_B))

        self.fake_B = self.generator_A_to_B.Generator(self.orig_A)
        self.fake_A = self.generator_B_to_A.Generator(self.orig_B)
        self.reconstructed_A = self.generator_B_to_A.Generator(self.fake_B)
        self.reconstructed_B = self.generator_A_to_B.Generator(self.fake_A)
        self.id_A = self.generator_B_to_A.Generator(self.orig_A)
        self.id_B = self.generator_A_to_B.Generator(self.orig_B)


        self.discriminator_A = Discriminator(height=self.H_A, width=self.W_A, channels=self.C_A)
        self.discriminator_B = Discriminator(height=self.H_B, width=self.W_B, channels=self.C_B)
        self.discriminator_A.trainable = False
        self.discriminator_B.trainable = False
        self.valid_A = self.discriminator_A.Discriminator(self.fake_A)
        self.valid_B = self.discriminator_B.Discriminator(self.fake_B)

        model_inputs  = [self.orig_A,self.orig_B]
        model_outputs = [self.valid_A, self.valid_B,self.reconstructed_A,self.reconstructed_B,self.id_A, self.id_B]
        self.gan = GAN(model_inputs=model_inputs,model_outputs=model_outputs,lambda_cycle=lambda_cycle,lambda_id=lambda_id) 
Example #6
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def __init__(self, height=55, width=35, channels=1, epochs=100, batch=16, checkpoint=50, sim_path='', real_path='', data_limit=0.001, generator_steps=2, discriminator_steps=1):
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.DATA_LIMIT = data_limit
        self.GEN_STEPS = generator_steps
        self.DISC_STEPS = discriminator_steps

        self.X_real = self.load_h5py(real_path)
        self.X_sim = self.load_h5py(sim_path)

        self.refiner = Generator(height=self.H, width=self.W, channels=self.C)
        self.discriminator = Discriminator(height=self.H, width=self.W, channels=self.C)
        self.discriminator.trainable = False

        self.synthetic_image = Input(shape=(self.H, self.W, self.C))
        self.real_or_fake = Input(shape=(self.H, self.W, self.C))


        self.refined_image = self.refiner.Generator(self.synthetic_image)
        self.discriminator_output = self.discriminator.Discriminator(self.real_or_fake)
        self.combined = self.discriminator.Discriminator(self.refined_image)

        model_inputs  = [self.synthetic_image]
        model_outputs = [self.refined_image, self.combined]
        self.gan = GAN(model_inputs=model_inputs,model_outputs=model_outputs) 
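Both datasets in this example are read with a self.load_h5py helper that is not included in the excerpt. A plausible minimal version using the h5py library is sketched here; the assumption that the file holds a single dataset under its first key is mine, not something stated in the source.

import h5py
import numpy as np

def load_h5py(self, data_path):
    # Hypothetical helper: load the first dataset found in an HDF5 file
    # into a NumPy array. Real projects may use a specific dataset name.
    with h5py.File(data_path, 'r') as f:
        first_key = list(f.keys())[0]
        return np.array(f[first_key])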
Example #7
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def train(self):
        for e in range(self.EPOCHS):

            b = 0
            X_real_temp = deepcopy(self.X_real)
            X_sim_temp = deepcopy(self.X_sim)
            combined_loss = np.zeros(shape=len(self.gan.gan_model.metrics_names))
            discriminator_loss_real = np.zeros(shape=len(self.discriminator.Discriminator.metrics_names))
            discriminator_loss_sim = np.zeros(shape=len(self.discriminator.Discriminator.metrics_names))

            while min(len(X_real_temp),len(X_sim_temp))>self.BATCH:
                # Keep track of Batches
                b=b+1

                count_real_images = int(self.BATCH)
                starting_indexs = randint(0, (min(len(X_real_temp),len(X_sim_temp))-count_real_images))
              
                real_images_raw = X_real_temp[ starting_indexs : (starting_indexs + count_real_images) ]
                real_images = real_images_raw.reshape( count_real_images, self.H, self.W, self.C )

                y_real = np.array([[[1.0, 0.0]] * self.discriminator.Discriminator.output_shape[1]] * self.BATCH)
                
                sim_images_raw = X_sim_temp[ starting_indexs : (starting_indexs + count_real_images) ]
                sim_images = sim_images_raw.reshape( count_real_images, self.H, self.W, self.C )

                y_sim = np.array([[[0.0, 1.0]] * self.discriminator.Discriminator.output_shape[1]] * self.BATCH)

                for _ in range(self.GEN_STEPS):
                    combined_loss = np.add(self.gan.gan_model.train_on_batch(sim_images,[sim_images, y_real]), combined_loss)
        
                for _ in range(self.DISC_STEPS):
                    improved_image_batch = self.refiner.Generator.predict_on_batch(sim_images)
                    discriminator_loss_real = np.add(self.discriminator.Discriminator.train_on_batch(real_images, y_real), discriminator_loss_real)
                    discriminator_loss_sim = np.add(self.discriminator.Discriminator.train_on_batch(improved_image_batch, y_sim),discriminator_loss_sim)

            print ('Epoch: '+str(int(e))+', [Real Discriminator :: Loss: '+str(discriminator_loss_real)+'], [ GAN :: Loss: '+str(combined_loss)+']')
                        
        return 
Example #8
Source File: graph_gan.py    From GraphGAN with MIT License
def build_discriminator(self):
        """initializing the discriminator"""

        with tf.variable_scope("discriminator"):
            self.discriminator = discriminator.Discriminator(n_node=self.n_node, node_emd_init=self.node_embed_init_d) 
Example #9
Source File: model.py    From CycleGAN-TensorFlow with MIT License
def __init__(self,
               X_train_file='',
               Y_train_file='',
               batch_size=1,
               image_size=256,
               use_lsgan=True,
               norm='instance',
               lambda1=10,
               lambda2=10,
               learning_rate=2e-4,
               beta1=0.5,
               ngf=64
              ):
    """
    Args:
      X_train_file: string, X tfrecords file for training
      Y_train_file: string Y tfrecords file for training
      batch_size: integer, batch size
      image_size: integer, image size
      lambda1: integer, weight for forward cycle loss (X->Y->X)
      lambda2: integer, weight for backward cycle loss (Y->X->Y)
      use_lsgan: boolean
      norm: 'instance' or 'batch'
      learning_rate: float, initial learning rate for Adam
      beta1: float, momentum term of Adam
      ngf: number of gen filters in first conv layer
    """
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.use_lsgan = use_lsgan
    use_sigmoid = not use_lsgan
    self.batch_size = batch_size
    self.image_size = image_size
    self.learning_rate = learning_rate
    self.beta1 = beta1
    self.X_train_file = X_train_file
    self.Y_train_file = Y_train_file

    self.is_training = tf.placeholder_with_default(True, shape=[], name='is_training')

    self.G = Generator('G', self.is_training, ngf=ngf, norm=norm, image_size=image_size)
    self.D_Y = Discriminator('D_Y',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)
    self.F = Generator('F', self.is_training, ngf=ngf, norm=norm, image_size=image_size)
    self.D_X = Discriminator('D_X',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)

    self.fake_x = tf.placeholder(tf.float32,
        shape=[batch_size, image_size, image_size, 3])
    self.fake_y = tf.placeholder(tf.float32,
        shape=[batch_size, image_size, image_size, 3]) 
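The __init__ above builds the full CycleGAN graph: two generators (G: X->Y and F: Y->X) and two discriminators (D_Y and D_X). As a rough usage sketch, and assuming the enclosing class is named CycleGAN as in the repository's model.py, instantiation with the documented arguments would look roughly like this (the tfrecords paths are placeholders):

from model import CycleGAN  # assumed class name for the __init__ shown above

# Hypothetical usage sketch with the documented constructor arguments.
cycle_gan = CycleGAN(
    X_train_file='data/tfrecords/X.tfrecords',  # placeholder path
    Y_train_file='data/tfrecords/Y.tfrecords',  # placeholder path
    batch_size=1,
    image_size=256,
    use_lsgan=True,
    norm='instance',
    lambda1=10,
    lambda2=10,
    learning_rate=2e-4,
    beta1=0.5,
    ngf=64,
)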
Example #10
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def train(self):
        for e in range(self.EPOCHS):
            b = 0
            X_train_temp = deepcopy(self.X_train)
            while len(X_train_temp)>self.BATCH:
                # Keep track of Batches
                b=b+1

                # Train Discriminator
                # Make the training batch for this model be half real, half noise
                # Grab Real Images for this training batch
                if self.flipCoin():
                    count_real_images = int(self.BATCH)
                    starting_index = randint(0, (len(X_train_temp)-count_real_images))
                    real_images_raw = X_train_temp[ starting_index : (starting_index + count_real_images) ]
                    #self.plot_check_batch(b,real_images_raw)
                    # Delete the images used until we have none left
                    X_train_temp = np.delete(X_train_temp,range(starting_index,(starting_index + count_real_images)),0)
                    x_batch = real_images_raw.reshape( count_real_images, self.W, self.H, self.C )
                    y_batch = np.ones([count_real_images,1])
                else:
                    # Grab Generated Images for this training batch
                    latent_space_samples = self.sample_latent_space(self.BATCH)
                    x_batch = self.generator.Generator.predict(latent_space_samples)
                    y_batch = np.zeros([self.BATCH,1])

                # Now, train the discriminator with this batch
                discriminator_loss = self.discriminator.Discriminator.train_on_batch(x_batch,y_batch)[0]
            
                # In practice, flipping the label when training the generator improves convergence
                if self.flipCoin(chance=0.9):
                    y_generated_labels = np.ones([self.BATCH,1])
                else:
                    y_generated_labels = np.zeros([self.BATCH,1])
                x_latent_space_samples = self.sample_latent_space(self.BATCH)
                generator_loss = self.gan.gan_model.train_on_batch(x_latent_space_samples,y_generated_labels)
    
                print ('Batch: '+str(int(b))+', [Discriminator :: Loss: '+str(discriminator_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                if b % self.CHECKPOINT == 0 :
                    label = str(e)+'_'+str(b)
                    self.plot_checkpoint(label)

            print ('Epoch: '+str(int(e))+', [Discriminator :: Loss: '+str(discriminator_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                        
            if e % self.CHECKPOINT == 0 :
                self.plot_checkpoint(e)
        return 
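This loop alternates between real and generated batches, and occasionally flips the generator's target labels, via a self.flipCoin helper that the excerpt omits. A minimal sketch, assuming it simply returns True with the given probability (the 0.5 default is inferred from the half-real/half-noise comment, not from the source):

from numpy.random import binomial

def flipCoin(self, chance=0.5):
    # Hypothetical helper: return True with probability `chance`.
    return binomial(1, chance) == 1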
Example #11
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def train(self):
        for e in range(self.EPOCHS):
            b = 0
            X_train_A_temp = deepcopy(self.X_train_A)
            X_train_B_temp = deepcopy(self.X_train_B)

            number_of_batches = len(self.X_train_A)
        
            for b in range(number_of_batches):
                # Train Discriminator
                # Grab Real Images for this training batch
                starting_ind = randint(0, (len(X_train_A_temp)-1))
                real_images_raw_A = X_train_A_temp[ starting_ind : (starting_ind + 1) ]
                real_images_raw_B = X_train_B_temp[ starting_ind : (starting_ind + 1) ]

                # Delete the images used until we have none left
                X_train_A_temp = np.delete(X_train_A_temp,range(starting_ind,(starting_ind + 1)),0)
                X_train_B_temp = np.delete(X_train_B_temp,range(starting_ind,(starting_ind + 1)),0)

                batch_A = real_images_raw_A.reshape( 1, self.W, self.H, self.C )
                batch_B = real_images_raw_B.reshape( 1, self.W, self.H, self.C )

                # PatchGAN
                y_valid = np.ones((1,)+(int(self.W / 2**4), int(self.W / 2**4), 1))
                y_fake = np.zeros((1,)+(int(self.W / 2**4), int(self.W / 2**4), 1))

                fake_A = self.generator.Generator.predict(batch_B)

                # Now, train the discriminator with this batch of reals
                discriminator_loss_real = self.discriminator.Discriminator.train_on_batch([batch_A,batch_B],y_valid)[0]
                discriminator_loss_fake = self.discriminator.Discriminator.train_on_batch([fake_A,batch_B],y_fake)[0]
                full_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)

                generator_loss = self.gan.gan_model.train_on_batch([batch_A, batch_B],[y_valid,batch_A])    

                print ('Batch: '+str(int(b))+', [Full Discriminator :: Loss: '+str(full_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                if b % self.CHECKPOINT == 0 :
                    label = str(e)+'_'+str(b)
                    self.plot_checkpoint(label)

            print ('Epoch: '+str(int(e))+', [Full Discriminator :: Loss: '+str(full_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                        
        return 
Example #12
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def train(self):
        
        count_generated_images = int(self.BATCH/2)
        count_real_images = int(self.BATCH/2)
        for e in range(self.EPOCHS):
            for label in self.LABELS:

                # Grab the Real 3D Samples
                all_3D_samples = self.X_train_3D[np.where(self.Y_train_3D==label)]
                starting_index = randint(0, (len(all_3D_samples)-count_real_images))
                real_3D_samples = all_3D_samples[ starting_index : int((starting_index + count_real_images)) ]
                y_real_labels =  np.ones([count_generated_images,1])

                # Grab Generated Images for this training batch
                all_encoded_samples = self.X_train_2D_encoded[np.where(self.Y_train_2D==label)]
                starting_index = randint(0, (len(all_encoded_samples)-count_generated_images))
                batch_encoded_samples = all_encoded_samples[ starting_index : int((starting_index + count_generated_images)) ]
                batch_encoded_samples = batch_encoded_samples.reshape( count_generated_images, 1, 1, 1,self.LATENT_SPACE_SIZE)

                x_generated_3D_samples = self.generator.Generator.predict(batch_encoded_samples)
                y_generated_labels = np.zeros([count_generated_images,1])

                # Combine to train on the discriminator
                x_batch = np.concatenate( [real_3D_samples, x_generated_3D_samples] )
                y_batch = np.concatenate( [y_real_labels, y_generated_labels] )

                # Now, train the discriminator with this batch
                self.discriminator.Discriminator.trainable = False
                discriminator_loss = self.discriminator.Discriminator.train_on_batch(x_batch,y_batch)[0]
                self.discriminator.Discriminator.trainable = True

                # Generate Noise
                starting_index = randint(0, (len(all_encoded_samples)-self.BATCH))
                x_batch_encoded_samples = all_encoded_samples[ starting_index : int((starting_index + self.BATCH)) ]
                x_batch_encoded_samples = x_batch_encoded_samples.reshape( int(self.BATCH), 1, 1, 1,self.LATENT_SPACE_SIZE)
                y_generated_labels = np.ones([self.BATCH,1])
                generator_loss = self.gan.gan_model.train_on_batch(x_batch_encoded_samples,y_generated_labels)
                print ('Epoch: '+str(int(e))+' Label: '+str(int(label))+', [Discriminator :: Loss: '+str(discriminator_loss)+'], [ Generator :: Loss: '+str(generator_loss)+']')
                if e % self.CHECKPOINT == 0 and e != 0 :
                    self.plot_checkpoint(e,label)
            
        return 
Example #13
Source File: model.py    From Cycle-Dehaze with MIT License
def __init__(self,
               X_train_file='',
               Y_train_file='',
               batch_size=1,
               image_size1=256,
               image_size2=256,
               use_lsgan=True,
               norm='instance',
               lambda1=10.0,
               lambda2=10.0,
               learning_rate=1e-4,
               beta1=0.5,
               ngf=64
              ):
    """
    Args:
      X_train_file: string, X tfrecords file for training
      Y_train_file: string Y tfrecords file for training
      batch_size: integer, batch size
      image_size1: integer, image height
      image_size2: integer, image width
      lambda1: integer, weight for forward cycle loss (X->Y->X)
      lambda2: integer, weight for backward cycle loss (Y->X->Y)
      use_lsgan: boolean
      norm: 'instance' or 'batch'
      learning_rate: float, initial learning rate for Adam
      beta1: float, momentum term of Adam
      ngf: number of gen filters in first conv layer
    """
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.use_lsgan = use_lsgan
    use_sigmoid = not use_lsgan
    self.batch_size = batch_size
    self.image_size1 = image_size1
    self.image_size2 = image_size2
    self.learning_rate = learning_rate
    self.beta1 = beta1
    self.X_train_file = X_train_file
    self.Y_train_file = Y_train_file

    self.is_training = tf.placeholder_with_default(True, shape=[], name='is_training')


    self.G = Generator('G', self.is_training, ngf=ngf, norm=norm, image_size1=image_size1, image_size2=image_size2)
    self.D_Y = Discriminator('D_Y',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)
    self.F = Generator('F', self.is_training, ngf=ngf, norm=norm, image_size1=image_size1, image_size2=image_size2)
    self.D_X = Discriminator('D_X',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)

    self.fake_x = tf.placeholder(tf.float32,
        shape=[batch_size, image_size1, image_size2, 3])
    self.fake_y = tf.placeholder(tf.float32,
        shape=[batch_size, image_size1, image_size2, 3])

    self.vgg = vgg16.Vgg16()