Python ops.conv2d() Examples

The following are 30 code examples of ops.conv2d(), collected from the open-source project credited above each example. Note that ops is a project-local module in each case (with PyTorch counterparts in two examples), not a single shared API, so the exact signature of conv2d differs from example to example.
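Because each project ships its own wrapper, it helps to know the rough shape these wrappers share. The sketch below is a minimal DCGAN-style conv2d, assuming TF1-style variable scoping; it is a reference point only, not the exact ops.conv2d of any project on this page:

import tensorflow as tf

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name="conv2d"):
    # Minimal DCGAN-style convolution wrapper (hypothetical reference).
    with tf.variable_scope(name):
        # Filter shape: [height, width, in_channels, out_channels].
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(conv, b)

Most of the TF1 examples below follow this pattern: a stride-2 convolution that halves the spatial resolution, created inside a named variable scope so the network can be rebuilt with reuse=True.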
Example #1
Source File: gan.py    From adagan with BSD 3-Clause "New" or "Revised" License
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Discriminator function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = ops.lrelu(h0)
            h1 = ops.conv2d(opts, h0, num_filters * 2, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = ops.lrelu(h1)
            h2 = ops.conv2d(opts, h1, num_filters * 4, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = ops.lrelu(h2)
            h3 = ops.linear(opts, h2, 1, scope='h3_lin')

        return h3 
Example #2
Source File: discriminator.py    From WGAN-GP-TensorFlow with MIT License
def __call__(self, input):
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                print('\033[93m'+self.name+'\033[0m')
            _ = input
            num_channel = [32, 64, 128, 256, 256, 512, 512, 512, 512]
            assert 0 < self._num_conv <= len(num_channel)  # num_channel has 9 entries
            for i in range(self._num_conv):
                _ = conv2d(_, num_channel[i], self._is_train, info=not self._reuse,
                           norm=self._norm_type, name='conv{}'.format(i+1))
                if self._num_conv - i <= self._num_res_block:
                    _ = conv2d_res(
                            _, self._is_train, info=not self._reuse,
                            norm=self._norm_type,
                            name='res_block{}'.format(self._num_res_block - self._num_conv + i + 1))

            _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                       info=not self._reuse, norm='none', name='conv{}'.format(i+2))
            _ = conv2d(_, 1, self._is_train, k=1, s=1, info=not self._reuse,
                       activation_fn=None, norm='none',
                       name='conv{}'.format(i+3))

            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return _ 
Example #3
Source File: inception_model.py    From InceptionV3_TensorFlow with MIT License
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
  """Yields the scope with the default parameters for inception_v3.

  Args:
    weight_decay: the weight decay for weights variables.
    stddev: standard deviation of the truncated gaussian weight distribution.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.

  Yields:
    an arg_scope with the parameters needed for inception_v3.
  """
  # Set weight_decay for weights in Conv and FC layers.
  with scopes.arg_scope([ops.conv2d, ops.fc],
                        weight_decay=weight_decay):
    # Set stddev, activation and parameters for batch_norm.
    with scopes.arg_scope([ops.conv2d],
                          stddev=stddev,
                          activation=tf.nn.relu,
                          batch_norm_params={
                              'decay': batch_norm_decay,
                              'epsilon': batch_norm_epsilon}) as arg_scope:
      yield arg_scope 
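The Yields section implies this excerpt is wrapped as a context manager (the decorator is not visible here). Under that assumption, a hypothetical use looks like:

# Hypothetical usage sketch, assuming inception_v3_parameters is decorated
# with @contextlib.contextmanager in the source file; inception_v3 and its
# arguments are placeholders, not taken from this excerpt.
with inception_v3_parameters(weight_decay=0.00004, stddev=0.1):
    # Every ops.conv2d / ops.fc built inside the block picks up the weight
    # decay, initializer, activation, and batch-norm defaults set above.
    logits, endpoints = inception_v3(images, num_classes=1001)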
Example #4
Source File: segan_model.py    From Awesome-GANs with MIT License
def discriminator(self, x, reuse=False):
        with tf.variable_scope("discriminator", reuse=reuse):
            def residual_block(x, name='residual_block'):
                # Give each block its own scope so the conv2d variables
                # do not collide across loop iterations.
                with tf.variable_scope(name):
                    x = ops.conv2d(x)
                    x = self.ops(x)
                    x = tf.nn.leaky_relu(x)
                    return x

            # A raw waveform arrives as (batch, samples); add a channel axis.
            # len() is not defined for a symbolic Tensor, so check the static rank.
            if len(x.get_shape()) == 2:
                x = tf.expand_dims(x, axis=-1)
            elif len(x.get_shape()) != 3:
                raise ValueError("[-] disc: waveform must be 2-D or 3-D")

            for idx, f in enumerate(self.num_blocks):
                x = residual_block(x, name='residual_block_%d' % idx)

            return x
Example #5
Source File: models.py    From wae with BSD 3-Clause "New" or "Revised" License
def dcgan_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = inputs
    for i in xrange(num_layers):
        scale = 2**(num_layers - i - 1)
        layer_x = ops.conv2d(opts, layer_x, num_units / scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hfinal_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts, layer_x,
                                opts['zdim'], scope='log_sigmas_lin')
        return mean, log_sigmas 
Example #6
Source File: models.py    From MGAN with MIT License
def _create_discriminator(self, x, train=True, reuse=False, name="discriminator"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            h = x
            for i in range(self.num_conv_layers):
                h = lrelu(batch_norm(conv2d(h, self.num_dis_feature_maps * (2 ** i),
                                            stddev=0.02, name="d_h{}_conv".format(i)),
                                     is_training=train,
                                     scope="d_bn{}".format(i)))

            dim = h.get_shape()[1:].num_elements()
            h = tf.reshape(h, [-1, dim])
            d_bin_logits = linear(h, 1, scope='d_bin_logits')
            d_mul_logits = linear(h, self.num_gens, scope='d_mul_logits')
        return d_bin_logits, d_mul_logits 
Example #7
Source File: ResidualGAN.py    From Residual_Image_Learning_GAN with MIT License
def encode_decode_2(self, x, reuse=False):

        with tf.variable_scope("encode_decode_2") as scope:
            if reuse == True:
                scope.reuse_variables()

            conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))

            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
            # for x_{1}
            de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                                   name='e_d1', k_h=3, k_w=3), scope='e_in4'))
            de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                                   name='e_d2', k_w=3, k_h=3), scope='e_in5'))
            x_tilde = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')

            return x_tilde 
Example #8
Source File: ResidualGAN.py    From Residual_Image_Learning_GAN with MIT License
def encode_decode_1(self, x, reuse=False):

        with tf.variable_scope("encode_decode_1") as scope:
            if reuse == True:
                scope.reuse_variables()

            conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
            # for x_{1}
            de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                                   name='e_d1', k_h=3, k_w=3), scope='e_in4'))
            de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                                   name='e_d2', k_w=3, k_h=3), scope='e_in5'))
            x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')

            return x_tilde1 
Example #9
Source File: ResidualGAN.py    From Residual_Image_Learning_GAN with MIT License
def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse == True:
                scope.reuse_variables()

            conv1 = lrelu(conv2d(x_var, output_dim=64, name='dis_conv1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2'))
            conv4 = conv2d(conv3, output_dim=512, name='dis_conv4')
            middle_conv = conv4
            conv4 = lrelu(instance_norm(conv4, scope='dis_bn3'))
            conv5 = lrelu(instance_norm(conv2d(conv4, output_dim=1024, name='dis_conv5'), scope='dis_bn4'))

            conv6 = conv2d(conv5, output_dim=2, k_w=4, k_h=4, d_h=1, d_w=1, padding='VALID', name='dis_conv6')

            return conv6, middle_conv 
Example #10
Source File: models.py    From CausalGAN with MIT License
def discriminator_on_z(image, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels,variables 
Example #11
Source File: models.py    From CausalGAN with MIT License
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits,variables 
Example #12
Source File: models.py    From CausalGAN with MIT License
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables 
Example #13
Source File: model_tmmd.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0 
Example #14
Source File: discriminator.py    From SSGAN-Tensorflow with MIT License
def __call__(self, input):
        with tf.variable_scope(self.name, reuse=self._reuse):
            if not self._reuse:
                print('\033[93m'+self.name+'\033[0m')
            _ = input
            num_channel = [32, 64, 128, 256, 256, 512]
            num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(np.int)
            for i in range(num_layer):
                ch = num_channel[i] if i < len(num_channel) else 512
                _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                           norm=self._norm_type, name='conv{}'.format(i+1))
            _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                       info=not self._reuse, norm='None', name='conv{}'.format(i+2))
            _ = conv2d(_, self._num_class+1, self._is_train, k=1, s=1, info=not self._reuse,
                       activation_fn=None, norm='None',
                       name='conv{}'.format(i+3))
            _ = tf.squeeze(_)
            if not self._reuse: 
                log.info('discriminator output {}'.format(_.shape.as_list()))
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return tf.nn.sigmoid(_), _ 
Example #15
Source File: model_mmd.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0 
Example #16
Source File: vae.py    From adagan with BSD 3-Clause "New" or "Revised" License
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Encoder function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters / 8, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = tf.nn.relu(h0)
            h1 = ops.conv2d(opts, h0, num_filters / 4, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = tf.nn.relu(h1)
            h2 = ops.conv2d(opts, h1, num_filters / 2, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = tf.nn.relu(h2)
            h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
            h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
            h3 = tf.nn.relu(h3)
            # Already has NaNs!!
            latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
            log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')

        return latent_mean, log_latent_sigmas 
Example #17
Source File: model_mmd_fm.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0 
Example #18
Source File: generator.py    From self-attention-gan with Apache License 2.0
def block_no_sn(x, labels, out_channels, num_classes, is_training, name):
  """Builds the residual blocks used in the generator.

  Compared with block, optimized_block always downsamples the spatial resolution
  of the input vector by a factor of 4.

  Args:
    x: The 4D input vector.
    labels: The conditional labels in the generation.
    out_channels: Number of features in the output layer.
    num_classes: Number of classes in the labels.
    name: The variable scope name for the block.
  Returns:
    A `Tensor` representing the output of the operation.
  """
  with tf.variable_scope(name):
    bn0 = ops.ConditionalBatchNorm(num_classes, name='cbn_0')
    bn1 = ops.ConditionalBatchNorm(num_classes, name='cbn_1')
    x_0 = x
    x = tf.nn.relu(bn0(x, labels, is_training))
    x = usample(x)
    x = ops.conv2d(x, out_channels, 3, 3, 1, 1, name='conv1')
    x = tf.nn.relu(bn1(x, labels, is_training))
    x = ops.conv2d(x, out_channels, 3, 3, 1, 1, name='conv2')

    x_0 = usample(x_0)
    x_0 = ops.conv2d(x_0, out_channels, 1, 1, 1, 1, name='conv3')

    return x_0 + x 
Example #19
Source File: model_mnist.py    From Conditional-GAN with MIT License
def dis_net(self, images, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse == True:
                scope.reuse_variables()

            # mnist data's shape is (28 , 28 , 1)
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat
            concat_data = conv_cond_concat(images, yb)

            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            conv1 = conv_cond_concat(conv1, yb)
            tf.add_to_collection('ac_1', conv1)


            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)

            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            conv2 = tf.concat([conv2, y], 1)

            f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
            f1 = tf.concat([f1, y], 1)

            out = fully_connect(f1, output_size=1, scope='dis_fully2',  initializer = xavier_initializer())

            return tf.nn.sigmoid(out), out 
Example #20
Source File: model_def.py    From sparse_gen with MIT License
def discriminator(hparams, x, train, reuse):

    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit 
Example #21
Source File: pot.py    From adagan with BSD 3-Clause "New" or "Revised" License
def compute_moments(_inputs, moments=[2, 3]):
    """From an image input, compute moments"""
    _inputs_sq = tf.square(_inputs)
    _inputs_cube = tf.pow(_inputs, 3)
    height = int(_inputs.get_shape()[1])
    width = int(_inputs.get_shape()[2])
    channels = int(_inputs.get_shape()[3])
    def ConvFlatten(x, kernel_size):
#                 w_sum = tf.ones([kernel_size, kernel_size, channels, 1]) / (kernel_size * kernel_size * channels)
        w_sum = tf.eye(num_rows=channels, num_columns=channels, batch_shape=[kernel_size * kernel_size])
        w_sum = tf.reshape(w_sum, [kernel_size, kernel_size, channels, channels])
        w_sum = w_sum / (kernel_size * kernel_size)
        sum_ = tf.nn.conv2d(x, w_sum, strides=[1, 1, 1, 1], padding='VALID')
        size = prod_dim(sum_)
        assert size == (height - kernel_size + 1) * (width - kernel_size + 1) * channels, size
        return tf.reshape(sum_, [-1, size])
    outputs = []
    for size in [3, 4, 5]:
        mean = ConvFlatten(_inputs, size)
        square = ConvFlatten(_inputs_sq, size)
        var = square - tf.square(mean)
        if 2 in moments:
            outputs.append(var)
        if 3 in moments:
            cube = ConvFlatten(_inputs_cube, size)
            skewness = cube - 3.0 * mean * var - tf.pow(mean, 3)  # Unnormalized
            outputs.append(skewness)
    return tf.concat(outputs, 1) 
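The identity-kernel convolution above computes windowed raw moments E[x], E[x^2], and E[x^3] per channel; the code then converts them to central moments via the standard identities (with \mu = E[x]):

\sigma^2 = E[x^2] - \mu^2, \qquad E[(x-\mu)^3] = E[x^3] - 3\mu E[x^2] + 2\mu^3 = E[x^3] - 3\mu\sigma^2 - \mu^3,

which is exactly var = square - tf.square(mean) and skewness = cube - 3.0 * mean * var - tf.pow(mean, 3) in the code (unnormalized, as the comment notes: the usual division by \sigma^3 is omitted).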
Example #22
Source File: PGGAN.py    From progressive_growing_of_gans_tensorflow with MIT License
def generate(self, z_var, pg=1, t=False, alpha_trans=0.0):
        with tf.variable_scope('generator') as scope:

            de = tf.reshape(Pixl_Norm(z_var), [self.batch_size, 1, 1, int(self.get_nf(1))])
            de = conv2d(de, output_dim=self.get_nf(1), k_h=4, k_w=4, d_w=1, d_h=1, use_wscale=self.use_wscale, gain=np.sqrt(2)/4, padding='Other', name='gen_n_1_conv')
            de = Pixl_Norm(lrelu(de))
            de = tf.reshape(de, [self.batch_size, 4, 4, int(self.get_nf(1))])
            de = conv2d(de, output_dim=self.get_nf(1), d_w=1, d_h=1, use_wscale=self.use_wscale, name='gen_n_2_conv')
            de = Pixl_Norm(lrelu(de))

            for i in range(pg - 1):
                if i == pg - 2 and t:
                    #To RGB
                    de_iden = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1, use_wscale=self.use_wscale,
                                     name='gen_y_rgb_conv_{}'.format(de.shape[1]))
                    de_iden = upscale(de_iden, 2)

                de = upscale(de, 2)
                de = Pixl_Norm(lrelu(
                    conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1, use_wscale=self.use_wscale, name='gen_n_conv_1_{}'.format(de.shape[1]))))
                de = Pixl_Norm(lrelu(
                    conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1, use_wscale=self.use_wscale, name='gen_n_conv_2_{}'.format(de.shape[1]))))

            #To RGB
            de = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1, use_wscale=self.use_wscale, gain=1, name='gen_y_rgb_conv_{}'.format(de.shape[1]))

            if pg == 1: return de
            if t: de = (1 - alpha_trans) * de_iden + alpha_trans * de

            return de 
Example #23
Source File: PGGAN.py    From progressive_growing_of_gans_tensorflow with MIT License
def discriminate(self, conv, reuse=False, pg=1, t=False, alpha_trans=0.01):
        #dis_as_v = []
        with tf.variable_scope("discriminator") as scope:

            if reuse == True:
                scope.reuse_variables()
            if t:
                conv_iden = downscale2d(conv)
                #from RGB
                conv_iden = lrelu(conv2d(conv_iden, output_dim= self.get_nf(pg - 2), k_w=1, k_h=1, d_h=1, d_w=1, use_wscale=self.use_wscale,
                           name='dis_y_rgb_conv_{}'.format(conv_iden.shape[1])))
            # fromRGB
            conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 1), k_w=1, k_h=1, d_w=1, d_h=1, use_wscale=self.use_wscale, name='dis_y_rgb_conv_{}'.format(conv.shape[1])))

            for i in range(pg - 1):
                conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 1 - i), d_h=1, d_w=1, use_wscale=self.use_wscale,
                                    name='dis_n_conv_1_{}'.format(conv.shape[1])))
                conv = lrelu(conv2d(conv, output_dim=self.get_nf(pg - 2 - i), d_h=1, d_w=1, use_wscale=self.use_wscale,
                                                      name='dis_n_conv_2_{}'.format(conv.shape[1])))
                conv = downscale2d(conv)
                if i == 0 and t:
                    conv = alpha_trans * conv + (1 - alpha_trans) * conv_iden

            conv = MinibatchstateConcat(conv)
            conv = lrelu(
                conv2d(conv, output_dim=self.get_nf(1), k_w=3, k_h=3, d_h=1, d_w=1, use_wscale=self.use_wscale, name='dis_n_conv_1_{}'.format(conv.shape[1])))
            conv = lrelu(
                conv2d(conv, output_dim=self.get_nf(1), k_w=4, k_h=4, d_h=1, d_w=1, use_wscale=self.use_wscale, padding='VALID', name='dis_n_conv_2_{}'.format(conv.shape[1])))
            conv = tf.reshape(conv, [self.batch_size, -1])

            #for D
            output = fully_connect(conv, output_size=1, use_wscale=self.use_wscale, gain=1, name='dis_n_fully')

            return tf.nn.sigmoid(output), output 
Example #24
Source File: models.py    From wae with BSD 3-Clause "New" or "Revised" License
def began_decoder(opts, noise, is_training=False, reuse=False):

    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']
    num_layers = opts['g_num_layers']
    batch_size = tf.shape(noise)[0]

    h0 = ops.linear(opts, noise, num_units * 8 * 8, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, 8, 8, num_units])
    layer_x = h0
    for i in xrange(num_layers):
        if i % 3 < 2:
            # Don't change resolution
            layer_x = ops.conv2d(opts, layer_x, num_units,
                                 d_h=1, d_w=1, scope='h%d_conv' % i)
            layer_x = tf.nn.elu(layer_x)
        else:
            if i != num_layers - 1:
                # Upsampling by factor of 2 with NN
                scale = 2 ** (i / 3 + 1)
                layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
                                          scope='h%d_upsample' % i, reuse=reuse)
                # Skip connection
                append = ops.upsample_nn(h0, [scale * 8, scale * 8],
                                          scope='h%d_skipup' % i, reuse=reuse)
                layer_x = tf.concat([layer_x, append], axis=3)

    last_h = ops.conv2d(opts, layer_x, output_shape[-1],
                        d_h=1, d_w=1, scope='hfinal_conv')
    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h), last_h
    else:
        return tf.nn.sigmoid(last_h), last_h 
Example #25
Source File: models.py    From wae with BSD 3-Clause "New" or "Revised" License
def began_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    assert num_units == opts['g_num_filters'], \
        'BEGAN requires same number of filters in encoder and decoder'
    num_layers = opts['e_num_layers']
    layer_x = ops.conv2d(opts, inputs, num_units, scope='hfirst_conv')
    for i in xrange(num_layers):
        if i % 3 < 2:
            if i != num_layers - 2:
                ii = i - (i / 3)
                scale = (ii + 1 - ii / 2)
            else:
                ii = i - (i / 3)
                scale = (ii - (ii - 1) / 2)
            layer_x = ops.conv2d(opts, layer_x, num_units * scale, d_h=1, d_w=1,
                                 scope='h%d_conv' % i)
            layer_x = tf.nn.elu(layer_x)
        else:
            if i != num_layers - 1:
                layer_x = ops.downsample(layer_x, scope='h%d_maxpool' % i,
                                         reuse=reuse)
    # Tensor should be [N, 8, 8, filters] at this point
    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hfinal_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts, layer_x,
                                opts['zdim'], scope='log_sigmas_lin')
        return mean, log_sigmas 
Example #26
Source File: pot.py    From adagan with BSD 3-Clause "New" or "Revised" License
def began_dec(self, opts, noise, is_training, reuse, keep_prob):
        """ Architecture reported here: https://arxiv.org/pdf/1703.10717.pdf
        """

        output_shape = self._data.data_shape
        num_units = opts['g_num_filters']
        num_layers = opts['g_num_layers']
        batch_size = tf.shape(noise)[0]

        h0 = ops.linear(
            opts, noise, num_units * 8 * 8, scope='h0_lin')
        h0 = tf.reshape(h0, [-1, 8, 8, num_units])
        layer_x = h0
        for i in xrange(num_layers):
            if i % 3 < 2:
                # Don't change resolution
                layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i)
                layer_x = tf.nn.elu(layer_x)
            else:
                if i != num_layers - 1:
                    # Upsampling by factor of 2 with NN
                    scale = 2 ** (i / 3 + 1)
                    layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
                                              scope='h%d_upsample' % i, reuse=reuse)
                    # Skip connection
                    append = ops.upsample_nn(h0, [scale * 8, scale * 8],
                                              scope='h%d_skipup' % i, reuse=reuse)
                    layer_x = tf.concat([layer_x, append], axis=3)

        last_h = ops.conv2d(opts, layer_x, output_shape[-1], d_h=1, d_w=1, scope='hlast_conv')

        if opts['input_normalize_sym']:
            return tf.nn.tanh(last_h)
        else:
            return tf.nn.sigmoid(last_h) 
Example #27
Source File: generator.py    From self-attention-gan with Apache License 2.0
def generator_old(zs,
              target_class,
              gf_dim,
              num_classes,
              is_training=True,
              scope='Generator'):
  """Builds the generator graph propagating from z to x.

  Args:
    zs: The list of noise tensors.
    target_class: The conditional labels in the generation.
    gf_dim: The gf dimension.
    num_classes: Number of classes in the labels.
    scope: Optional scope for `variable_op_scope`.

  Returns:
    outputs: The output layer of the generator.
  """

  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
    # project `z` and reshape
    act0 = ops.linear(zs, gf_dim * 16 * 4 * 4, scope='g_h0')
    act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])

    act1 = block_no_sn(act0, target_class, gf_dim * 16,
                 num_classes, is_training, 'g_block1')  # 8 * 8
    act2 = block_no_sn(act1, target_class, gf_dim * 8,
                 num_classes, is_training, 'g_block2')  # 16 * 16
    act3 = block_no_sn(act2, target_class, gf_dim * 4,
                 num_classes, is_training, 'g_block3')  # 32 * 32
    act4 = block_no_sn(act3, target_class, gf_dim * 2,
                 num_classes, is_training, 'g_block4')  # 64 * 64
    act5 = block_no_sn(act4, target_class, gf_dim,
                 num_classes, is_training, 'g_block5')  # 128 * 128
    bn = ops.batch_norm(name='g_bn')

    act5 = tf.nn.relu(bn(act5, is_training))
    act6 = ops.conv2d(act5, 3, 3, 3, 1, 1, name='g_conv_last')
    out = tf.nn.tanh(act6)
    print('GAN baseline with moving average')
    return out 
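For orientation, a hypothetical call to the generator above; the batch size, latent dimension, and label tensor are assumptions, not taken from the repository:

# Hypothetical usage sketch. Per the block comments, the output grows from
# 4x4 to 128x128, and the final tanh squashes pixels to [-1, 1].
zs = tf.random_normal([batch_size, 128])  # latent noise (assumed shape)
images = generator_old(zs, target_class, gf_dim=64, num_classes=1000)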
Example #28
Source File: model.py    From Self-Supervised-Gans-Pytorch with MIT License
def __init__(self, spectral_normed, num_rotation,
                ssup, channel, resnet = False):
        super(Discriminator, self).__init__()
        self.resnet = resnet
        self.num_rotation = num_rotation
        self.ssup = ssup

        self.lrelu = nn.LeakyReLU()
        self.relu = nn.ReLU()
        self.conv1 = conv2d(channel, 64, kernel_size = 3, stride = 1, padding = 1,
                            spectral_normed = spectral_normed)
        self.conv2 = conv2d(64, 128, spectral_normed = spectral_normed,
                            padding = 0)
        self.conv3 = conv2d(128, 256, spectral_normed = spectral_normed,
                            padding = 0)
        self.conv4 = conv2d(256, 512, spectral_normed = spectral_normed,
                            padding = 0)
        self.fully_connect_gan1 = nn.Linear(512, 1)
        self.fully_connect_rot1 = nn.Linear(512, 4)
        self.softmax = nn.Softmax()

        self.re1 = Residual_D(channel, 128, spectral_normed = spectral_normed,
                            down_sampling = True, is_start = True)
        self.re2 = Residual_D(128, 128, spectral_normed = spectral_normed,
                            down_sampling = True)
        self.re3 = Residual_D(128, 128, spectral_normed = spectral_normed)
        self.re4 = Residual_D(128, 128, spectral_normed = spectral_normed)
        self.fully_connect_gan2 = nn.Linear(128, 1)
        self.fully_connect_rot2 = nn.Linear(128, 4)
        self.sigmoid = nn.Sigmoid() 
Example #29
Source File: main.py    From SoundNet-tensorflow with MIT License
def add_generator(self, name_scope='SoundNet'):
        with tf.variable_scope(name_scope) as scope:
            self.layers = {}
            
            # Stream one: conv1 ~ conv7
            self.layers[1] = conv2d(self.sound_input_placeholder, 1, 16, k_h=64, d_h=2, p_h=32, name_scope='conv1')
            self.layers[2] = batch_norm(self.layers[1], 16, self.config['eps'], name_scope='conv1')
            self.layers[3] = relu(self.layers[2], name_scope='conv1')
            self.layers[4] = maxpool(self.layers[3], k_h=8, d_h=8, name_scope='conv1')

            self.layers[5] = conv2d(self.layers[4], 16, 32, k_h=32, d_h=2, p_h=16, name_scope='conv2')
            self.layers[6] = batch_norm(self.layers[5], 32, self.config['eps'], name_scope='conv2')
            self.layers[7] = relu(self.layers[6], name_scope='conv2')
            self.layers[8] = maxpool(self.layers[7], k_h=8, d_h=8, name_scope='conv2')

            self.layers[9] = conv2d(self.layers[8], 32, 64, k_h=16, d_h=2, p_h=8, name_scope='conv3')
            self.layers[10] = batch_norm(self.layers[9], 64, self.config['eps'], name_scope='conv3')
            self.layers[11] = relu(self.layers[10], name_scope='conv3')

            self.layers[12] = conv2d(self.layers[11], 64, 128, k_h=8, d_h=2, p_h=4, name_scope='conv4')
            self.layers[13] = batch_norm(self.layers[12], 128, self.config['eps'], name_scope='conv4')
            self.layers[14] = relu(self.layers[13], name_scope='conv4')

            self.layers[15] = conv2d(self.layers[14], 128, 256, k_h=4, d_h=2, p_h=2, name_scope='conv5')
            self.layers[16] = batch_norm(self.layers[15], 256, self.config['eps'], name_scope='conv5')
            self.layers[17] = relu(self.layers[16], name_scope='conv5')
            self.layers[18] = maxpool(self.layers[17], k_h=4, d_h=4, name_scope='conv5')

            self.layers[19] = conv2d(self.layers[18], 256, 512, k_h=4, d_h=2, p_h=2, name_scope='conv6')
            self.layers[20] = batch_norm(self.layers[19], 512, self.config['eps'], name_scope='conv6')
            self.layers[21] = relu(self.layers[20], name_scope='conv6')

            self.layers[22] = conv2d(self.layers[21], 512, 1024, k_h=4, d_h=2, p_h=2, name_scope='conv7')
            self.layers[23] = batch_norm(self.layers[22], 1024, self.config['eps'], name_scope='conv7')
            self.layers[24] = relu(self.layers[23], name_scope='conv7')

            # Split one: conv8, conv8_2
            # NOTE: here we use a padding of 2 to skip an unknown error
            # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/common_shape_fns.cc#L45
            self.layers[25] = conv2d(self.layers[24], 1024, 1000, k_h=8, d_h=2, p_h=2, name_scope='conv8')
            self.layers[26] = conv2d(self.layers[24], 1024, 401, k_h=8, d_h=2, p_h=2, name_scope='conv8_2') 
Example #30
Source File: model.py    From Self-Supervised-Gans-Pytorch with MIT License
def __init__(self, z_size, channel, resnet = False, output_size = 32):
        super(Generator, self).__init__()
        s = 4
        self.output_size = output_size
        if self.output_size == 32:
            s = 4
        if self.output_size == 48:
            s = 6
        self.s = s
        self.z_size = z_size
        self.resnet = resnet
        self.fully_connect = nn.Linear(z_size, s*s*256)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

        self.deconv1 = deconv2d(256, 256, padding = 0)
        self.bn1 = nn.BatchNorm2d(256)
        self.deconv2 = deconv2d(256, 128, padding = 0) 
        self.bn2 = nn.BatchNorm2d(128)
        self.deconv3 = deconv2d(128, 64, padding = 0)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = conv2d(64, channel, padding = 1, kernel_size = 3, stride = 1)
        self.conv_res4 = conv2d(256,channel, padding = 1, kernel_size = 3, stride = 1)

        self.re1 = Residual_G(256, 256, up_sampling = True)
        self.re2 = Residual_G(256, 256, up_sampling = True)
        self.re3 = Residual_G(256, 256, up_sampling = True)
        self.bn = nn.BatchNorm2d(256)
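A hypothetical instantiation of this module (the forward pass is defined elsewhere in model.py and is not part of this excerpt; the sizes are assumptions):

import torch

# Hypothetical usage sketch; z_size=128 and channel=3 are assumptions.
g = Generator(z_size=128, channel=3, resnet=False, output_size=32)
z = torch.randn(16, 128)  # a batch of 16 latent vectors
# g(z) would map each latent vector to a 3x32x32 image once forward()
# (not shown in this excerpt) is invoked.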