Python ops.batch_norm() Examples
The following are 30 code examples of ops.batch_norm().
You may also want to check out all available functions/classes of the module ops.
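Two calling conventions for batch_norm appear in the examples below. Projects built on the wae/adagan codebase use a functional form, ops.batch_norm(opts, h, is_training, reuse, scope=...), which normalizes the tensor directly. DCGAN-derived projects (csgm, sparse_gen, opt-mmd, SoundNet, COCO-GAN) instead construct a batch_norm object once per layer and apply it like a layer. As orientation only, here is a minimal sketch of that class-based wrapper, assuming a thin layer over tf.contrib.layers.batch_norm as in many DCGAN forks; the exact signature varies from project to project, so check each repository's ops.py:

import tensorflow as tf  # TensorFlow 1.x, matching the examples below


class batch_norm(object):
    """Minimal sketch of a DCGAN-style batch_norm wrapper (illustrative;
    each project below defines its own variant in its ops.py)."""

    def __init__(self, epsilon=1e-5, momentum=0.9, name='batch_norm'):
        self.epsilon = epsilon
        self.momentum = momentum
        self.name = name  # fixed variable scope, so repeated calls share statistics

    def __call__(self, x, train=True):
        # Delegates to the contrib layer: learns scale/offset and keeps moving
        # averages of mean/variance that are used when train=False.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)

With such a wrapper, g_bn0 = ops.batch_norm(name='g_bn0') followed by g_bn0(h0, train=train) (as in Examples #9 through #11) first creates the normalization variables under the scope g_bn0 and then applies them; COCO-GAN's variant takes is_training as the keyword instead of train.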
Example #1
Source File: gan.py From adagan with BSD 3-Clause "New" or "Revised" License
def discriminator(self, opts, input_, is_training,
                  prefix='DISCRIMINATOR', reuse=False):
    """Discriminator function, suitable for simple toy experiments."""
    num_filters = opts['d_num_filters']

    with tf.variable_scope(prefix, reuse=reuse):
        h0 = ops.conv2d(opts, input_, num_filters, scope='h0_conv')
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = ops.lrelu(h0)
        h1 = ops.conv2d(opts, h0, num_filters * 2, scope='h1_conv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = ops.lrelu(h1)
        h2 = ops.conv2d(opts, h1, num_filters * 4, scope='h2_conv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = ops.lrelu(h2)
        h3 = ops.linear(opts, h2, 1, scope='h3_lin')
        return h3
Example #2
Source File: gan.py From adagan with BSD 3-Clause "New" or "Revised" License
def generator(self, opts, noise, is_training, reuse=False):
    with tf.variable_scope("GENERATOR", reuse=reuse):
        h0 = ops.linear(opts, noise, 100, scope='h0_lin')
        h0 = ops.batch_norm(opts, h0, is_training, reuse,
                            scope='bn_layer1', scale=False)
        h0 = tf.nn.softplus(h0)
        h1 = ops.linear(opts, h0, 100, scope='h1_lin')
        h1 = ops.batch_norm(opts, h1, is_training, reuse,
                            scope='bn_layer2', scale=False)
        h1 = tf.nn.softplus(h1)
        h2 = ops.linear(opts, h1, 28 * 28, scope='h2_lin')
        # h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = tf.reshape(h2, [-1, 28, 28, 1])
        if opts['input_normalize_sym']:
            return tf.nn.tanh(h2)
        else:
            return tf.nn.sigmoid(h2)
Example #3
Source File: inception_model.py From InceptionV3_TensorFlow with MIT License
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997,
                            batch_norm_epsilon=0.001):
    """Yields the scope with the default parameters for inception_v3.

    Args:
        weight_decay: the weight decay for weights variables.
        stddev: standard deviation of the truncated Gaussian weight distribution.
        batch_norm_decay: decay for the moving average of batch_norm momentums.
        batch_norm_epsilon: small float added to variance to avoid dividing by zero.

    Yields:
        an arg_scope with the parameters needed for inception_v3.
    """
    # Set weight_decay for weights in Conv and FC layers.
    with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=weight_decay):
        # Set stddev, activation and parameters for batch_norm.
        with scopes.arg_scope([ops.conv2d],
                              stddev=stddev,
                              activation=tf.nn.relu,
                              batch_norm_params={
                                  'decay': batch_norm_decay,
                                  'epsilon': batch_norm_epsilon}) as arg_scope:
            yield arg_scope
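Because inception_v3_parameters yields an arg_scope rather than returning one, it must be wrapped as a context manager (for example with @contextlib.contextmanager, which the extract above does not show) before it can appear in a with statement. A hypothetical call site might then look like the following, where inception_v3 stands in for the model function that picks up these defaults:

# Hypothetical usage sketch: every ops.conv2d / ops.fc created inside the
# block inherits the weight decay, stddev, ReLU activation, and batch_norm
# parameters configured by inception_v3_parameters.
with inception_v3_parameters(weight_decay=0.00004, stddev=0.1):
    logits, endpoints = inception_v3(images, num_classes=1001,
                                     is_training=True)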
Example #4
Source File: models.py From CausalGAN with MIT License
def discriminator_on_z(image, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])

        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, variables
Example #5
Source File: models.py From wae with BSD 3-Clause "New" or "Revised" License
def dcgan_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = inputs
    for i in xrange(num_layers):
        scale = 2**(num_layers - i - 1)
        layer_x = ops.conv2d(opts, layer_x, num_units / scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)

    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hfinal_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts, layer_x, opts['zdim'],
                                scope='log_sigmas_lin')
        return mean, log_sigmas
Example #6
Source File: models.py From CausalGAN with MIT License
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])

        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, D_labels_logits, variables
Example #7
Source File: models.py From CausalGAN with MIT License
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])

        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, D_labels_logits, variables
Example #8
Source File: vae.py From adagan with BSD 3-Clause "New" or "Revised" License
def discriminator(self, opts, input_, is_training,
                  prefix='DISCRIMINATOR', reuse=False):
    """Encoder function, suitable for simple toy experiments."""
    num_filters = opts['d_num_filters']

    with tf.variable_scope(prefix, reuse=reuse):
        h0 = ops.conv2d(opts, input_, num_filters / 8, scope='h0_conv')
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = tf.nn.relu(h0)
        h1 = ops.conv2d(opts, h0, num_filters / 4, scope='h1_conv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = tf.nn.relu(h1)
        h2 = ops.conv2d(opts, h1, num_filters / 2, scope='h2_conv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = tf.nn.relu(h2)
        h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
        h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
        h3 = tf.nn.relu(h3)
        # Already has NaNs!!
        latent_mean = ops.linear(opts, h3, opts['latent_space_dim'],
                                 scope='h3_lin')
        log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'],
                                       scope='h3_lin_sigma')
        return latent_mean, log_latent_sigmas
Example #9
Source File: model_def.py From csgm with MIT License
def generator(hparams, z, train, reuse):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    output_size = 64
    s = output_size
    s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

    g_bn0 = ops.batch_norm(name='g_bn0')
    g_bn1 = ops.batch_norm(name='g_bn1')
    g_bn2 = ops.batch_norm(name='g_bn2')
    g_bn3 = ops.batch_norm(name='g_bn3')

    # project `z` and reshape
    h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'),
                    [-1, s16, s16, hparams.gf_dim * 8])
    h0 = tf.nn.relu(g_bn0(h0, train=train))

    h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
    h1 = tf.nn.relu(g_bn1(h1, train=train))

    h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
    h2 = tf.nn.relu(g_bn2(h2, train=train))

    h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
    h3 = tf.nn.relu(g_bn3(h3, train=train))

    h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
    x_gen = tf.nn.tanh(h4)

    return x_gen
Example #10
Source File: model_def.py From sparse_gen with MIT License
def discriminator(hparams, x, train, reuse):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit
Example #11
Source File: model_def.py From csgm with MIT License
def discriminator(hparams, x, train, reuse):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit
Example #12
Source File: content_predictor.py From COCO-GAN with MIT License
def forward(self, h, is_training):
    print(" [Build] Spatial Predictor ; is_training: {}".format(is_training))
    update_collection = self._get_update_collection(is_training)
    with tf.variable_scope("Q_content_prediction_head", reuse=tf.AUTO_REUSE):
        h = snlinear(h, self.aux_dim, 'fc1', update_collection=update_collection)
        h = batch_norm(name='bn1')(h, is_training=is_training)
        h = lrelu(h)
        h = snlinear(h, self.z_dim, 'fc2', update_collection=update_collection)
        return tf.nn.tanh(h)
Example #13
Source File: spatial_prediction.py From COCO-GAN with MIT License
def forward(self, h, is_training):
    print(" [Build] Spatial Predictor ; is_training: {}".format(is_training))
    update_collection = self._get_update_collection(is_training)
    with tf.variable_scope("GD_spatial_prediction_head", reuse=tf.AUTO_REUSE):
        h = snlinear(h, self.aux_dim, 'fc1', update_collection=update_collection)
        h = batch_norm(name='bn1')(h, is_training=is_training)
        h = lrelu(h)
        h = snlinear(h, self.spatial_dim, 'fc2', update_collection=update_collection)
        return tf.nn.tanh(h)
Example #14
Source File: main.py From SoundNet-tensorflow with MIT License
def add_generator(self, name_scope='SoundNet'):
    with tf.variable_scope(name_scope) as scope:
        self.layers = {}

        # Stream one: conv1 ~ conv7
        self.layers[1] = conv2d(self.sound_input_placeholder, 1, 16,
                                k_h=64, d_h=2, p_h=32, name_scope='conv1')
        self.layers[2] = batch_norm(self.layers[1], 16, self.config['eps'], name_scope='conv1')
        self.layers[3] = relu(self.layers[2], name_scope='conv1')
        self.layers[4] = maxpool(self.layers[3], k_h=8, d_h=8, name_scope='conv1')

        self.layers[5] = conv2d(self.layers[4], 16, 32, k_h=32, d_h=2, p_h=16, name_scope='conv2')
        self.layers[6] = batch_norm(self.layers[5], 32, self.config['eps'], name_scope='conv2')
        self.layers[7] = relu(self.layers[6], name_scope='conv2')
        self.layers[8] = maxpool(self.layers[7], k_h=8, d_h=8, name_scope='conv2')

        self.layers[9] = conv2d(self.layers[8], 32, 64, k_h=16, d_h=2, p_h=8, name_scope='conv3')
        self.layers[10] = batch_norm(self.layers[9], 64, self.config['eps'], name_scope='conv3')
        self.layers[11] = relu(self.layers[10], name_scope='conv3')

        self.layers[12] = conv2d(self.layers[11], 64, 128, k_h=8, d_h=2, p_h=4, name_scope='conv4')
        self.layers[13] = batch_norm(self.layers[12], 128, self.config['eps'], name_scope='conv4')
        self.layers[14] = relu(self.layers[13], name_scope='conv4')

        self.layers[15] = conv2d(self.layers[14], 128, 256, k_h=4, d_h=2, p_h=2, name_scope='conv5')
        self.layers[16] = batch_norm(self.layers[15], 256, self.config['eps'], name_scope='conv5')
        self.layers[17] = relu(self.layers[16], name_scope='conv5')
        self.layers[18] = maxpool(self.layers[17], k_h=4, d_h=4, name_scope='conv5')

        self.layers[19] = conv2d(self.layers[18], 256, 512, k_h=4, d_h=2, p_h=2, name_scope='conv6')
        self.layers[20] = batch_norm(self.layers[19], 512, self.config['eps'], name_scope='conv6')
        self.layers[21] = relu(self.layers[20], name_scope='conv6')

        self.layers[22] = conv2d(self.layers[21], 512, 1024, k_h=4, d_h=2, p_h=2, name_scope='conv7')
        self.layers[23] = batch_norm(self.layers[22], 1024, self.config['eps'], name_scope='conv7')
        self.layers[24] = relu(self.layers[23], name_scope='conv7')

        # Split one: conv8, conv8_2
        # NOTE: here we use a padding of 2 to skip an unknown error
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/common_shape_fns.cc#L45
        self.layers[25] = conv2d(self.layers[24], 1024, 1000, k_h=8, d_h=2, p_h=2, name_scope='conv8')
        self.layers[26] = conv2d(self.layers[24], 1024, 401, k_h=8, d_h=2, p_h=2, name_scope='conv8_2')
Example #15
Source File: model.py From SoundNet-tensorflow with MIT License
def add_generator(self, name_scope='SoundNet'):
    with tf.variable_scope(name_scope) as scope:
        self.layers = {}

        # Stream one: conv1 ~ conv7
        self.layers[1] = conv2d(self.sound_input_placeholder, 1, 16,
                                k_h=64, d_h=2, p_h=32, name_scope='conv1')
        self.layers[2] = batch_norm(self.layers[1], 16, self.config['eps'], name_scope='conv1')
        self.layers[3] = relu(self.layers[2], name_scope='conv1')
        self.layers[4] = maxpool(self.layers[3], k_h=8, d_h=8, name_scope='conv1')

        self.layers[5] = conv2d(self.layers[4], 16, 32, k_h=32, d_h=2, p_h=16, name_scope='conv2')
        self.layers[6] = batch_norm(self.layers[5], 32, self.config['eps'], name_scope='conv2')
        self.layers[7] = relu(self.layers[6], name_scope='conv2')
        self.layers[8] = maxpool(self.layers[7], k_h=8, d_h=8, name_scope='conv2')

        self.layers[9] = conv2d(self.layers[8], 32, 64, k_h=16, d_h=2, p_h=8, name_scope='conv3')
        self.layers[10] = batch_norm(self.layers[9], 64, self.config['eps'], name_scope='conv3')
        self.layers[11] = relu(self.layers[10], name_scope='conv3')

        self.layers[12] = conv2d(self.layers[11], 64, 128, k_h=8, d_h=2, p_h=4, name_scope='conv4')
        self.layers[13] = batch_norm(self.layers[12], 128, self.config['eps'], name_scope='conv4')
        self.layers[14] = relu(self.layers[13], name_scope='conv4')

        self.layers[15] = conv2d(self.layers[14], 128, 256, k_h=4, d_h=2, p_h=2, name_scope='conv5')
        self.layers[16] = batch_norm(self.layers[15], 256, self.config['eps'], name_scope='conv5')
        self.layers[17] = relu(self.layers[16], name_scope='conv5')
        self.layers[18] = maxpool(self.layers[17], k_h=4, d_h=4, name_scope='conv5')

        self.layers[19] = conv2d(self.layers[18], 256, 512, k_h=4, d_h=2, p_h=2, name_scope='conv6')
        self.layers[20] = batch_norm(self.layers[19], 512, self.config['eps'], name_scope='conv6')
        self.layers[21] = relu(self.layers[20], name_scope='conv6')

        self.layers[22] = conv2d(self.layers[21], 512, 1024, k_h=4, d_h=2, p_h=2, name_scope='conv7')
        self.layers[23] = batch_norm(self.layers[22], 1024, self.config['eps'], name_scope='conv7')
        self.layers[24] = relu(self.layers[23], name_scope='conv7')

        # Split one: conv8, conv8_2
        self.layers[25] = conv2d(self.layers[24], 1024, 1000, k_h=8, d_h=2, name_scope='conv8')
        self.layers[26] = conv2d(self.layers[24], 1024, 401, k_h=8, d_h=2, name_scope='conv8_2')
Example #16
Source File: generator.py From self-attention-gan with Apache License 2.0
def generator_old(zs, target_class, gf_dim, num_classes,
                  is_training=True, scope='Generator'):
    """Builds the generator graph propagating from z to x.

    Args:
        zs: The list of noise tensors.
        target_class: The conditional labels in the generation.
        gf_dim: The gf dimension.
        num_classes: Number of classes in the labels.
        scope: Optional scope for `variable_op_scope`.

    Returns:
        outputs: The output layer of the generator.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.linear(zs, gf_dim * 16 * 4 * 4, scope='g_h0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block_no_sn(act0, target_class, gf_dim * 16, num_classes,
                           is_training, 'g_block1')  # 8 * 8
        act2 = block_no_sn(act1, target_class, gf_dim * 8, num_classes,
                           is_training, 'g_block2')  # 16 * 16
        act3 = block_no_sn(act2, target_class, gf_dim * 4, num_classes,
                           is_training, 'g_block3')  # 32 * 32
        act4 = block_no_sn(act3, target_class, gf_dim * 2, num_classes,
                           is_training, 'g_block4')  # 64 * 64
        act5 = block_no_sn(act4, target_class, gf_dim, num_classes,
                           is_training, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')
        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.conv2d(act5, 3, 3, 3, 1, 1, name='g_conv_last')
        out = tf.nn.tanh(act6)
        print('GAN baseline with moving average')
        return out
Example #17
Source File: generator.py From self-attention-gan with Apache License 2.0
def generator(zs, target_class, gf_dim, num_classes, is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
        zs: The list of noise tensors.
        target_class: The conditional labels in the generation.
        gf_dim: The gf dimension.
        num_classes: Number of classes in the labels.
        scope: Optional scope for `variable_op_scope`.

    Returns:
        outputs: The output layer of the generator.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block(act0, target_class, gf_dim * 16, num_classes,
                     is_training, 'g_block1')  # 8 * 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes,
                     is_training, 'g_block2')  # 16 * 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes,
                     is_training, 'g_block3')  # 32 * 32
        act4 = block(act3, target_class, gf_dim * 2, num_classes,
                     is_training, 'g_block4')  # 64 * 64
        act5 = block(act4, target_class, gf_dim, num_classes,
                     is_training, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')
        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('Generator Structure')
        return out
Example #18
Source File: generator.py From self-attention-gan with Apache License 2.0
def generator_test(zs, target_class, gf_dim, num_classes, is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
        zs: The list of noise tensors.
        target_class: The conditional labels in the generation.
        gf_dim: The gf dimension.
        num_classes: Number of classes in the labels.
        scope: Optional scope for `variable_op_scope`.

    Returns:
        outputs: The output layer of the generator.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block(act0, target_class, gf_dim * 16, num_classes,
                     is_training, 'g_block1')  # 8 * 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes,
                     is_training, 'g_block2')  # 16 * 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes,
                     is_training, 'g_block3')  # 32 * 32
        act3 = non_local.sn_non_local_block_sim(act3, None, name='g_non_local')
        act4 = block(act3, target_class, gf_dim * 2, num_classes,
                     is_training, 'g_block4')  # 64 * 64
        act5 = block(act4, target_class, gf_dim, num_classes,
                     is_training, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')
        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('Generator TEST structure')
        return out
Example #19
Source File: models.py From wae with BSD 3-Clause "New" or "Revised" License
def decoder(opts, noise, reuse=False, is_training=True):
    assert opts['dataset'] in datashapes, 'Unknown dataset!'
    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']

    with tf.variable_scope("generator", reuse=reuse):
        if opts['g_arch'] == 'mlp':
            # Architecture with only fully connected layers and ReLUs
            layer_x = noise
            i = 0
            for i in xrange(opts['g_num_layers']):
                layer_x = ops.linear(opts, layer_x, num_units, 'h%d_lin' % i)
                layer_x = tf.nn.relu(layer_x)
                if opts['batch_norm']:
                    layer_x = ops.batch_norm(
                        opts, layer_x, is_training, reuse, scope='h%d_bn' % i)
            out = ops.linear(opts, layer_x,
                             np.prod(output_shape), 'h%d_lin' % (i + 1))
            out = tf.reshape(out, [-1] + list(output_shape))
            if opts['input_normalize_sym']:
                return tf.nn.tanh(out), out
            else:
                return tf.nn.sigmoid(out), out
        elif opts['g_arch'] in ['dcgan', 'dcgan_mod']:
            # Fully convolutional architecture similar to DCGAN
            res = dcgan_decoder(opts, noise, is_training, reuse)
        elif opts['g_arch'] == 'ali':
            # Architecture similar to "Adversarially learned inference" paper
            res = ali_decoder(opts, noise, is_training, reuse)
        elif opts['g_arch'] == 'began':
            # Architecture similar to the BEGAN paper
            res = began_decoder(opts, noise, is_training, reuse)
        else:
            raise ValueError('%s Unknown decoder architecture' % opts['g_arch'])

        return res
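The decoder above is driven entirely by the opts dictionary. As an illustrative sketch only (key names taken from the code above, values invented), a minimal configuration exercising the 'mlp' branch might look like this:

# Illustrative opts for the 'mlp' branch of decoder(); only keys that the
# snippet actually reads are listed, and all values are made up.
opts = {
    'dataset': 'mnist',            # must be a key of wae's datashapes dict
    'g_arch': 'mlp',               # selects the fully connected branch
    'g_num_filters': 1024,         # units per hidden layer
    'g_num_layers': 3,             # number of hidden layers
    'batch_norm': True,            # insert ops.batch_norm in each hidden layer
    'input_normalize_sym': False,  # sigmoid output instead of tanh
}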
Example #20
Source File: models.py From wae with BSD 3-Clause "New" or "Revised" License
def dcgan_decoder(opts, noise, is_training=False, reuse=False):
    output_shape = datashapes[opts['dataset']]
    num_units = opts['g_num_filters']
    batch_size = tf.shape(noise)[0]
    num_layers = opts['g_num_layers']
    if opts['g_arch'] == 'dcgan':
        height = output_shape[0] / 2**num_layers
        width = output_shape[1] / 2**num_layers
    elif opts['g_arch'] == 'dcgan_mod':
        height = output_shape[0] / 2**(num_layers - 1)
        width = output_shape[1] / 2**(num_layers - 1)

    h0 = ops.linear(
        opts, noise, num_units * height * width, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, height, width, num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in xrange(num_layers - 1):
        scale = 2**(i + 1)
        _out_shape = [batch_size, height * scale,
                      width * scale, num_units / scale]
        layer_x = ops.deconv2d(opts, layer_x, _out_shape,
                               scope='h%d_deconv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x,
                                     is_training, reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)

    _out_shape = [batch_size] + list(output_shape)
    if opts['g_arch'] == 'dcgan':
        last_h = ops.deconv2d(
            opts, layer_x, _out_shape, scope='hfinal_deconv')
    elif opts['g_arch'] == 'dcgan_mod':
        last_h = ops.deconv2d(
            opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hfinal_deconv')
    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h), last_h
    else:
        return tf.nn.sigmoid(last_h), last_h
Example #21
Source File: model_def.py From sparse_gen with MIT License
def generator(hparams, z, train, reuse):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    output_size = 64
    s = output_size
    s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

    g_bn0 = ops.batch_norm(name='g_bn0')
    g_bn1 = ops.batch_norm(name='g_bn1')
    g_bn2 = ops.batch_norm(name='g_bn2')
    g_bn3 = ops.batch_norm(name='g_bn3')

    # project `z` and reshape
    h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'),
                    [-1, s16, s16, hparams.gf_dim * 8])
    h0 = tf.nn.relu(g_bn0(h0, train=train))

    h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
    h1 = tf.nn.relu(g_bn1(h1, train=train))

    h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
    h2 = tf.nn.relu(g_bn2(h2, train=train))

    h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
    h3 = tf.nn.relu(g_bn3(h3, train=train))

    h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
    x_gen = tf.nn.tanh(h4)

    return x_gen
Example #22
Source File: model_def_new.py From csgm with MIT License
def generator(hparams, z, scope_name, train, reuse):
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        output_size = 64
        s = output_size
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

        g_bn0 = ops.batch_norm(name='g_bn0')
        g_bn1 = ops.batch_norm(name='g_bn1')
        g_bn2 = ops.batch_norm(name='g_bn2')
        g_bn3 = ops.batch_norm(name='g_bn3')

        # project `z` and reshape
        h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'),
                        [-1, s16, s16, hparams.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0, train=train))

        h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1, train=train))

        h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2, train=train))

        h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3, train=train))

        h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
        x_gen = tf.nn.tanh(h4)

        return x_gen
Example #23
Source File: gan.py From adagan with BSD 3-Clause "New" or "Revised" License
def discriminator(self, opts, input_, is_training,
                  prefix='DISCRIMINATOR', reuse=False):
    shape = tf.shape(input_)
    num = shape[0]

    with tf.variable_scope(prefix, reuse=reuse):
        h0 = input_
        h0 = tf.add(h0, tf.random_normal(shape, stddev=0.3))
        h0 = ops.linear(opts, h0, 1000, scope='h0_linear')
        # h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = tf.nn.relu(h0)
        h1 = tf.add(h0, tf.random_normal([num, 1000], stddev=0.5))
        h1 = ops.linear(opts, h1, 500, scope='h1_linear')
        # h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = tf.nn.relu(h1)
        h2 = tf.add(h1, tf.random_normal([num, 500], stddev=0.5))
        h2 = ops.linear(opts, h2, 250, scope='h2_linear')
        # h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = tf.nn.relu(h2)
        h3 = tf.add(h2, tf.random_normal([num, 250], stddev=0.5))
        h3 = ops.linear(opts, h3, 250, scope='h3_linear')
        # h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
        h3 = tf.nn.relu(h3)
        h4 = tf.add(h3, tf.random_normal([num, 250], stddev=0.5))
        h4 = ops.linear(opts, h4, 250, scope='h4_linear')
        # h4 = ops.batch_norm(opts, h4, is_training, reuse, scope='bn_layer5')
        h4 = tf.nn.relu(h4)
        h5 = ops.linear(opts, h4, 10, scope='h5_linear')
        return h5, h3
Example #24
Source File: pot.py From adagan with BSD 3-Clause "New" or "Revised" License
def generator(self, opts, noise, is_training=False, reuse=False, keep_prob=1.):
    """Decoder actually."""
    output_shape = self._data.data_shape
    num_units = opts['g_num_filters']

    with tf.variable_scope("GENERATOR", reuse=reuse):
        # if not opts['convolutions']:
        if opts['g_arch'] == 'mlp':
            layer_x = noise
            for i in range(opts['g_num_layers']):
                layer_x = ops.linear(opts, layer_x, num_units, 'h%d_lin' % i)
                layer_x = tf.nn.relu(layer_x)
                if opts['batch_norm']:
                    layer_x = ops.batch_norm(
                        opts, layer_x, is_training, reuse, scope='bn%d' % i)
            out = ops.linear(opts, layer_x,
                             np.prod(output_shape), 'h%d_lin' % (i + 1))
            out = tf.reshape(out, [-1] + list(output_shape))
            if opts['input_normalize_sym']:
                return tf.nn.tanh(out)
            else:
                return tf.nn.sigmoid(out)
        elif opts['g_arch'] in ['dcgan', 'dcgan_mod']:
            return self.dcgan_like_arch(opts, noise, is_training, reuse, keep_prob)
        elif opts['g_arch'] == 'conv_up_res':
            return self.conv_up_res(opts, noise, is_training, reuse, keep_prob)
        elif opts['g_arch'] == 'ali':
            return self.ali_deconv(opts, noise, is_training, reuse, keep_prob)
        elif opts['g_arch'] == 'began':
            return self.began_dec(opts, noise, is_training, reuse, keep_prob)
        else:
            raise ValueError('%s unknown' % opts['g_arch'])
Example #25
Source File: pot.py From adagan with BSD 3-Clause "New" or "Revised" License
def encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
    if opts['e_add_noise']:
        def add_noise(x):
            shape = tf.shape(x)
            return x + tf.truncated_normal(shape, 0.0, 0.01)
        def do_nothing(x):
            return x
        input_ = tf.cond(is_training,
                         lambda: add_noise(input_),
                         lambda: do_nothing(input_))

    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    with tf.variable_scope("ENCODER", reuse=reuse):
        if not opts['convolutions']:
            hi = input_
            for i in range(num_layers):
                hi = ops.linear(opts, hi, num_units, scope='h%d_lin' % i)
                if opts['batch_norm']:
                    hi = ops.batch_norm(opts, hi, is_training,
                                        reuse, scope='bn%d' % i)
                hi = tf.nn.relu(hi)
            if opts['e_is_random']:
                latent_mean = ops.linear(
                    opts, hi, opts['latent_space_dim'], 'h%d_lin' % (i + 1))
                log_latent_sigmas = ops.linear(
                    opts, hi, opts['latent_space_dim'], 'h%d_lin_sigma' % (i + 1))
                return latent_mean, log_latent_sigmas
            else:
                return ops.linear(opts, hi, opts['latent_space_dim'],
                                  'h%d_lin' % (i + 1))
        elif opts['e_arch'] == 'dcgan':
            return self.dcgan_encoder(opts, input_, is_training, reuse, keep_prob)
        elif opts['e_arch'] == 'ali':
            return self.ali_encoder(opts, input_, is_training, reuse, keep_prob)
        elif opts['e_arch'] == 'began':
            return self.began_encoder(opts, input_, is_training, reuse, keep_prob)
        else:
            raise ValueError('%s Unknown' % opts['e_arch'])
Example #26
Source File: pot.py From adagan with BSD 3-Clause "New" or "Revised" License
def dcgan_encoder(self, opts, input_, is_training=False,
                  reuse=False, keep_prob=1.):
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = input_
    for i in xrange(num_layers):
        scale = 2**(num_layers - i - 1)
        layer_x = ops.conv2d(opts, layer_x, num_units / scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='bn%d' % i)
        layer_x = tf.nn.relu(layer_x)
        if opts['dropout']:
            _keep_prob = tf.minimum(
                1., 0.9 - (0.9 - keep_prob) * float(i + 1) / num_layers)
            layer_x = tf.nn.dropout(layer_x, _keep_prob)
        if opts['e_3x3_conv'] > 0:
            before = layer_x
            for j in range(opts['e_3x3_conv']):
                layer_x = ops.conv2d(opts, layer_x, num_units / scale,
                                     d_h=1, d_w=1,
                                     scope='conv2d_3x3_%d_%d' % (i, j),
                                     conv_filters_dim=3)
                layer_x = tf.nn.relu(layer_x)
            layer_x += before  # Residual connection.

    if opts['e_is_random']:
        latent_mean = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
        log_latent_sigmas = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
        return latent_mean, log_latent_sigmas
    else:
        return ops.linear(opts, layer_x, opts['latent_space_dim'],
                          scope='hlast_lin')
Example #27
Source File: model_def_new.py From csgm with MIT License
def discriminator(hparams, x, scope_name, train, reuse):
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        d_bn1 = ops.batch_norm(name='d_bn1')
        d_bn2 = ops.batch_norm(name='d_bn2')
        d_bn3 = ops.batch_norm(name='d_bn3')

        h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

        h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
        h1 = ops.lrelu(d_bn1(h1, train=train))

        h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
        h2 = ops.lrelu(d_bn2(h2, train=train))

        h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
        h3 = ops.lrelu(d_bn3(h3, train=train))

        h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

        d_logit = h4
        d = tf.nn.sigmoid(d_logit)

        return d, d_logit
Example #28
Source File: generator.py From COCO-GAN with MIT License
def forward(self, z, coord, is_training):
    valid_sizes = {4, 8, 16, 32, 64, 128, 256}
    assert (self.micro_patch_size[0] in valid_sizes and
            self.micro_patch_size[1] in valid_sizes), \
        "I haven't tested your micro patch size: {}".format(self.micro_patch_size)

    update_collection = self._get_update_collection(is_training)
    print(" [Build] Generator ; is_training: {}".format(is_training))

    with tf.variable_scope("G_generator", reuse=tf.AUTO_REUSE):
        init_sp = 2
        init_ngf_mult = 16
        cond = tf.concat([z, coord], axis=1)
        h = snlinear(cond, self.ngf_base*init_ngf_mult*init_sp*init_sp,
                     'g_z_fc', update_collection=update_collection)
        h = tf.reshape(h, [-1, init_sp, init_sp, self.ngf_base*init_ngf_mult])

        # Stacking residual blocks
        num_resize_layers = int(math.log(min(self.micro_patch_size), 2) - 1)
        num_total_layers = num_resize_layers + self.num_extra_layers
        basic_layers = [8, 4, 2]
        if num_total_layers >= len(basic_layers):
            num_replicate_layers = num_total_layers - len(basic_layers)
            ngf_mult_list = basic_layers + [1, ] * num_replicate_layers
        else:
            ngf_mult_list = basic_layers[:num_total_layers]
        print("\t ngf_mult_list = {}".format(ngf_mult_list))

        for idx, ngf_mult in enumerate(ngf_mult_list):
            n_ch = self.ngf_base * ngf_mult
            # Standard layers first
            if idx < num_resize_layers:
                resize, is_extra = True, False
            # Extra layers do not resize spatial size
            else:
                resize, is_extra = False, True
            h = self._g_residual_block(h, cond, n_ch, idx=idx,
                                       is_training=is_training, resize=resize)
            print("\t GResBlock: id={}, out_shape={}, resize={}, is_extra={}"
                  .format(idx, h.shape.as_list(), resize, is_extra))

        h = batch_norm(name="g_last_bn")(h, is_training=is_training)
        h = tf.nn.relu(h)
        h = snconv2d(h, self.c_dim, name='g_last_conv_2',
                     update_collection=update_collection)
        return tf.nn.tanh(h)
Example #29
Source File: model_mmd_fm.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def __init__(self, sess, config, is_crop=True,
             batch_size=64, output_size=64,
             z_dim=100, gf_dim=64, df_dim=64,
             gfc_dim=1024, dfc_dim=1024, c_dim=3,
             dataset_name='default',
             checkpoint_dir=None, sample_dir=None, log_dir=None):
    """
    Args:
        sess: TensorFlow session
        batch_size: The size of batch. Should be specified before training.
        output_size: (optional) The resolution in pixels of the images. [64]
        z_dim: (optional) Dimension of dim for Z. [100]
        gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
        df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
        gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
        dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
        c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
    """
    self.sess = sess
    self.config = config
    self.is_crop = is_crop
    self.is_grayscale = (c_dim == 1)
    self.batch_size = batch_size
    self.sample_size = batch_size
    self.output_size = output_size
    self.sample_dir = sample_dir
    self.log_dir = log_dir
    self.checkpoint_dir = checkpoint_dir

    self.z_dim = z_dim
    self.gf_dim = gf_dim
    self.df_dim = df_dim
    self.gfc_dim = gfc_dim
    self.dfc_dim = dfc_dim
    self.c_dim = c_dim

    # batch normalization: deals with poor initialization, helps gradient flow
    self.d_bn1 = batch_norm(name='d_bn1')
    self.d_bn2 = batch_norm(name='d_bn2')
    self.d_bn3 = batch_norm(name='d_bn3')
    self.g_bn0 = batch_norm(name='g_bn0')
    self.g_bn1 = batch_norm(name='g_bn1')
    self.g_bn2 = batch_norm(name='g_bn2')
    self.g_bn3 = batch_norm(name='g_bn3')

    self.dataset_name = dataset_name
    self.build_model()
Example #30
Source File: model_mmd.py From opt-mmd with BSD 3-Clause "New" or "Revised" License
def __init__(self, sess, config, is_crop=True,
             batch_size=64, output_size=64,
             z_dim=100, gf_dim=64, df_dim=64,
             gfc_dim=1024, dfc_dim=1024, c_dim=3,
             dataset_name='default',
             checkpoint_dir=None, sample_dir=None, log_dir=None):
    """
    Args:
        sess: TensorFlow session
        batch_size: The size of batch. Should be specified before training.
        output_size: (optional) The resolution in pixels of the images. [64]
        z_dim: (optional) Dimension of dim for Z. [100]
        gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
        df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
        gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
        dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
        c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
    """
    self.sess = sess
    self.config = config
    self.is_crop = is_crop
    self.is_grayscale = (c_dim == 1)
    self.batch_size = batch_size
    self.sample_size = batch_size
    self.output_size = output_size
    self.sample_dir = sample_dir
    self.log_dir = log_dir
    self.checkpoint_dir = checkpoint_dir

    self.z_dim = z_dim
    self.gf_dim = gf_dim
    self.df_dim = df_dim
    self.gfc_dim = gfc_dim
    self.dfc_dim = dfc_dim
    self.c_dim = c_dim

    # batch normalization: deals with poor initialization, helps gradient flow
    self.d_bn1 = batch_norm(name='d_bn1')
    self.d_bn2 = batch_norm(name='d_bn2')
    self.d_bn3 = batch_norm(name='d_bn3')
    self.g_bn0 = batch_norm(name='g_bn0')
    self.g_bn1 = batch_norm(name='g_bn1')
    self.g_bn2 = batch_norm(name='g_bn2')
    self.g_bn3 = batch_norm(name='g_bn3')

    self.dataset_name = dataset_name
    self.build_model()