Python tensorflow.AUTO_REUSE Examples
The following are 30 code examples of the tensorflow.AUTO_REUSE constant.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the tensorflow module, or try the search function.
Example #1
Source File: running_mean_std.py From HardRLWithYoutube with MIT License | 6 votes |
def __init__(self, epsilon=1e-4, shape=(), scope=''):
    """Create TF variables that track a running mean, variance and count.

    epsilon: tiny initial count so early divisions are well defined.
    shape:   shape of the tracked statistics.
    scope:   variable-scope name under which the variables live.
    """
    session = get_session()
    # Feed points for externally computed statistics.
    self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_var = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_count = tf.placeholder(shape=(), dtype=tf.float64)
    # AUTO_REUSE lets several instances share one scope safely.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64)
        self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64)
        self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64)
    # One grouped op that overwrites all three statistics atomically.
    self.update_ops = tf.group([
        self._var.assign(self._new_var),
        self._mean.assign(self._new_mean),
        self._count.assign(self._new_count),
    ])
    session.run(tf.variables_initializer([self._mean, self._var, self._count]))
    self.sess = session
    self._set_mean_var_count()
Example #2
Source File: ops.py From DeblurGAN-tf with MIT License | 6 votes |
def conv2d(input_, output_dim, kernel_h=3, kernel_w=None, stride_h=1, stride_w=None,
           padding='SAME', reuse=False, initializer=None, use_bias=True, name="conv2d"):
    """2-D convolution with optional bias.

    input_:      4-D input tensor (NHWC assumed by the stride layout).
    output_dim:  number of output channels.
    kernel_w / stride_w default to kernel_h / stride_h when None.
    reuse:       when True, explicitly mark the scope's variables as reused.
    initializer: weight initializer; defaults to Xavier.
    """
    if kernel_w is None:
        kernel_w = kernel_h
    if stride_w is None:
        stride_w = stride_h
    if initializer is None:
        initializer = tf.contrib.layers.xavier_initializer()
    # BUG FIX: the original called `scope.reuse_variables()` but never bound
    # `scope` (the `with` statement lacked `as scope`), so reuse=True raised
    # NameError. Bind the scope so the explicit-reuse path works.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        if reuse:
            scope.reuse_variables()
        w = tf.get_variable('w', [kernel_h, kernel_w, input_.get_shape()[-1], output_dim],
                            initializer=initializer)
        conv = tf.nn.conv2d(input_, w, strides=[1, stride_h, stride_w, 1], padding=padding)
        if use_bias:
            b = tf.get_variable('bias', [output_dim], initializer=tf.constant_initializer(0.0))
            conv = tf.nn.bias_add(conv, b)
        return conv
Example #3
Source File: running_mean_std.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __init__(self, epsilon=1e-4, shape=(), scope=''):
    """Create TF variables that track a running mean, variance and count.

    epsilon: tiny initial count so early divisions are well defined.
    shape:   shape of the tracked statistics.
    scope:   variable-scope name under which the variables live.
    """
    session = get_session()
    # Feed points for externally computed statistics.
    self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_var = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_count = tf.placeholder(shape=(), dtype=tf.float64)
    # AUTO_REUSE lets several instances share one scope safely.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64)
        self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64)
        self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64)
    # One grouped op that overwrites all three statistics atomically.
    self.update_ops = tf.group([
        self._var.assign(self._new_var),
        self._mean.assign(self._new_mean),
        self._count.assign(self._new_count),
    ])
    session.run(tf.variables_initializer([self._mean, self._var, self._count]))
    self.sess = session
    self._set_mean_var_count()
Example #4
Source File: ops.py From DeblurGAN-tf with MIT License | 6 votes |
def res_block(input_, output_dim, name='res_block', is_dropout=False, drop_p=0.5):
    """Residual block: two conv + instance-norm stages plus an identity shortcut."""
    shortcut = input_
    num_input_c = shortcut.shape.as_list()[-1]  # input channel count (kept for parity; unused)
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # First conv -> instance norm -> ReLU (optional dropout after activation).
        out = conv2d(input_, output_dim, name=name + '/conv1')
        out = norm_layer(out, 'instance')
        out = tf.nn.relu(out)
        if is_dropout:
            out = tf.nn.dropout(out, keep_prob=drop_p)
        # Second conv -> instance norm; no activation before the residual add.
        out = conv2d(out, output_dim, name=name + '/conv2')
        out = norm_layer(out, 'instance')
        out = tf.identity(out + shortcut, name='residual_block_output')
        return out
Example #5
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def fprop(self, x, **kwargs):
    """AlexNet-style forward pass.

    Returns a dict with the 'fc7' features, the logits and the softmax probs.
    """
    del kwargs  # unused, accepted for interface compatibility
    conv = functools.partial(tf.layers.conv2d, kernel_size=3, strides=2,
                             padding='valid', activation=tf.nn.relu,
                             kernel_initializer=HeReLuNormalInitializer)
    dense = functools.partial(tf.layers.dense,
                              kernel_initializer=HeReLuNormalInitializer)
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        # Five conv stages with fixed channel widths.
        for depth in [96, 256, 384, 384, 256]:
            x = conv(x, depth)
        y = tf.layers.flatten(x)
        y = dense(y, 4096, tf.nn.relu)
        y = fc7 = dense(y, 4096, tf.nn.relu)  # keep fc7 features for callers
        y = dense(y, 1000)
        return {'fc7': fc7,
                self.O_LOGITS: y,
                self.O_PROBS: tf.nn.softmax(logits=y)}
Example #6
Source File: ops.py From DeblurGAN-tf with MIT License | 6 votes |
def fc_layer(input_, output_dim, initializer=None, activation='linear', name=None):
    """Fully connected layer with a selectable output activation.

    input_:      input tensor; tensors with rank > 2 are flattened first.
    output_dim:  number of output units.
    initializer: weight initializer; defaults to Xavier.
    activation:  one of 'linear', 'relu', 'sigmoid', 'tanh'.
    name:        variable-scope name (defaults to "Linear").

    Raises:
        ValueError: for an unknown activation (the original silently
        fell off the end and returned None).
    """
    if initializer is None:
        initializer = tf.contrib.layers.xavier_initializer()
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name or "Linear", reuse=tf.AUTO_REUSE) as scope:
        if len(shape) > 2:
            input_ = tf.layers.flatten(input_)
            shape = input_.get_shape().as_list()
        w = tf.get_variable("fc_w", [shape[1], output_dim], dtype=tf.float32,
                            initializer=initializer)
        b = tf.get_variable("fc_b", [output_dim], initializer=tf.constant_initializer(0.0))
        result = tf.matmul(input_, w) + b
        if activation == 'linear':
            return result
        elif activation == 'relu':
            return tf.nn.relu(result)
        elif activation == 'sigmoid':
            return tf.nn.sigmoid(result)
        elif activation == 'tanh':
            return tf.nn.tanh(result)
        # BUG FIX: unknown activations previously returned None silently.
        raise ValueError('Unsupported activation: {}'.format(activation))
Example #7
Source File: modules.py From transformer with Apache License 2.0 | 6 votes |
def ln(inputs, epsilon=1e-8, scope="ln"):
    '''Applies layer normalization over the last axis.

    See https://arxiv.org/abs/1607.06450.

    inputs: tensor with 2 or more dimensions; the first dimension is `batch_size`.
    epsilon: small constant guarding against division by zero.
    scope: optional name for `variable_scope`.

    Returns a tensor with the same shape and dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Learned scale/shift have the size of the last (feature) axis.
        params_shape = inputs.get_shape()[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        beta = tf.get_variable("beta", params_shape, initializer=tf.zeros_initializer())
        gamma = tf.get_variable("gamma", params_shape, initializer=tf.ones_initializer())
        normalized = (inputs - mean) / ((variance + epsilon) ** 0.5)
        return gamma * normalized + beta
Example #8
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 6 votes |
def build_pgd_attack(self, eps):
    # Build a momentum-PGD attack graph against the victim embeddings.
    # eps: L-inf budget in raw pixel units (images assumed in [0, 255]).
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)
    def one_step_attack(image, grad):
        """
        core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        # Input-diversity transform, then normalize to the network's input range.
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # NOTE(review): only the first image of the batch is used here — confirm
        # the batch is expected to hold a single attack image.
        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized
        noise, = tf.gradients(objective, orig_image)
        # Normalize the gradient, then apply momentum (decay 0.9).
        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise
        # Signed step of 1.0, projected back into the eps-ball (closure over
        # lower_bound/upper_bound defined below, before the loop runs).
        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise
    input = tf.to_float(self.image_batch)  # shadows builtin `input`; kept as-is
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        # Fixed 100 iterations; the condition lambda is always True.
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example #9
Source File: models.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __call__(self, obs, reuse=False):
    """Actor network: map observations to near-binary actions in (0, 1)."""
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        h = self.network_builder(obs)
        h = tf.nn.tanh(tf.layers.dense(h, self.hidden_layer1, kernel_initializer=init))
        h = tf.nn.tanh(tf.layers.dense(h, self.hidden_layer2, kernel_initializer=init))
        h = tf.layers.dense(h, self.nb_actions, kernel_initializer=init)
        # Sharp sigmoid: the factor 1000 pushes outputs towards 0/1 so later
        # binarization changes the value only slightly and gradients still flow.
        return tf.nn.sigmoid(1000 * h)  # sigmoid ~ (0,1), tanh ~ (-1, 1)
Example #10
Source File: models.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __call__(self, obs, action, reuse=False):
    """Critic network: Q(obs, action) through four tanh layers to a scalar."""
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        # Assumes observation and action can be concatenated on the last axis.
        h = tf.concat([obs, action], axis=-1)
        h = self.network_builder(h)
        for width in (self.hidden_layer1, self.hidden_layer2,
                      self.hidden_layer3, self.hidden_layer4):
            h = tf.nn.tanh(tf.layers.dense(h, width, kernel_initializer=init))
        return tf.layers.dense(h, 1, kernel_initializer=init)
Example #11
Source File: models.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __call__(self, obs, reuse=False):
    """Actor network: map observations to near-binary actions in (0, 1)."""
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        h = self.network_builder(obs)
        for width in (self.hidden_layer1, self.hidden_layer2,
                      self.hidden_layer3, self.hidden_layer4):
            h = tf.nn.tanh(tf.layers.dense(h, width, kernel_initializer=init))
        h = tf.layers.dense(h, self.nb_actions, kernel_initializer=init)
        # Sharp-sigmoid trick: scaling by 1000 makes x<0 land near 0 and x>0
        # near 1, so the later discretization step barely changes the value.
        # This keeps most of the discrete operation inside the graph where it
        # can be back-propagated.
        return tf.nn.sigmoid(1000 * h)  # sigmoid ~ (0,1), tanh ~ (-1, 1)
Example #12
Source File: models.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __call__(self, obs, reuse=False):
    """Actor network: map observations to near-binary actions in (0, 1)."""
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        h = self.network_builder(obs)
        for width in (self.hidden_layer1, self.hidden_layer2,
                      self.hidden_layer3, self.hidden_layer4):
            h = tf.nn.tanh(tf.layers.dense(h, width, kernel_initializer=init))
        h = tf.layers.dense(h, self.nb_actions, kernel_initializer=init)
        # Sharp-sigmoid trick: scaling by 1000 makes x<0 land near 0 and x>0
        # near 1, so the later discretization step barely changes the value.
        # This keeps most of the discrete operation inside the graph where it
        # can be back-propagated.
        return tf.nn.sigmoid(1000 * h)  # sigmoid ~ (0,1), tanh ~ (-1, 1)
Example #13
Source File: running_mean_std.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __init__(self, epsilon=1e-4, shape=(), scope=''):
    """Create TF variables that track a running mean, variance and count.

    epsilon: tiny initial count so early divisions are well defined.
    shape:   shape of the tracked statistics.
    scope:   variable-scope name under which the variables live.
    """
    session = get_session()
    # Feed points for externally computed statistics.
    self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_var = tf.placeholder(shape=shape, dtype=tf.float64)
    self._new_count = tf.placeholder(shape=(), dtype=tf.float64)
    # AUTO_REUSE lets several instances share one scope safely.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64)
        self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64)
        self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64)
    # One grouped op that overwrites all three statistics atomically.
    self.update_ops = tf.group([
        self._var.assign(self._new_var),
        self._mean.assign(self._new_mean),
        self._count.assign(self._new_count),
    ])
    session.run(tf.variables_initializer([self._mean, self._var, self._count]))
    self.sess = session
    self._set_mean_var_count()
Example #14
Source File: faster_rcnn_meta_arch.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.

    Delegates to the subclass hook `_extract_box_classifier_features`; the
    AUTO_REUSE scope makes repeated calls share one set of variables.

    Args:
      proposal_feature_maps: 4-D float tensor, shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth],
        the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      4-D float tensor of box classifier features, shape
      [batch_size * self.max_num_proposals, height, width, depth].
    """
    with tf.variable_scope(scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
        return self._extract_box_classifier_features(proposal_feature_maps, scope)
Example #15
Source File: next_frame.py From fine-lm with MIT License | 6 votes |
def reward_prediction(self, input_image, input_reward, action, lstm_state, latent):
    """Builds a reward prediction network.

    Returns the predicted reward (decoded to input_reward's shape) and the
    (unmodified) lstm_state.
    """
    conv_size = self.tinyify([32, 32, 16, 4])
    lstm_size = self.tinyify([32, 64, 128, 64, 32])
    with tf.variable_scope("reward_pred", reuse=tf.AUTO_REUSE):
        hidden5, _ = self.bottom_part_tower(
            input_image, input_reward, action, latent,
            lstm_state, lstm_size, conv_size)
        # Alternating batch-norm / conv stack on top of the shared tower.
        net = slim.batch_norm(hidden5, scope="reward_bn0")
        net = slim.conv2d(net, conv_size[1], [3, 3], scope="reward_conv1")
        net = slim.batch_norm(net, scope="reward_bn1")
        net = slim.conv2d(net, conv_size[2], [3, 3], scope="reward_conv2")
        net = slim.batch_norm(net, scope="reward_bn2")
        net = slim.conv2d(net, conv_size[3], [3, 3], scope="reward_conv3")
        pred_reward = self.decode_to_shape(net, input_reward.shape, "reward_dec")
        return pred_reward, lstm_state
Example #16
Source File: nn_parser.py From deepnlp with MIT License | 6 votes |
def _init_model(self, session):
    """Create Parser model and initialize with random or load parameters in session."""
    config_dict = load_config(self.model_config_path)
    config = get_config(config_dict, self.name)
    model_var_scope = get_model_var_scope(self.var_scope, self.name)
    print("NOTICE: Initializing model var scope '%s'" % model_var_scope)
    # Build the graph only once; AUTO_REUSE shares variables on re-entry.
    if self.model is None:
        with tf.variable_scope(model_var_scope, reuse=tf.AUTO_REUSE):
            self.model = parse_model.NNParser(config=config)
    if len(glob.glob(self.ckpt_path + '.data*')) > 0:
        # A checkpoint matching 'parser.ckpt.data*' exists; restore only the
        # variables that live under this model's scope.
        print("NOTICE: Loading model parameters from %s" % self.ckpt_path)
        all_vars = tf.global_variables()
        model_vars = [v for v in all_vars if model_var_scope in v.name.split("/")]
        tf.train.Saver(model_vars).restore(session, self.ckpt_path)
    else:
        print("NOTICE: Model not found, Try to run method: deepnlp.download('parse')")
        print("NOTICE: Created with fresh parameters.")
        session.run(tf.global_variables_initializer())
Example #17
Source File: pos_tagger.py From deepnlp with MIT License | 6 votes |
def _init_pos_model(self, session):
    """Create POS Tagger model and initialize with random or load parameters in session."""
    # Initialize config: single-token, single-step inference mode.
    config_dict = load_config(self.model_config_path)
    config = get_config(config_dict, self.name)
    config.batch_size = 1
    config.num_steps = 1  # iterate one token per time step
    model_var_scope = get_model_var_scope(self.var_scope, self.name)
    print("NOTICE: Input POS Model Var Scope Name '%s'" % model_var_scope)
    # Build the graph only once.
    if self.model is None:
        # BUG FIX: the original passed tf.AUTO_REUSE positionally, where
        # variable_scope's second parameter is `default_name`, so reuse was
        # never set. Pass it by keyword.
        with tf.variable_scope(model_var_scope, reuse=tf.AUTO_REUSE):
            self.model = pos_model.POSTagger(is_training=False, config=config)
    # Restore parameters when a checkpoint 'pos.ckpt.data*' exists.
    if len(glob.glob(self.ckpt_path + '.data*')) > 0:
        print("NOTICE: Loading model parameters from %s" % self.ckpt_path)
        all_vars = tf.global_variables()
        model_vars = [k for k in all_vars if model_var_scope in k.name.split("/")]
        tf.train.Saver(model_vars).restore(session, self.ckpt_path)
    else:
        print("NOTICE: Model not found, Try to run method: deepnlp.download(module='pos', name='%s')" % self.name)
        print("NOTICE: Created with fresh parameters.")
        session.run(tf.global_variables_initializer())
Example #18
Source File: modalities.py From fine-lm with MIT License | 6 votes |
def targets_bottom(self, x, summary_prefix="targets_bottom"):  # pylint: disable=arguments-differ
    """Embed per-channel pixel values and merge them into per-frame features."""
    inputs = x
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        common_layers.summarize_video(inputs, summary_prefix)
        inputs_shape = common_layers.shape_list(inputs)
        # One learned embedding per possible pixel value
        # (top_dimensionality, e.g. 256).
        embedding_var = tf.get_variable(
            "pixel_embedding",
            [self.top_dimensionality, self.PIXEL_EMBEDDING_SIZE])
        one_hot = tf.one_hot(tf.to_int32(inputs), self.top_dimensionality)
        one_hot = tf.reshape(one_hot, [-1, self.top_dimensionality])
        embedded = tf.matmul(one_hot, embedding_var)
        # Merge all channels that were embedded into a single vector.
        merged_size = self.PIXEL_EMBEDDING_SIZE * inputs_shape[4]
        embedded = tf.reshape(embedded, inputs_shape[:4] + [merged_size])
        transposed = common_layers.time_to_channels(embedded)
        return tf.layers.dense(transposed, self._body_input_depth,
                               name="merge_pixel_embedded_frames")
Example #19
Source File: discretization.py From fine-lm with MIT License | 6 votes |
def get_vq_bottleneck(bottleneck_size, hidden_size):
    """Create (or reuse) the codebook variables for a VQ bottleneck.

    Returns:
      means:     [bottleneck_size, hidden_size] codebook, trainable.
      ema_means: non-trainable EMA shadow of the codebook.
      ema_count: non-trainable per-entry EMA usage counts.
    """
    with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
        means = tf.get_variable(
            name="means",
            shape=[bottleneck_size, hidden_size],
            initializer=tf.uniform_unit_scaling_initializer())
        ema_count = tf.get_variable(
            name="ema_count",
            shape=[bottleneck_size],
            initializer=tf.constant_initializer(0),
            trainable=False)
        # Keep the EMA means colocated with the codebook they shadow.
        with tf.colocate_with(means):
            ema_means = tf.get_variable(
                name="ema_means",
                initializer=means.initialized_value(),
                trainable=False)
    return means, ema_means, ema_count
Example #20
Source File: models.py From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0 | 6 votes |
def __call__(self, obs, action, reuse=False):
    """Critic network: Q(obs, action) through four tanh layers to a scalar."""
    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
        init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        # Assumes observation and action can be concatenated on the last axis.
        h = tf.concat([obs, action], axis=-1)
        h = self.network_builder(h)
        for width in (self.hidden_layer1, self.hidden_layer2,
                      self.hidden_layer3, self.hidden_layer4):
            h = tf.nn.tanh(tf.layers.dense(h, width, kernel_initializer=init))
        return tf.layers.dense(h, 1, kernel_initializer=init)
Example #21
Source File: test.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 6 votes |
def generator(self, inputs_condition):
    # U-Net style generator: 8 stride-2 down-convolutions to a 1x1 bottleneck,
    # then 8 up-convolutions with skip connections from the mirrored encoder
    # layer. Trailing comments give the apparent spatial size x channels.
    inputs = inputs_condition
    with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
        # Encoder: leaky-ReLU convs; instance norm on all but the first layer.
        inputs1 = leaky_relu(conv2d("conv1", inputs, 64, 5, 2))  # 128x128x128
        inputs2 = leaky_relu(instanceNorm("in1", conv2d("conv2", inputs1, 128, 5, 2)))  # 64x64x256
        inputs3 = leaky_relu(instanceNorm("in2", conv2d("conv3", inputs2, 256, 5, 2)))  # 32x32x512
        inputs4 = leaky_relu(instanceNorm("in3", conv2d("conv4", inputs3, 512, 5, 2)))  # 16x16x512
        inputs5 = leaky_relu(instanceNorm("in4", conv2d("conv5", inputs4, 512, 5, 2)))  # 8x8x512
        inputs6 = leaky_relu(instanceNorm("in5", conv2d("conv6", inputs5, 512, 5, 2)))  # 4x4x512
        inputs7 = leaky_relu(instanceNorm("in6", conv2d("conv7", inputs6, 512, 5, 2)))  # 2x2x512
        inputs8 = leaky_relu(instanceNorm("in7", conv2d("conv8", inputs7, 512, 5, 2)))  # 1x1x512
        # Decoder: dropout (keep_prob 0.5) on the three deepest layers only;
        # each stage concatenates the mirrored encoder output on the channel axis.
        outputs1 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in9", deconv2d("dconv1", inputs8, 512, 5, 2)), 0.5), inputs7], axis=3))  # 2x2x512
        outputs2 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in10", deconv2d("dconv2", outputs1, 512, 5, 2)), 0.5), inputs6], axis=3))  # 4x4x512
        outputs3 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in11", deconv2d("dconv3", outputs2, 512, 5, 2)), 0.5), inputs5], axis=3))  # 8x8x512
        outputs4 = tf.nn.relu(tf.concat([instanceNorm("in12", deconv2d("dconv4", outputs3, 512, 5, 2)), inputs4], axis=3))  # 16x16x512
        outputs5 = tf.nn.relu(tf.concat([instanceNorm("in13", deconv2d("dconv5", outputs4, 256, 5, 2)), inputs3], axis=3))  # 32x32x256
        outputs6 = tf.nn.relu(tf.concat([instanceNorm("in14", deconv2d("dconv6", outputs5, 128, 5, 2)), inputs2], axis=3))  # 64x64x128
        outputs7 = tf.nn.relu(tf.concat([instanceNorm("in15", deconv2d("dconv7", outputs6, 64, 5, 2)), inputs1], axis=3))  # 128x128x64
        # Final deconv to 3 channels, tanh output in (-1, 1).
        outputs8 = tf.nn.tanh((deconv2d("dconv8", outputs7, 3, 5, 2)))  # 256x256x3
        return outputs8
Example #22
Source File: train_policy.py From cs294-112_hws with MIT License | 6 votes |
def build_critic(x, h, output_size, scope, n_layers, size, gru_size,
                 recurrent=True, activation=tf.tanh, output_activation=None,
                 regularizer=None):
    """
    build recurrent critic

    arguments:
        regularizer: regularization for weights
        (see `build_policy()` for rest)

    n.b. the policy and critic should not share weights
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if recurrent:
            x, h = build_rnn(x, h, gru_size, scope, n_layers, size,
                             activation=activation,
                             output_activation=output_activation,
                             regularizer=regularizer)
        else:
            # Non-recurrent fallback: flatten the sequence and use an MLP
            # whose output width matches the GRU state size.
            x = tf.reshape(x, (-1, x.get_shape()[1] * x.get_shape()[2]))
            x = build_mlp(x, gru_size, scope, n_layers + 1, size,
                          activation=activation,
                          output_activation=activation,
                          regularizer=regularizer)
        return tf.layers.dense(x, output_size, activation=output_activation,
                               name='decoder', kernel_regularizer=regularizer,
                               bias_regularizer=regularizer)
Example #23
Source File: tensorFactorisation.py From decompose with MIT License | 5 votes |
def __init__(self,
             U: List[Tensor],
             priorU: List[Distribution],
             likelihood: Likelihood,
             dtype: tf.DType,
             stopCriterion,
             phase: Phase,
             noiseUniformity: NoiseUniformity,
             transform: bool = False) -> None:
    # Set up the tensor-factorisation model: store configuration, build one
    # posterior object per factor, and create/reuse the filter-bank variables.
    # setup the model
    self.dtype = dtype
    self.__transform = transform
    self.__noiseUniformity = noiseUniformity
    self.likelihood = likelihood
    self.stopCriterion = stopCriterion
    # One posterior updater per factor matrix.
    self.postU = []  # type: List[PostU]
    for f, priorUf in enumerate(priorU):
        postUf = PostU(likelihood, priorUf, f)
        self.postU.append(postUf)
    # create or reuse the variables for the filter banks
    # (copy(U) so iteration is unaffected by the in-place U[f] assignments)
    for f, Uf in enumerate(copy(U)):
        if transform and (f == 0):
            # Factor 0 gets a distinct variable name in transform mode.
            paramName = "{}tr".format(f)
        else:
            paramName = "{}".format(f)
        with tf.variable_scope("U", reuse=tf.AUTO_REUSE):
            UfVar = tf.get_variable(paramName,
                                    dtype=dtype,
                                    initializer=Uf)
            U[f] = UfVar
    self.__U = tuple(U)
    # Select the update strategy for the requested training phase.
    if phase == Phase.EM or phase == Phase.INIT:
        self.__setEm()
    elif phase == Phase.BCD:
        self.__setBcd()
    else:
        raise ValueError
Example #24
Source File: model.py From transformer with Apache License 2.0 | 5 votes |
def encode(self, xs, training=True):
    '''Run the Transformer encoder stack.

    xs: tuple of (token ids x, sequence lengths, raw sentences).
    training: enables dropout when True.

    Returns
    memory: encoder outputs. (N, T1, d_model)
    plus the raw sentences and the source padding masks.
    '''
    with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE):
        x, seqlens, sents1 = xs
        # src_masks: True where the input is padding (id 0)
        src_masks = tf.math.equal(x, 0)  # (N, T1)
        # embedding, scaled by sqrt(d_model) per the Transformer paper
        enc = tf.nn.embedding_lookup(self.embeddings, x)  # (N, T1, d_model)
        enc *= self.hp.d_model**0.5  # scale
        enc += positional_encoding(enc, self.hp.maxlen1)
        enc = tf.layers.dropout(enc, self.hp.dropout_rate, training=training)
        ## Blocks: num_blocks x (self-attention -> feed-forward)
        for i in range(self.hp.num_blocks):
            with tf.variable_scope("num_blocks_{}".format(i), reuse=tf.AUTO_REUSE):
                # self-attention (causality=False: encoder attends everywhere)
                enc = multihead_attention(queries=enc,
                                          keys=enc,
                                          values=enc,
                                          key_masks=src_masks,
                                          num_heads=self.hp.num_heads,
                                          dropout_rate=self.hp.dropout_rate,
                                          training=training,
                                          causality=False)
                # feed forward
                enc = ff(enc, num_units=[self.hp.d_ff, self.hp.d_model])
    memory = enc
    return memory, sents1, src_masks
Example #25
Source File: ner_tagger.py From deepnlp with MIT License | 5 votes |
def _init_ner_model(self, session):
    """Create ner Tagger model and initialize or load parameters in session."""
    # Initialize config: single-token, single-step inference mode.
    config_dict = load_config(self.model_config_path)
    config = get_config(config_dict, self.name)
    if config is None:
        # BUG FIX: the original only printed this warning and then crashed
        # with AttributeError on `config.batch_size`; fail fast instead.
        print("WARNING: Input model name %s has no configuration..." % self.name)
        raise ValueError("No configuration found for model name: %s" % self.name)
    config.batch_size = 1
    config.num_steps = 1  # iterate one token per time step
    model_var_scope = get_model_var_scope(self.var_scope, self.name)
    print("NOTICE: Input NER Model Var Scope Name '%s'" % model_var_scope)
    # Build the graph only once; AUTO_REUSE shares variables on re-entry.
    if self.model is None:
        with tf.variable_scope(model_var_scope, reuse=tf.AUTO_REUSE):
            self.model = ner_model.NERTagger(is_training=True, config=config)
    # Restore parameters when a checkpoint 'ner.ckpt.data*' exists.
    if len(glob.glob(self.ckpt_path + '.data*')) > 0:
        print("NOTICE: Loading model parameters from %s" % self.ckpt_path)
        all_vars = tf.global_variables()
        # Only restore variables under this model's scope, e.g. ner_var_scope_zh.
        model_vars = [k for k in all_vars if model_var_scope in k.name.split("/")]
        tf.train.Saver(model_vars).restore(session, self.ckpt_path)
    else:
        print("NOTICE: Model not found, Try to run method: deepnlp.download(module='ner', name='%s')" % self.name)
        print("NOTICE: Created with fresh parameters.")
        session.run(tf.global_variables_initializer())
Example #26
Source File: pix2pix.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 5 votes |
def discriminator(self, inputs, inputs_condition):
    """PatchGAN-style discriminator scoring a random 70x70 crop of the pair."""
    stacked = tf.concat([inputs, inputs_condition], axis=3)
    stacked = tf.random_crop(stacked, [1, 70, 70, 2])
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("conv1"):
            net = leaky_relu(conv2d("conv1", stacked, 64, 5, 2))
        with tf.variable_scope("conv2"):
            net = leaky_relu(instanceNorm("in1", conv2d("conv2", net, 128, 5, 2)))
        with tf.variable_scope("conv3"):
            net = leaky_relu(instanceNorm("in2", conv2d("conv3", net, 256, 5, 2)))
        with tf.variable_scope("conv4"):
            net = leaky_relu(instanceNorm("in3", conv2d("conv4", net, 512, 5, 2)))
        with tf.variable_scope("outputs"):
            net = conv2d("conv5", net, 1, 5, 1)  # single-channel score map
        return net
Example #27
Source File: test.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 5 votes |
def discriminator(self, inputs, inputs_condition):
    """PatchGAN-style discriminator scoring a random 70x70 crop of the pair."""
    stacked = tf.concat([inputs, inputs_condition], axis=3)
    stacked = tf.random_crop(stacked, [1, 70, 70, 2])
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("conv1"):
            net = leaky_relu(conv2d("conv1", stacked, 64, 5, 2))
        with tf.variable_scope("conv2"):
            net = leaky_relu(instanceNorm("in1", conv2d("conv2", net, 128, 5, 2)))
        with tf.variable_scope("conv3"):
            net = leaky_relu(instanceNorm("in2", conv2d("conv3", net, 256, 5, 2)))
        with tf.variable_scope("conv4"):
            net = leaky_relu(instanceNorm("in3", conv2d("conv4", net, 512, 5, 2)))
        with tf.variable_scope("outputs"):
            net = conv2d("conv5", net, 1, 5, 1)  # single-channel score map
        return net
Example #28
Source File: models.py From nlp-tensorflow with MIT License | 5 votes |
def _build_net(self):
    """Build a two-layer feed-forward classifier graph.

    Creates placeholders, forward pass, loss, Adam train op and accuracy,
    then initializes all variables in self.sess.
    """
    with tf.variable_scope("placeholder"):
        # Bag-of-words input and integer class labels.
        self.input_x = tf.placeholder(tf.float32, shape=(None, self.vocab_size))
        self.input_y = tf.placeholder(tf.int32, shape=(None,))
    with tf.variable_scope("output", reuse=tf.AUTO_REUSE):
        W1 = tf.get_variable("W1", dtype=tf.float32,
                             initializer=tf.truncated_normal((self.vocab_size, self.hidden_size)))
        b1 = tf.get_variable("b1", dtype=tf.float32,
                             initializer=tf.constant(0.1, shape=(self.hidden_size,)))
        W2 = tf.get_variable("W2", dtype=tf.float32,
                             initializer=tf.truncated_normal((self.hidden_size, self.n_class)))
        b2 = tf.get_variable("b2", dtype=tf.float32,
                             initializer=tf.constant(0.1, shape=(self.n_class,)))
        # Hidden layer with ReLU, then linear logits.
        h = tf.nn.relu(tf.nn.xw_plus_b(self.input_x, W1, b1))
        logits = tf.nn.xw_plus_b(h, W2, b2)
        # Max softmax probability and argmax class per example.
        self.prob = tf.reduce_max(tf.nn.softmax(logits), axis=1)
        self.prediction = tf.cast(tf.argmax(logits, axis=1), tf.int32)
    with tf.variable_scope("loss"):
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.input_y))
    with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = optimizer.minimize(self.loss)
    with tf.variable_scope("accuracy"):
        correct = tf.equal(self.prediction, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    self.sess.run(tf.global_variables_initializer())
Example #29
Source File: modules.py From transformer with Apache License 2.0 | 5 votes |
def positional_encoding(inputs, maxlen, masking=True, scope="positional_encoding"):
    '''Sinusoidal Positional_Encoding. See 3.5
    inputs: 3d tensor. (N, T, E)
    maxlen: scalar. Must be >= T
    masking: Boolean. If True, padding positions are set to zeros.
    scope: Optional scope for `variable_scope`.

    returns
    3d tensor that has the same shape as inputs.
    '''
    E = inputs.get_shape().as_list()[-1]  # static
    N, T = tf.shape(inputs)[0], tf.shape(inputs)[1]  # dynamic
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # position indices, one row of 0..T-1 per batch element
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])  # (N, T)
        # First part of the PE function: sin and cos argument
        # (computed in numpy at graph-construction time)
        position_enc = np.array([
            [pos / np.power(10000, (i - i % 2) / E) for i in range(E)]
            for pos in range(maxlen)])
        # Second part, apply the cosine to even columns and sin to odds.
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # dim 2i
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # dim 2i+1
        position_enc = tf.convert_to_tensor(position_enc, tf.float32)  # (maxlen, E)
        # lookup: gather the encoding row for each position index
        outputs = tf.nn.embedding_lookup(position_enc, position_ind)
        # masks: zero the encoding wherever the input itself is zero/padding
        if masking:
            outputs = tf.where(tf.equal(inputs, 0), inputs, outputs)
        return tf.to_float(outputs)
Example #30
Source File: vgg_model.py From DeblurGAN-tf with MIT License | 5 votes |
def __init__(self, name, include_top=False, weights='imagenet'):
    """Wrap a Keras VGG backbone truncated at 'block3_conv3' for feature extraction.

    name:        'VGG16' or 'VGG19' (case-insensitive).
    include_top: forwarded to the Keras application constructor.
    weights:     forwarded to the Keras application constructor.

    Raises:
        TypeError: for any unsupported model name.
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        if name.upper() == 'VGG19':
            self.vgg = tf.keras.applications.VGG19(include_top=include_top, weights=weights)
        elif name.upper() == 'VGG16':
            self.vgg = tf.keras.applications.VGG16(include_top=include_top, weights=weights)
        else:
            # BUG FIX: the original message prefixed 'VGG' to a name that
            # already contains it (rendering e.g. 'VGGVGG19').
            raise TypeError('Not supported model: {}'.format(name))
        # Feature extractor up to block3_conv3; frozen for perceptual-loss use.
        self.model = tf.keras.Model(inputs=self.vgg.input,
                                    outputs=self.vgg.get_layer('block3_conv3').output)
        self.model.trainable = False
        print(" [*] ", name, " model was created")