Python tensorflow.uniform_unit_scaling_initializer() Examples
The following are 30 code examples of tensorflow.uniform_unit_scaling_initializer(), drawn from open-source projects.
You may also want to check out all available functions/classes of the module tensorflow.
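For orientation, here is a minimal, self-contained sketch of typical usage, assuming TensorFlow 1.x (or the tf.compat.v1 shim on TensorFlow 2.x); the variable names and shapes are illustrative. tf.uniform_unit_scaling_initializer(factor=...) draws weights from a uniform distribution whose range is scaled by the layer's input dimension, so that a linear layer approximately preserves the variance of its inputs:

import tensorflow.compat.v1 as tf  # on TF 1.x, `import tensorflow as tf` works directly
tf.disable_v2_behavior()

# A 128-to-10 fully connected layer whose weight range is scaled by the
# input size; factor=1.0 is the default.
w = tf.get_variable(
    'w', shape=[128, 10],
    initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('b', shape=[10], initializer=tf.constant_initializer())
x = tf.placeholder(tf.float32, [None, 128])
y = tf.nn.xw_plus_b(x, w, b)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # roughly sqrt(1/128) ~ 0.088 for factor=1.0

This API was deprecated in TensorFlow 1.x and removed from the top-level namespace in 2.x; tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='uniform') is the usual replacement. All of the examples below target the 1.x API.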
Example #1
Source File: train_probes.py From AIX360 with Apache License 2.0
def fully_connected(x, out_dim):
    """FullyConnected layer.

    Parameters:
        x (Tensor): Input tensor to the fully connected layer.
        out_dim (int): Output dimension of the fully connected layer.

    Return:
        The Tensor corresponding to the fully connected layer output.
    """
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #2
Source File: discretization.py From training_results_v0.5 with Apache License 2.0
def get_vq_codebook(codebook_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
        means = tf.get_variable(
            name="means",
            shape=[codebook_size, hidden_size],
            initializer=tf.uniform_unit_scaling_initializer())
        ema_count = tf.get_variable(
            name="ema_count",
            shape=[codebook_size],
            initializer=tf.constant_initializer(0),
            trainable=False)
        with tf.colocate_with(means):
            ema_means = tf.get_variable(
                name="ema_means",
                initializer=means.initialized_value(),
                trainable=False)
    return means, ema_means, ema_count
Example #3
Source File: transformer_nat.py From training_results_v0.5 with Apache License 2.0
def init_vq_bottleneck(bottleneck_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
        ema_means = tf.get_variable(
            name="ema_means",
            initializer=means.initialized_value(),
            trainable=False)
    return means, ema_means, ema_count
Example #4
Source File: models.py From Question_Answering_Models with MIT License
def add_embeddings(self):
    """Embedding layer."""
    with tf.variable_scope('embedding'):
        if self.config.embeddings is not None:
            embeddings = tf.Variable(self.config.embeddings,
                                     name="embeddings", trainable=False)
        else:
            embeddings = tf.get_variable(
                'embeddings',
                shape=[self.config.vocab_size, self.config.embedding_size],
                initializer=tf.uniform_unit_scaling_initializer())
        q_embed = tf.nn.embedding_lookup(embeddings, self.q)
        aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
        aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
        q_embed = tf.nn.dropout(q_embed, keep_prob=self.keep_prob)
        aplus_embed = tf.nn.dropout(aplus_embed, keep_prob=self.keep_prob)
        aminus_embed = tf.nn.dropout(aminus_embed, keep_prob=self.keep_prob)
        return q_embed, aplus_embed, aminus_embed
Example #5
Source File: model.py From CNN-in-Answer-selection with MIT License
def _HL_layer(self, bottom, n_weight, name):
    """Fully connected layer."""
    assert len(bottom.get_shape()) == 3
    n_prev_weight = bottom.get_shape()[-1]
    max_len = bottom.get_shape()[1]
    # Note: this truncated-normal initializer is defined but never used below.
    initer = tf.truncated_normal_initializer(stddev=0.01)
    W = tf.get_variable(name + 'W', dtype=tf.float32,
                        shape=[n_prev_weight, n_weight],
                        initializer=tf.uniform_unit_scaling_initializer())
    b = tf.get_variable(name + 'b', dtype=tf.float32,
                        initializer=tf.constant(0.1, shape=[n_weight],
                                                dtype=tf.float32))
    bottom_2 = tf.reshape(bottom, [-1, n_prev_weight])
    hl = tf.nn.bias_add(tf.matmul(bottom_2, W), b)
    hl_tanh = tf.nn.tanh(hl)
    HL = tf.reshape(hl_tanh, [-1, max_len, n_weight])
    return HL
Example #6
Source File: discretization.py From BERT with Apache License 2.0
def get_vq_codebook(codebook_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
        means = tf.get_variable(
            name="means",
            shape=[codebook_size, hidden_size],
            initializer=tf.uniform_unit_scaling_initializer())
        ema_count = tf.get_variable(
            name="ema_count",
            shape=[codebook_size],
            initializer=tf.constant_initializer(0),
            trainable=False)
        with tf.colocate_with(means):
            ema_means = tf.get_variable(
                name="ema_means",
                initializer=means.initialized_value(),
                trainable=False)
    return means, ema_means, ema_count
Example #7
Source File: transformer_nat.py From BERT with Apache License 2.0
def init_vq_bottleneck(bottleneck_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
        ema_means = tf.get_variable(
            name="ema_means",
            initializer=means.initialized_value(),
            trainable=False)
    return means, ema_means, ema_count
Example #8
Source File: model.py From Customer-Chatbot with MIT License
def _build(self, embeddings):
    if embeddings is not None:
        self.Embedding = tf.Variable(tf.to_float(embeddings),
                                     trainable=False, name='Embedding')
    else:
        self.Embedding = tf.get_variable(
            'Embedding', shape=[self.vocab_size, self.embedding_size],
            initializer=tf.uniform_unit_scaling_initializer())
    self.q_embed = tf.nn.dropout(
        tf.nn.embedding_lookup(self.Embedding, self._ques),
        keep_prob=self.dropout_keep_prob)
    self.a_embed = tf.nn.dropout(
        tf.nn.embedding_lookup(self.Embedding, self._ans),
        keep_prob=self.dropout_keep_prob)
    with tf.variable_scope('siamese') as scope:
        # Compute the hidden and convolutional layers.
        hl_q = self._HL_layer(self.q_embed, self.hidden_size, 'HL_layer')
        conv1_q = self._cnn_layer(hl_q)
        scope.reuse_variables()
        hl_a = self._HL_layer(self.a_embed, self.hidden_size, 'HL_layer')
        conv1_a = self._cnn_layer(hl_a)
    with tf.variable_scope('fc') as scope:
        con = tf.concat([conv1_q, conv1_a], axis=-1)
        logits = self.fc_layer(con, 1, 'fc_layer')
    res = tf.nn.sigmoid(logits)
    return logits, res
Example #9
Source File: model.py From Customer-Chatbot with MIT License
def _build(self, embeddings):
    if embeddings is not None:
        self.Embedding = tf.Variable(tf.to_float(embeddings),
                                     trainable=False, name='Embedding')
    else:
        self.Embedding = tf.get_variable(
            'Embedding', shape=[self.vocab_size, self.embedding_size],
            initializer=tf.uniform_unit_scaling_initializer())
    self.q_embed = tf.nn.dropout(
        tf.nn.embedding_lookup(self.Embedding, self._ques),
        keep_prob=self.dropout_keep_prob)
    self.a_embed = tf.nn.dropout(
        tf.nn.embedding_lookup(self.Embedding, self._ans),
        keep_prob=self.dropout_keep_prob)
    with tf.variable_scope('siamese') as scope:
        # Compute the hidden and convolutional layers.
        hl_q = self._HL_layer(self.q_embed, self.hidden_size, 'HL_layer')
        conv1_q = self._cnn_layer(hl_q)
        scope.reuse_variables()
        hl_a = self._HL_layer(self.a_embed, self.hidden_size, 'HL_layer')
        conv1_a = self._cnn_layer(hl_a)
    with tf.variable_scope('fc') as scope:
        con = tf.concat([conv1_q, conv1_a], axis=-1)
        logits = self.fc_layer(con, 1, 'fc_layer')
    res = tf.nn.sigmoid(logits)
    return logits, res
Example #10
Source File: discretization.py From fine-lm with MIT License
def get_vq_bottleneck(bottleneck_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
        means = tf.get_variable(
            name="means",
            shape=[bottleneck_size, hidden_size],
            initializer=tf.uniform_unit_scaling_initializer())
        ema_count = tf.get_variable(
            name="ema_count",
            shape=[bottleneck_size],
            initializer=tf.constant_initializer(0),
            trainable=False)
        with tf.colocate_with(means):
            ema_means = tf.get_variable(
                name="ema_means",
                initializer=means.initialized_value(),
                trainable=False)
    return means, ema_means, ema_count
Example #11
Source File: transformer_nat.py From fine-lm with MIT License
def init_vq_bottleneck(bottleneck_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
        ema_means = tf.get_variable(
            name="ema_means",
            initializer=means.initialized_value(),
            trainable=False)
    return means, ema_means, ema_count
Example #12
Source File: resnet_cifar.py From tanda with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #13
Source File: cifar10_model.py From MultiRobustness with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    num_non_batch_dimensions = len(x.shape)
    prod_non_batch_dimensions = 1
    for ii in range(num_non_batch_dimensions - 1):
        prod_non_batch_dimensions *= int(x.shape[ii + 1])
    x = tf.reshape(x, [tf.shape(x)[0], -1])
    w = tf.get_variable(
        'DW', [prod_non_batch_dimensions, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #14
Source File: resnet_model.py From Action_Recognition_Zoo with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #15
Source File: neural.py From attend_infer_repeat with GNU General Public License v3.0
def activation_based_init(nonlinearity):
    """Returns an initialization based on a nonlinearity."""
    init = tf.uniform_unit_scaling_initializer()
    if nonlinearity == tf.nn.relu:
        init = tf.contrib.layers.xavier_initializer()
    elif nonlinearity == tf.nn.elu:
        init = tf.contrib.layers.variance_scaling_initializer()
    elif nonlinearity == selu:
        init = tf.contrib.layers.variance_scaling_initializer(
            factor=1.0, mode='FAN_IN')
    return init
Example #16
Source File: resnet_model.py From object_detection_with_tensorflow with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #17
Source File: madry_tf.py From fast_adversarial with BSD 3-Clause "New" or "Revised" License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    num_non_batch_dimensions = len(x.shape)
    prod_non_batch_dimensions = 1
    for ii in range(num_non_batch_dimensions - 1):
        prod_non_batch_dimensions *= int(x.shape[ii + 1])
    x = tf.reshape(x, [tf.shape(x)[0], -1])
    w = tf.get_variable(
        'DW', [prod_non_batch_dimensions, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #18
Source File: model_utils.py From f-lm with MIT License
def sharded_variable(name, shape, num_shards, dtype=tf.float32,
                     transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # Note: both branches select the same initializer, so the `transposed`
    # flag has no effect here in the original source.
    if transposed:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    else:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + "_" + str(i), [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype)
            for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py
Example #19
Source File: resnet_model.py From hands-detection with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #20
Source File: iclr_2017_benchmark.py From fold with Apache License 2.0
def tree_lstm(self, left, right):
    # A variation on the tree LSTM -- we add an extra hidden layer.
    if self._weights is None:
        with tf.variable_scope(self._vscope):
            self._weights_0 = tf.get_variable(
                "weights_0", [FLAGS.vector_size * 2, FLAGS.vector_size],
                initializer=tf.uniform_unit_scaling_initializer(1.43))
            self._bias_0 = tf.get_variable(
                "bias_0", [FLAGS.vector_size],
                initializer=tf.zeros_initializer())
            self._weights = tf.get_variable(
                "weights", [FLAGS.vector_size, FLAGS.vector_size * 4],
                initializer=tf.uniform_unit_scaling_initializer(1.0))
            self._bias = tf.get_variable(
                "bias", [FLAGS.vector_size * 4],
                initializer=tf.zeros_initializer())
    # One hidden layer
    x = tf.concat([left, right], 1)
    h0 = tf.nn.relu(tf.add(tf.matmul(x, self._weights_0), self._bias_0))
    # Do a single matrix multiply to compute all gates
    h1 = tf.add(tf.matmul(h0, self._weights), self._bias)
    (hfl, hfr, hi, hg) = tf.split(h1, 4, axis=1)
    fl = tf.nn.sigmoid(hfl)  # forget left
    fr = tf.nn.sigmoid(hfr)  # forget right
    i = tf.nn.sigmoid(hi)    # input gate
    g = tf.nn.tanh(hg)       # computation
    ylr = tf.add(tf.multiply(fl, left), tf.multiply(fr, right))
    ygi = tf.multiply(i, g)
    y = tf.add(ylr, ygi)
    return y
Example #21
Source File: iclr_2017_benchmark.py From fold with Apache License 2.0
def tree_fc(self, left, right):
    # A simple tree RNN with a single fully connected layer.
    if self._weights is None:
        with tf.variable_scope(self._vscope):
            self._weights = tf.get_variable(
                "weights", [FLAGS.vector_size * 2, FLAGS.vector_size],
                initializer=tf.uniform_unit_scaling_initializer(1.43))
            self._bias = tf.get_variable(
                "bias", [FLAGS.vector_size],
                initializer=tf.zeros_initializer())
    x = tf.concat([left, right], 1)
    result = tf.add(tf.matmul(x, self._weights), self._bias)
    return tf.nn.relu(result)
Example #22
Source File: mnist_convnet.py From unrestricted-adversarial-examples with Apache License 2.0
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    num_non_batch_dimensions = len(x.shape)
    prod_non_batch_dimensions = 1
    for ii in range(num_non_batch_dimensions - 1):
        prod_non_batch_dimensions *= int(x.shape[ii + 1])
    x = tf.reshape(x, [tf.shape(x)[0], -1])
    w = tf.get_variable(
        'DW', [prod_non_batch_dimensions, out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #23
Source File: resnet_model.py From resnet-tensorflow with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #24
Source File: resnet_model_basic.py From DualLearning with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #25
Source File: deeplab_model.py From SketchySceneColorization with MIT License
def _fully_convolutional(self, x, out_dim):
    """FullyConvolutional layer for final output."""
    w = tf.get_variable(
        'DW', [1, 1, self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.conv2d(x, w, self._stride_arr(1), padding='SAME') + b
Example #26
Source File: deeplab_model.py From SketchySceneColorization with MIT License
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.batch_size, -1])
    w = tf.get_variable(
        'DW', [self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #27
Source File: RMI_model.py From SketchySceneColorization with MIT License
def _fully_connected(self, x, in_dim, out_dim, name):
    """FullyConnected layer for final output."""
    with tf.variable_scope(name):
        w = tf.get_variable(
            'DW', [in_dim, out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)
Example #28
Source File: model.py From NMT_GAN with Apache License 2.0
def prepare(self, is_training):
    assert not self._prepared
    self.is_training = is_training
    # Select devices according to the running is_training flag.
    devices = self.config.train.devices if is_training else self.config.test.devices
    self.devices = ['/gpu:' + i for i in devices.split(',')] or ['/cpu:0']
    # If we have multiple devices (typically GPUs), we set /cpu:0 as the sync device.
    self.sync_device = self.devices[0] if len(self.devices) == 1 else '/cpu:0'
    if is_training:
        with self.graph.as_default():
            with tf.device(self.sync_device):
                # Preparing optimizer.
                self.global_step = tf.get_variable(
                    name='global_step', dtype=INT_TYPE, shape=[],
                    trainable=False, initializer=tf.zeros_initializer)
                self.learning_rate = tf.convert_to_tensor(
                    self.config.train.learning_rate)
                if self.config.train.optimizer == 'adam':
                    self.optimizer = tf.train.AdamOptimizer(
                        learning_rate=self.learning_rate)
                elif self.config.train.optimizer == 'adam_decay':
                    self.learning_rate = learning_rate_decay(
                        self.config, self.global_step)
                    self.optimizer = tf.train.AdamOptimizer(
                        learning_rate=self.learning_rate,
                        beta1=0.9, beta2=0.98, epsilon=1e-9)
                elif self.config.train.optimizer == 'sgd':
                    self.optimizer = tf.train.GradientDescentOptimizer(
                        learning_rate=self.learning_rate)
                elif self.config.train.optimizer == 'mom':
                    self.optimizer = tf.train.MomentumOptimizer(
                        self.learning_rate, momentum=0.9)
                else:
                    logging.info("No optimizer is defined for the model")
                    raise ValueError
    self._initializer = init_ops.variance_scaling_initializer(
        scale=1, mode='fan_avg', distribution='uniform')
    # self._initializer = tf.uniform_unit_scaling_initializer()
    self._prepared = True
Example #29
Source File: resnet_model_mnist.py From Reverse-Cross-Entropy with Apache License 2.0
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Example #30
Source File: resnet_model_cifar.py From Reverse-Cross-Entropy with Apache License 2.0
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)