Python tensorflow.python.ops.variable_scope.get_variable() Examples
The following are 30 code examples of tensorflow.python.ops.variable_scope.get_variable(), drawn from open-source projects; each example notes its source file, originating project, and license. You may also want to check out all available functions and classes of the module tensorflow.python.ops.variable_scope.
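Before working through the examples, it may help to recall what get_variable() does: it creates a variable under the current variable scope on first use, and returns the existing one when the scope is in reuse mode. The following minimal sketch is our own TF 1.x-style illustration, not taken from any of the projects below:

import tensorflow as tf  # assumes TensorFlow 1.x graph mode

with tf.variable_scope("layer"):
    # First call creates a variable named "layer/w".
    w = tf.get_variable("w", shape=[3, 4],
                        initializer=tf.zeros_initializer())

with tf.variable_scope("layer", reuse=True):
    # Same name in reuse mode returns the existing variable.
    w_again = tf.get_variable("w", shape=[3, 4])

assert w is w_again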
Example #1
Source File: core_rnn_cell_impl.py From auto-alt-text-lambda-api with MIT License
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with vs.variable_scope(scope or "embedding_wrapper"):  # "EmbeddingWrapper"
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if type(state) is tuple:
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(
          embedding, array_ops.reshape(inputs, [-1]))
  return self._cell(embedded, state)
Example #2
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i,
                                  [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
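The shard-size arithmetic above distributes the leftover rows one at a time over the leading shards. As a quick check, here is the same size logic in plain Python with hypothetical numbers (our illustration, not part of the project): a [10, 4] variable split into 3 shards yields row counts [4, 3, 3].

import math

def shard_sizes(num_rows, num_shards):
    # Mirrors _get_sharded_variable: base size, plus one extra row
    # for each of the first `remaining` shards.
    unit = int(math.floor(num_rows / num_shards))
    remaining = num_rows - unit * num_shards
    return [unit + 1 if i < remaining else unit for i in range(num_shards)]

print(shard_sizes(10, 3))  # [4, 3, 3]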
Example #3
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def _highway(self, inp, out):
  input_size = inp.get_shape().with_rank(2)[1].value
  carry_weight = vs.get_variable("carry_w", [input_size, input_size])
  carry_bias = vs.get_variable(
      "carry_b", [input_size],
      initializer=init_ops.constant_initializer(self._carry_bias_init))
  carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
  if self._couple_carry_transform_gates:
    transform = 1 - carry
  else:
    transform_weight = vs.get_variable("transform_w",
                                       [input_size, input_size])
    transform_bias = vs.get_variable(
        "transform_b", [input_size],
        initializer=init_ops.constant_initializer(-self._carry_bias_init))
    transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp, transform_weight,
                                                  transform_bias))
  return inp * carry + out * transform
Example #4
Source File: feature_column.py From lambda-packs with MIT License
def _get_sparse_tensors(self, inputs, weight_collections=None,
                        trainable=None):
  """Returns an IdWeightPair.

  `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
  weights.

  `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
  `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
  `SparseTensor` of `float` or `None` to indicate all weights should be
  taken to be 1. If specified, `weight_tensor` must have exactly the same
  shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
  output of a `VarLenFeature` which is a ragged matrix.

  Args:
    inputs: A `LazyBuilder` as a cache to get input tensors required to
      create `IdWeightPair`.
    weight_collections: List of graph collections to which variables (if
      any will be created) are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
  """
  pass
Example #5
Source File: feature_column.py From lambda-packs with MIT License
def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum')
Example #6
Source File: evaluation.py From lambda-packs with MIT License
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  graph = ops.get_default_graph()
  eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
  if len(eval_steps) == 1:
    return eval_steps[0]
  elif len(eval_steps) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  else:
    counter = variable_scope.get_variable(
        'eval_step',
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES,
                     ops.GraphKeys.EVAL_STEP])
    return counter
Example #7
Source File: training_util.py From lambda-packs with MIT License
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step tensor. If missing,
      use default graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step tensor is already defined.
  """
  graph = graph or ops.get_default_graph()
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.GLOBAL_STEP])
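In typical TF 1.x training code, the global step created this way is passed to an optimizer so it increments once per training step. A hedged sketch using the public tf.train wrappers (our own illustration, not part of lambda-packs):

import tensorflow as tf  # assumes TensorFlow 1.x

x = tf.get_variable("x", shape=[], initializer=tf.constant_initializer(5.0))
loss = tf.square(x)
global_step = tf.train.get_or_create_global_step()
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)  # bumps global_step once per run

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(global_step))  # 1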
Example #8
Source File: core_rnn_cell.py From lambda-packs with MIT License
def call(self, inputs, state):
  """Run the cell on embedded inputs."""
  with ops.device("/cpu:0"):
    if self._initializer:
      initializer = self._initializer
    elif vs.get_variable_scope().initializer:
      initializer = vs.get_variable_scope().initializer
    else:
      # Default initializer for embeddings should have variance=1.
      sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
      initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

    if isinstance(state, tuple):
      data_type = state[0].dtype
    else:
      data_type = state.dtype

    embedding = vs.get_variable(
        "embedding", [self._embedding_classes, self._embedding_size],
        initializer=initializer,
        dtype=data_type)
    embedded = embedding_ops.embedding_lookup(
        embedding, array_ops.reshape(inputs, [-1]))
  return self._cell(embedded, state)
Example #9
Source File: rnn_cell.py From lambda-packs with MIT License
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #10
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i,
                                  [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
Example #11
Source File: rnn_cell.py From lambda-packs with MIT License
def _highway(self, inp, out):
  input_size = inp.get_shape().with_rank(2)[1].value
  carry_weight = vs.get_variable("carry_w", [input_size, input_size])
  carry_bias = vs.get_variable(
      "carry_b", [input_size],
      initializer=init_ops.constant_initializer(self._carry_bias_init))
  carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
  if self._couple_carry_transform_gates:
    transform = 1 - carry
  else:
    transform_weight = vs.get_variable("transform_w",
                                       [input_size, input_size])
    transform_bias = vs.get_variable(
        "transform_b", [input_size],
        initializer=init_ops.constant_initializer(-self._carry_bias_init))
    transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp, transform_weight,
                                                  transform_bias))
  return inp * carry + out * transform
Example #12
Source File: specs_ops.py From auto-alt-text-lambda-api with MIT License
def Var(name, *args, **kw):
  """Implements an operator that generates a variable.

  This function is still experimental. Use it only
  for generating a single variable instance for each name.

  Args:
    name: Name of the variable.
    *args: Other arguments to get_variable.
    **kw: Other keywords for get_variable.

  Returns:
    A specs object for generating a variable.
  """

  def var(_):
    return variable_scope.get_variable(name, *args, **kw)

  return specs_lib.Callable(var)
Example #13
Source File: core_rnn_cell.py From Multiview2Novelview with MIT License
def call(self, inputs, state):
  """Run the cell on embedded inputs."""
  with ops.device("/cpu:0"):
    if self._initializer:
      initializer = self._initializer
    elif vs.get_variable_scope().initializer:
      initializer = vs.get_variable_scope().initializer
    else:
      # Default initializer for embeddings should have variance=1.
      sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
      initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

    if isinstance(state, tuple):
      data_type = state[0].dtype
    else:
      data_type = state.dtype

    embedding = vs.get_variable(
        "embedding", [self._embedding_classes, self._embedding_size],
        initializer=initializer,
        dtype=data_type)
    embedded = embedding_ops.embedding_lookup(
        embedding, array_ops.reshape(inputs, [-1]))
  return self._cell(embedded, state)
Example #14
Source File: embeddings_ops.py From auto-alt-text-lambda-api with MIT License
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for categorical variable with given number of classes.

  Args:
    tensor_in: Input tensor with class identifier (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of embedding vector to represent each class.
    name: Name of this categorical variable.

  Returns:
    Tensor of input shape, with additional dimension for embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
    tensor, where each row is representation of the class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + '_embeddings',
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in)
Example #15
Source File: embeddings_ops.py From lambda-packs with MIT License
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for categorical variable with given number of classes.

  Args:
    tensor_in: Input tensor with class identifier (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of embedding vector to represent each class.
    name: Name of this categorical variable.

  Returns:
    Tensor of input shape, with additional dimension for embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
    tensor, where each row is representation of the class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + '_embeddings',
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in)
Example #16
Source File: specs_ops.py From lambda-packs with MIT License
def Var(name, *args, **kw):
  """Implements an operator that generates a variable.

  This function is still experimental. Use it only
  for generating a single variable instance for each name.

  Args:
    name: Name of the variable.
    *args: Other arguments to get_variable.
    **kw: Other keywords for get_variable.

  Returns:
    A specs object for generating a variable.
  """

  def var(_):
    return variable_scope.get_variable(name, *args, **kw)

  return specs_lib.Callable(var)
Example #17
Source File: bit_utils.py From bit-rnn with Apache License 2.0
def __exit__(self, *args):
  global _object_stack
  _object_stack.pop()
  tf.get_variable = self._old_get_variable
  variable_scope.get_variable = self._old_get_variable
Example #18
Source File: bit_utils.py From bit-rnn with Apache License 2.0
def __enter__(self):
  global _object_stack
  _object_stack.append(self)
  self._old_get_variable = tf.get_variable
  tf.get_variable = _new_get_variable
  variable_scope.get_variable = _new_get_variable
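Examples #17 and #18 are the two halves of a context manager that temporarily monkey-patches get_variable, so every variable created inside the with block can be intercepted (in bit-rnn, to quantize weights). Since the surrounding class is not shown, here is a self-contained sketch of the same pattern with hypothetical names (patched_get_variable, wrapper):

import tensorflow as tf
from tensorflow.python.ops import variable_scope

class patched_get_variable(object):
    """Swap get_variable for a wrapping version; restore it on exit."""

    def __init__(self, wrapper):
        self._wrapper = wrapper  # e.g. a quantization function

    def __enter__(self):
        self._old = tf.get_variable

        def new_get_variable(*args, **kwargs):
            # Create (or fetch) the variable as usual, then wrap it.
            return self._wrapper(self._old(*args, **kwargs))

        tf.get_variable = new_get_variable
        variable_scope.get_variable = new_get_variable
        return self

    def __exit__(self, *args):
        tf.get_variable = self._old
        variable_scope.get_variable = self._old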
Example #19
Source File: decisions_to_data.py From auto-alt-text-lambda-api with MIT License
def _define_vars(self, params, **kwargs):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    self.tree_parameters = variable_scope.get_variable(
        name='stochastic_hard_tree_parameters_%d' % self.layer_num,
        shape=[params.num_nodes, params.num_features],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
    self.tree_thresholds = variable_scope.get_variable(
        name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
        shape=[params.num_nodes],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
Example #20
Source File: decisions_to_data.py From auto-alt-text-lambda-api with MIT License
def _define_vars(self, params, **kwargs):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    self.tree_parameters = variable_scope.get_variable(
        name='hard_tree_parameters_%d' % self.layer_num,
        shape=[params.num_nodes, params.num_features],
        initializer=variable_scope.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
    self.tree_thresholds = variable_scope.get_variable(
        name='hard_tree_thresholds_%d' % self.layer_num,
        shape=[params.num_nodes],
        initializer=variable_scope.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
Example #21
Source File: decisions_to_data.py From auto-alt-text-lambda-api with MIT License
def _define_vars(self, params, **kwargs):
  with ops.device(self.device_assigner.get_device(self.layer_num)):
    self.tree_parameters = variable_scope.get_variable(
        name='tree_parameters_%d' % self.layer_num,
        shape=[params.num_nodes, params.num_features],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
    self.tree_thresholds = variable_scope.get_variable(
        name='tree_thresholds_%d' % self.layer_num,
        shape=[params.num_nodes],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
Example #22
Source File: crf.py From auto-alt-text-lambda-api with MIT License
def crf_log_likelihood(inputs, tag_indices, sequence_lengths,
                       transition_params=None):
  """Computes the log-likelihood of tag sequences in a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which
        we compute the log-likelihood.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix, if available.
  Returns:
    log_likelihood: A scalar containing the log-likelihood of the given
        sequence of tag indices.
    transition_params: A [num_tags, num_tags] transition matrix. This is
        either provided by the caller or created in this function.
  """
  # Get shape information.
  num_tags = inputs.get_shape()[2].value

  # Get the transition matrix if not provided.
  if transition_params is None:
    transition_params = vs.get_variable("transitions", [num_tags, num_tags])

  sequence_scores = crf_sequence_score(inputs, tag_indices, sequence_lengths,
                                       transition_params)
  log_norm = crf_log_norm(inputs, sequence_lengths, transition_params)

  # Normalize the scores to get the log-likelihood.
  log_likelihood = sequence_scores - log_norm
  return log_likelihood, transition_params
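Note the role of get_variable here: when transition_params is omitted, the "transitions" matrix is created inside the current variable scope and trained along with the rest of the model. A hedged usage sketch via the TF 1.x contrib wrapper (graph construction only, our own illustration):

import tensorflow as tf  # assumes TF 1.x with tf.contrib available

batch, max_len, num_tags = 2, 5, 4
scores = tf.placeholder(tf.float32, [batch, max_len, num_tags])
tags = tf.placeholder(tf.int32, [batch, max_len])
lengths = tf.placeholder(tf.int32, [batch])

# transition_params is created internally via get_variable("transitions", ...)
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
    scores, tags, lengths)
loss = tf.reduce_mean(-log_likelihood)  # standard CRF training objective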
Example #23
Source File: input_pipeline_ops.py From auto-alt-text-lambda-api with MIT License
def seek_next(string_list, shuffle=False, seed=None, num_epochs=None):
  """Returns an op that seeks the next element in a list of strings.

  Seeking happens in a round robin fashion. This op creates a variable called
  counter that is initialized to -1 and is used to keep track of which element
  in the list was returned. If num_epochs is not None, then we limit the
  number of times we go around the string_list before OutOfRangeError is
  thrown. It creates a variable to keep track of this.

  Args:
    string_list: A list of strings.
    shuffle: If true, we shuffle the string_list differently for each epoch.
    seed: Seed used for shuffling.
    num_epochs: Returns OutOfRangeError once string_list has been repeated
                num_epoch times. If unspecified then keeps on looping.

  Returns:
    An op that produces the next element in the provided list.
  """
  expanded_list = _create_list(string_list, shuffle, seed, num_epochs)

  with variable_scope.variable_scope("obtain_next"):
    counter = variable_scope.get_variable(
        name="obtain_next_counter",
        initializer=constant_op.constant(-1, dtype=dtypes.int64),
        dtype=dtypes.int64)
    with ops.device(counter.device):
      string_tensor = constant_op.constant(
          expanded_list, name="obtain_next_expanded_list")
      if num_epochs:
        filename_counter = variable_scope.get_variable(
            name="obtain_next_filename_counter",
            initializer=constant_op.constant(0, dtype=dtypes.int64),
            dtype=dtypes.int64)
        c = filename_counter.count_up_to(len(expanded_list))
        with ops.control_dependencies([c]):
          return obtain_next(string_tensor, counter)
      else:
        return obtain_next(string_tensor, counter)
Example #24
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License
def testConv2DSameOdd(self):
  n, n2 = 5, 3

  # Input image.
  x = create_test_input(1, n, n, 1)

  # Convolution kernel.
  w = create_test_input(1, 3, 3, 1)
  w = array_ops.reshape(w, [3, 3, 1, 1])

  variable_scope.get_variable('Conv/weights', initializer=w)
  variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
  variable_scope.get_variable_scope().reuse_variables()

  y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
  y1_expected = math_ops.to_float([[14, 28, 43, 58, 34],
                                   [28, 48, 66, 84, 46],
                                   [43, 66, 84, 102, 55],
                                   [58, 84, 102, 120, 64],
                                   [34, 46, 55, 64, 30]])
  y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

  y2 = resnet_utils.subsample(y1, 2)
  y2_expected = math_ops.to_float([[14, 43, 34],
                                   [43, 84, 55],
                                   [34, 55, 30]])
  y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

  y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
  y3_expected = y2_expected

  y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
  y4_expected = y2_expected

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllClose(y1.eval(), y1_expected.eval())
    self.assertAllClose(y2.eval(), y2_expected.eval())
    self.assertAllClose(y3.eval(), y3_expected.eval())
    self.assertAllClose(y4.eval(), y4_expected.eval())
Example #25
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License
def testConv2DSameEven(self):
  n, n2 = 4, 2

  # Input image.
  x = create_test_input(1, n, n, 1)

  # Convolution kernel.
  w = create_test_input(1, 3, 3, 1)
  w = array_ops.reshape(w, [3, 3, 1, 1])

  variable_scope.get_variable('Conv/weights', initializer=w)
  variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
  variable_scope.get_variable_scope().reuse_variables()

  y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
  y1_expected = math_ops.to_float([[14, 28, 43, 26],
                                   [28, 48, 66, 37],
                                   [43, 66, 84, 46],
                                   [26, 37, 46, 22]])
  y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

  y2 = resnet_utils.subsample(y1, 2)
  y2_expected = math_ops.to_float([[14, 43],
                                   [43, 84]])
  y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

  y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
  y3_expected = y2_expected

  y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
  y4_expected = math_ops.to_float([[48, 37],
                                   [37, 22]])
  y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllClose(y1.eval(), y1_expected.eval())
    self.assertAllClose(y2.eval(), y2_expected.eval())
    self.assertAllClose(y3.eval(), y3_expected.eval())
    self.assertAllClose(y4.eval(), y4_expected.eval())
Example #26
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License
def testConv2DSameEven(self):
  n, n2 = 4, 2

  # Input image.
  x = create_test_input(1, n, n, 1)

  # Convolution kernel.
  w = create_test_input(1, 3, 3, 1)
  w = array_ops.reshape(w, [3, 3, 1, 1])

  variable_scope.get_variable('Conv/weights', initializer=w)
  variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
  variable_scope.get_variable_scope().reuse_variables()

  y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
  y1_expected = math_ops.to_float([[14, 28, 43, 26],
                                   [28, 48, 66, 37],
                                   [43, 66, 84, 46],
                                   [26, 37, 46, 22]])
  y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

  y2 = resnet_utils.subsample(y1, 2)
  y2_expected = math_ops.to_float([[14, 43],
                                   [43, 84]])
  y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

  y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
  y3_expected = y2_expected

  y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
  y4_expected = math_ops.to_float([[48, 37],
                                   [37, 22]])
  y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllClose(y1.eval(), y1_expected.eval())
    self.assertAllClose(y2.eval(), y2_expected.eval())
    self.assertAllClose(y3.eval(), y3_expected.eval())
    self.assertAllClose(y4.eval(), y4_expected.eval())
Example #27
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def _linear(self, args):
  out_size = 4 * self._num_units
  proj_size = args.get_shape()[-1]
  weights = vs.get_variable("weights", [proj_size, out_size])
  out = math_ops.matmul(args, weights)
  if not self._layer_norm:
    bias = vs.get_variable("biases", [out_size])
    out = nn_ops.bias_add(out, bias)
  return out
Example #28
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def _norm(self, inp, scope):
  shape = inp.get_shape()[-1:]
  gamma_init = init_ops.constant_initializer(self._g)
  beta_init = init_ops.constant_initializer(self._b)
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by layer_norm.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = layers.layer_norm(inp, reuse=True, scope=scope)
  return normalized
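The design choice here is worth noting: gamma and beta are created eagerly under the scope so that the subsequent layers.layer_norm(inp, reuse=True, scope=scope) call picks up these custom initial values (self._g and self._b) instead of its defaults.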
Example #29
Source File: tpu_estimator.py From Chinese-XLNet with Apache License 2.0
def _create_global_step(graph):
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.GLOBAL_STEP])
Example #30
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0
def _test_variable(data):
    """One iteration of a variable."""
    tf.reset_default_graph()
    input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
    input_tensor = array_ops.reshape(input_op, data.shape)

    size = input_tensor.shape.dims[1]
    with variable_scope.variable_scope("linear", reuse=None):
        w = variable_scope.get_variable(
            "w", shape=[size, size], dtype=input_tensor.dtype)
        math_ops.matmul(input_tensor, w)

    compare_tf_with_tvm(data, 'Placeholder:0', 'MatMul:0',
                        init_global_variables=True)