Python tensorflow.python.ops.variable_scope.variable() Examples
The following are 29 code examples of tensorflow.python.ops.variable_scope.variable(), collected from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions/classes of the module tensorflow.python.ops.variable_scope.
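All of the snippets below assume TF 1.x graph mode, where variable_scope.variable() is an internal helper behind variable creation. For orientation, here is a minimal sketch of the shared pattern, written against the public tf.compat.v1 API rather than internal modules (the names and values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.variable_scope("demo"):
    w = tf.get_variable("w", shape=[2, 2])               # trainable by default
    step = tf.Variable(0, name="step", trainable=False)  # opted out of training

print(w.op.name)     # demo/w
print(step.op.name)  # demo/step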
Example #1
Source File: federated_averaging_optimizer.py From federated-averaging-tutorials with Apache License 2.0
def _generate_shared_variables(self):
  """Generate a global variable placed on ps for each trainable variable.

  This creates a new copy of each user-defined trainable variable and places
  them on ps_device. These variables store the averaged parameters.
  """
  # Only the chief should initialize the variables.
  if self._is_chief:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, "global_model"]
  else:
    collections = ["global_model"]

  # Generate new global variables dependent on trainable variables.
  with ops.device(self._device_setter):
    for v in variables.trainable_variables():
      _ = variable_scope.variable(
          name="%s/%s" % (self._name, v.op.name),
          initial_value=v.initialized_value(),
          trainable=False,
          collections=collections)

    # Place the global step on the ps so that all the workers can see it.
    self._global_step = variables.Variable(
        0, name="%s_global_step" % self._name, trainable=False)
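The ps placement above comes from a device setter. A minimal sketch of that mechanism with the public API (the single-ps cluster layout is an assumption for illustration, not the tutorial's actual configuration):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Variables created under the setter are pinned to the parameter-server job,
# while ordinary ops stay on the worker device.
setter = tf.train.replica_device_setter(
    ps_tasks=1, worker_device="/job:worker/task:0")
with tf.device(setter):
    global_step = tf.Variable(0, name="demo_global_step", trainable=False)

print(global_step.device)  # /job:ps/task:0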
Example #2
Source File: variables.py From tensornets with MIT License
def local_variable(initial_value,
                   validate_shape=True,
                   name=None,
                   use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
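A short usage sketch for the pattern above (public API, illustrative names): variables placed only in GraphKeys.LOCAL_VARIABLES are not covered by global_variables_initializer(), so they need the local initializer.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

count = tf.Variable(0, trainable=False,
                    collections=[tf.GraphKeys.LOCAL_VARIABLES],
                    name="count")

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # not the global initializer
    print(sess.run(count))  # 0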
Example #3
Source File: variables.py From tensornets with MIT License
def global_variable(initial_value,
                    validate_shape=True,
                    name=None,
                    use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
Example #4
Source File: variables.py From tensornets with MIT License
def get_variables(scope=None,
                  suffix=None,
                  collection=ops.GraphKeys.GLOBAL_VARIABLES):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return. Can be a
      variable scope or a string.
    suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.

  Returns:
    a list of variables in collection with scope and suffix.
  """
  if isinstance(scope, variable_scope.VariableScope):
    scope = scope.name
  if suffix is not None:
    if ':' not in suffix:
      suffix += ':'
    scope = (scope or '') + '.*' + suffix
  return ops.get_collection(collection, scope)
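A sketch of the scope filtering (public API, names illustrative). The ':' appended in the helper anchors a suffix match at the variable's output index (e.g. 'weights:0'), so a suffix matches full name components rather than substrings.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.variable_scope("encoder"):
    tf.get_variable("weights", shape=[3, 3])
    tf.get_variable("biases", shape=[3])

# get_collection() applies the same scope regex that get_variables() builds.
matches = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "encoder")
print([v.op.name for v in matches])  # ['encoder/weights', 'encoder/biases']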
Example #5
Source File: metrics_impl.py From lambda-packs with MIT License
def _create_local(name,
                  shape,
                  collections=None,
                  validate_shape=True,
                  dtype=dtypes.float32):
  """Creates a new local variable.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    collections: A list of collection names to which the Variable will be
      added.
    validate_shape: Whether to validate the shape of the variable.
    dtype: Data type of the variables.

  Returns:
    The created variable.
  """
  # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES.
  collections = list(collections or [])
  collections += [ops.GraphKeys.LOCAL_VARIABLES]
  return variable_scope.variable(
      array_ops.zeros(shape, dtype=dtype),
      name=name,
      trainable=False,
      collections=collections,
      validate_shape=validate_shape)
Example #6
Source File: variables.py From tensornets with MIT License
def get_variable_full_name(var):
  """Returns the full name of a variable.

  For normal Variables, this is the same as the var.op.name. For sliced or
  PartitionedVariables, this name is the same for all the slices/partitions.
  In both cases, this is normally the name used in a checkpoint file.

  Args:
    var: A `Variable` object.

  Returns:
    A string that is the full name.
  """
  if var._save_slice_info:
    return var._save_slice_info.full_name
  else:
    return var.op.name


# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
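The slice/partition case above can be reproduced with a partitioner (a minimal sketch against the public API; shapes are arbitrary, and _save_slice_info is the same private attribute the helper reads):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

big = tf.get_variable("big", shape=[4, 2],
                      partitioner=tf.fixed_size_partitioner(2))

# Each slice has its own op name, but all share one checkpoint (full) name.
for v in big:
    print(v.op.name, "->", v._save_slice_info.full_name)
# big/part_0 -> big
# big/part_1 -> big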
Example #7
Source File: AMSGrad.py From scGAN with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
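Several of the remaining examples repeat this slot-creation pattern. The part worth calling out is the colocation: the beta-power scalars are pinned next to the alphabetically first model variable, so every replica reads and updates one shared copy. A stripped-down sketch of that part (public API, illustrative values):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

var_list = [tf.get_variable("b", shape=[1]), tf.get_variable("a", shape=[1])]
first_var = min(var_list, key=lambda x: x.name)  # deterministic across workers

with tf.colocate_with(first_var):
    beta1_power = tf.Variable(0.9, name="beta1_power", trainable=False)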
Example #8
Source File: sync_replicas_optimizer.py From lambda-packs with MIT License
def compute_gradients(self, *args, **kwargs):
  """Compute gradients of "loss" for the variables in "var_list".

  This simply wraps the compute_gradients() from the real optimizer. The
  gradients will be aggregated in the apply_gradients() so that the user can
  modify the gradients, e.g. clipping with a per-replica global norm, if
  needed. The global norm with aggregated gradients can be bad as one
  replica's huge gradients can hurt the gradients from other replicas.

  Args:
    *args: Arguments for compute_gradients().
    **kwargs: Keyword arguments for compute_gradients().

  Returns:
    A list of (gradient, variable) pairs.
  """
  return self._opt.compute_gradients(*args, **kwargs)
Example #9
Source File: adam.py From lambda-packs with MIT License
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable.
  if (self._beta1_power is None or
      self._beta1_power.graph is not var_list[0].graph):
    with ops.colocate_with(var_list[0]):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #10
Source File: variables.py From lambda-packs with MIT License
def get_variables(scope=None,
                  suffix=None,
                  collection=ops.GraphKeys.GLOBAL_VARIABLES):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return. Can be a
      variable scope or a string.
    suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.

  Returns:
    a list of variables in collection with scope and suffix.
  """
  if isinstance(scope, variable_scope.VariableScope):
    scope = scope.name
  if suffix is not None:
    if ':' not in suffix:
      suffix += ':'
    scope = (scope or '') + '.*' + suffix
  return ops.get_collection(collection, scope)
Example #11
Source File: variables.py From lambda-packs with MIT License
def get_unique_variable(var_op_name):
  """Gets the variable uniquely identified by that var_op_name.

  Args:
    var_op_name: the full name of the variable op, including the scope.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = get_variables(scope=var_op_name)
  if not candidates:
    raise ValueError("Couldn't find variable %s" % var_op_name)

  for candidate in candidates:
    if candidate.op.name == var_op_name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' %
                   var_op_name)
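Why the exact-match loop above is needed (a minimal sketch, public API, names illustrative): the scope string acts as a regex prefix, so a query for 'net/w' also picks up 'net/w_extra'.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.variable_scope("net"):
    tf.get_variable("w", shape=[1])
    tf.get_variable("w_extra", shape=[1])

prefix_hits = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "net/w")
print([v.op.name for v in prefix_hits])  # ['net/w', 'net/w_extra']

exact = [v for v in prefix_hits if v.op.name == "net/w"]
print(exact[0].op.name)  # net/w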
Example #12
Source File: variables.py From lambda-packs with MIT License
def assign_from_values_fn(var_names_to_values):
  """Returns a function that assigns specific variables from the given values.

  This function provides a mechanism for performing assignment of variables
  to values in a way that does not fill the graph with large assignment
  values.

  Args:
    var_names_to_values: A map from variable names to values.

  Returns:
    A function that takes a single argument, a `tf.Session`, that applies the
    assignment operation.

  Raises:
    ValueError: if any of the given variable names were not found.
  """
  assign_op, feed_dict = assign_from_values(var_names_to_values)
  def callback(session):
    return session.run(assign_op, feed_dict)
  return callback


# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
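The callback pattern above, written out by hand (public API; the names and the identity value are illustrative). Feeding the value at run time avoids baking a large constant into the graph:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

w = tf.get_variable("weights", shape=[2, 2])

value_ph = tf.placeholder(tf.float32, shape=[2, 2])
assign_op = w.assign(value_ph)

def init_fn(session):
    session.run(assign_op, {value_ph: np.eye(2, dtype=np.float32)})

with tf.Session() as sess:
    init_fn(sess)
    print(sess.run(w))  # 2x2 identity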
Example #13
Source File: variables.py From tf-slim with Apache License 2.0
def global_variable(initial_value,
                    validate_shape=True,
                    name=None,
                    use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
Example #14
Source File: metric_ops.py From lambda-packs with MIT License
def aggregate_metrics(*value_update_tuples):
  """Aggregates the metric value tensors and update ops into two lists.

  Args:
    *value_update_tuples: a variable number of tuples, each of which contain
      the pair of (value_tensor, update_op) from a streaming metric.

  Returns:
    A list of value `Tensor` objects and a list of update ops.

  Raises:
    ValueError: if `value_update_tuples` is empty.
  """
  if not value_update_tuples:
    raise ValueError('Expected at least one value_tensor/update_op pair')
  value_ops, update_ops = zip(*value_update_tuples)
  return list(value_ops), list(update_ops)
Example #15
Source File: AMSGrad.py From AMSGrad-Tensorflow with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #16
Source File: AMSGrad.py From PhysNet with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  # if not create_new and context.in_graph_mode():
  #   create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #17
Source File: opt.py From EMNLP2018_NLI with GNU General Public License v3.0
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #18
Source File: AMSGrad.py From DCRNN with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #19
Source File: AMSGrad.py From Custom-Optimizer-in-TensorFlow with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #20
Source File: variables.py From tf-slim with Apache License 2.0
def get_variable_full_name(var):
  """Returns the full name of a variable.

  For normal Variables, this is the same as the var.op.name. For sliced or
  PartitionedVariables, this name is the same for all the slices/partitions.
  In both cases, this is normally the name used in a checkpoint file.

  Args:
    var: A `Variable` object.

  Returns:
    A string that is the full name.
  """
  if var._save_slice_info:
    return var._save_slice_info.full_name
  else:
    return var.op.name
Example #21
Source File: variables.py From tf-slim with Apache License 2.0
def get_variables(scope=None,
                  suffix=None,
                  collection=ops.GraphKeys.GLOBAL_VARIABLES):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return. Can be a
      variable scope or a string.
    suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.

  Returns:
    a list of variables in collection with scope and suffix.
  """
  if scope and isinstance(scope, variable_scope.VariableScope):
    scope = scope.name
  if suffix is not None:
    if ':' not in suffix:
      suffix += ':'
    scope = (scope or '') + '.*' + suffix
  return ops.get_collection(collection, scope)
Example #22
Source File: variables.py From tf-slim with Apache License 2.0
def local_variable(initial_value,
                   validate_shape=True,
                   name=None,
                   use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
Example #23
Source File: nadam.py From BERT with Apache License 2.0
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable. Sort the var_list to make sure this device is consistent across
  # workers (these need to go on the same PS, otherwise some updates are
  # silently ignored).
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._iterations is None
  if not create_new and context.in_graph_mode():
    create_new = (self._iterations.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)
      self._iterations = variable_scope.variable(
          0., name="iterations", trainable=False)
      self._m_schedule = variable_scope.variable(
          1., name="m_schedule", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #24
Source File: head.py From lambda-packs with MIT License
def __init__(self,
             label_name=None,
             weight_column_name=None,
             enable_centered_bias=False,
             head_name=None,
             loss_fn=None,
             thresholds=None):
  """`Head` for binary classification with logistic regression.

  Args:
    label_name: String, name of the key in label dict. Can be `None` if
      label is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training.
      It will be multiplied by the loss of the example.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. Predictions, summary, metrics keys are
      suffixed by `"/" + head_name` and the default variable scope is
      `head_name`.
    loss_fn: Loss function.
    thresholds: thresholds for eval.

  Raises:
    ValueError: if n_classes is invalid.
  """
  super(_BinaryLogisticHead, self).__init__(
      problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
      logits_dimension=1,
      label_name=label_name,
      weight_column_name=weight_column_name,
      head_name=head_name)
  self._thresholds = thresholds if thresholds else (.5,)
  self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes
  self._enable_centered_bias = enable_centered_bias
Example #25
Source File: head.py From lambda-packs with MIT License
def __init__(self,
             label_dimension,
             loss_fn,
             link_fn,
             label_name=None,
             weight_column_name=None,
             enable_centered_bias=False,
             head_name=None):
  """`Head` for regression.

  Args:
    label_dimension: Number of regression labels per example. This is the
      size of the last dimension of the labels `Tensor` (typically, this has
      shape `[batch_size, label_dimension]`).
    loss_fn: Loss function, takes logits and labels and returns loss.
    link_fn: Link function, takes a logits tensor and returns the output.
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training.
      It will be multiplied by the loss of the example.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. Predictions, summary and metrics keys are
      suffixed by `"/" + head_name` and the default variable scope is
      `head_name`.
  """
  super(_RegressionHead, self).__init__(
      problem_type=constants.ProblemType.LINEAR_REGRESSION,
      logits_dimension=label_dimension,
      label_name=label_name,
      weight_column_name=weight_column_name,
      head_name=head_name)
  self._loss_fn = loss_fn
  self._link_fn = link_fn
  self._enable_centered_bias = enable_centered_bias
Example #26
Source File: optimizer.py From BERT with Apache License 2.0
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable. Sort the var_list to make sure this device is consistent across
  # workers (these need to go on the same PS, otherwise some updates are
  # silently ignored).
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._iterations is None
  if not create_new and context.in_graph_mode():
    create_new = (self._iterations.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)
      self._iterations = variable_scope.variable(
          0., name="iterations", trainable=False)
      self._m_schedule = variable_scope.variable(
          1., name="m_schedule", trainable=False)

  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #27
Source File: metrics_impl.py From lambda-packs with MIT License
def _local_variable(initial_value, validate_shape=True, name=None):
  """Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      name=name)
Example #28
Source File: sampling_ops.py From lambda-packs with MIT License
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
  """Estimate data distribution as labels are seen."""
  # Variable to track running count of classes. Smooth by a nonzero value to
  # avoid division-by-zero. Higher values provide more stability at the cost
  # of slower convergence.
  if smoothing_constant <= 0:
    raise ValueError('smoothing_constant must be nonzero.')
  num_examples_per_class_seen = variable_scope.variable(
      initial_value=[smoothing_constant] * num_classes,
      trainable=False,
      name='class_count',
      dtype=dtypes.int64)

  # Update the class-count based on what labels are seen in batch.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(
          array_ops.one_hot(labels, num_classes, dtype=dtypes.int64), 0))

  # Normalize count into a probability.
  # NOTE: Without the `+= 0` line below, the test
  # `testMultiThreadedEstimateDataDistribution` fails. The reason is that
  # before this line, `num_examples_per_class_seen` is a Tensor that shares a
  # buffer with an underlying `ref` object. When the `ref` is changed by
  # another thread, `num_examples_per_class_seen` changes as well. Since this
  # can happen in the middle of the normalization computation, we get
  # probabilities that are very far from summing to one. Adding `+= 0` copies
  # the contents of the tensor to a new buffer, which will be consistent from
  # the start to the end of the normalization computation.
  num_examples_per_class_seen += 0

  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))

  # Must return float32 (not float64) to agree with downstream
  # `_verify_input` checks.
  return math_ops.cast(init_prob_estimate, dtypes.float32)
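The smoothed running-count update in isolation (a minimal sketch with the public API; the labels and constants are made up):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

num_classes, smoothing = 3, 10
counts = tf.Variable([smoothing] * num_classes, trainable=False,
                     dtype=tf.int64, name="class_count")
labels = tf.constant([0, 0, 2], dtype=tf.int64)

updated = counts.assign_add(
    tf.reduce_sum(tf.one_hot(labels, num_classes, dtype=tf.int64), 0))
probs = tf.cast(updated, tf.float32) / tf.cast(tf.reduce_sum(updated),
                                               tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(probs))  # counts [12, 10, 11] / 33 ~= [0.36, 0.30, 0.33]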
Example #29
Source File: head.py From lambda-packs with MIT License
def _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):
  """Creates and returns training op for centered bias."""
  with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
    logits_dimension = array_ops.shape(centered_bias)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias, (batch_size,)),
        (batch_size, logits_dimension))
    with ops.name_scope(None, "centered_bias", (labels, logits)):
      centered_bias_loss = math_ops.reduce_mean(
          loss_fn(labels, logits, weights), name="training_loss")
  # Learn central bias by an optimizer. 0.1 is a conservative lr for a
  # single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=(centered_bias,), name=name)
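The tile/reshape trick above, in isolation: one per-class bias row is broadcast to a full [batch_size, logits_dimension] logits tensor (a sketch with the public API; the values are arbitrary).

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch_size = 4
centered_bias = tf.constant([0.1, 0.2, 0.3])
logits_dimension = tf.shape(centered_bias)[0]

logits = tf.reshape(tf.tile(centered_bias, (batch_size,)),
                    (batch_size, logits_dimension))

with tf.Session() as sess:
    print(sess.run(logits))  # four identical rows of [0.1, 0.2, 0.3]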