Python tensorflow.python.ops.variables.trainable_variables() Examples

The following are 30 code examples of tensorflow.python.ops.variables.trainable_variables(), taken from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.variables.
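Before the project-specific examples, here is a minimal sketch of what the call returns (this sketch assumes TensorFlow 1.x graph mode; tf.compat.v1.trainable_variables is the public alias for the same function): it lists every variable in the graph's TRAINABLE_VARIABLES collection, i.e. every variable created with trainable=True.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # collection-based variable tracking is a graph-mode feature

w = tf.get_variable("w", shape=[4, 2])                # trainable by default
b = tf.get_variable("b", shape=[2], trainable=False)  # excluded from TRAINABLE_VARIABLES

print([v.op.name for v in tf.trainable_variables()])  # -> ['w']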
Example #1
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def _sync_variables_ops(ctx):
  """Create varriables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are ususally too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """

  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()] 
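A hypothetical usage sketch for the ops returned above (the session setup below is an assumption for illustration, not part of tpu_estimator.py, and ctx would be the _InternalTPUContext that TPUEstimator supplies): grouping the sync ops and running them forces every trainable variable to be read back on the host, with check_numerics flagging any NaN values.

import tensorflow.compat.v1 as tf

sync_ops = _sync_variables_ops(ctx)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(sync_ops)  # reads (and NaN-checks) each trainable variable on the host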
Example #2
Source File: rev_block_lib_test.py    From tf-slim with Apache License 2.0
def testDoubleCallInUniqueScope(self):

    @rev_block_lib.recompute_grad
    def layer_with_recompute(inputs):
      with variable_scope.variable_scope("inner", use_resource=True):
        return core_layers.dense(inputs, 2)

    with variable_scope.variable_scope("layer", use_resource=True):
      inputs = array_ops.ones((2, 4), dtypes.float32)

      with variable_scope.variable_scope("layer1", use_resource=True):
        out1 = layer_with_recompute(inputs)
      with variable_scope.variable_scope("layer2", use_resource=True):
        out2 = layer_with_recompute(inputs) + out1
      out = math_ops.reduce_sum(out2)

    tvars = variables.trainable_variables()
    assert len(tvars) == 4
    grads = gradients_impl.gradients(out, [inputs] + tvars)
    for grad in grads:
      self.assertIsNotNone(grad) 
Example #3
Source File: rev_block_lib_test.py    From tf-slim with Apache License 2.0
def testDoubleCallInSameScopeFails(self):

    @rev_block_lib.recompute_grad
    def layer_with_recompute(inputs):
      return core_layers.dense(inputs, 2)

    with variable_scope.variable_scope("layer", use_resource=True):
      inputs = array_ops.ones((2, 4), dtypes.float32)
      out1 = layer_with_recompute(inputs)
      out2 = layer_with_recompute(inputs) + out1
      out = math_ops.reduce_sum(out2)

    tvars = variables.trainable_variables()
    assert len(tvars) == 4
    with self.assertRaisesWithPredicateMatch(
        ValueError, "called twice in the same enclosing scope"):
      gradients_impl.gradients(out, [inputs] + tvars) 
Example #4
Source File: hybrid_model.py    From deep_image_model with Apache License 2.0
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
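For readers unfamiliar with apply_regularization: roughly speaking, it sums regularizer(v) over the variables passed in. Below is a rough equivalent of the regularization term above, sketched with plain ops and a hypothetical L2 weight reg_scale (not a name used in the project).

import tensorflow.compat.v1 as tf

reg_scale = 1e-4  # hypothetical regularization weight
l2_penalty = reg_scale * tf.add_n(
    [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
loss = loss + l2_penalty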
Example #5
Source File: training_test.py    From tf-slim with Apache License 2.0
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    def transform_grads_fn(grads):
      if gradient_multiplier != 1.0:
        variables = variables_lib2.trainable_variables()
        gradient_multipliers = {var: gradient_multiplier for var in variables}

        with ops.name_scope('multiply_grads'):
          return training.multiply_gradients(grads, gradient_multipliers)
      else:
        return grads

    return training.create_train_op(
        total_loss, optimizer, transform_grads_fn=transform_grads_fn) 
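For context, training.multiply_gradients receives the optimizer's (gradient, variable) pairs and scales each gradient by its entry in gradient_multipliers. A hand-rolled sketch of that transformation, assuming transform_grads_fn is handed compute_gradients-style pairs (multiply_grads_manually is a hypothetical helper, not part of tf-slim):

def multiply_grads_manually(grads_and_vars, gradient_multipliers):
  scaled = []
  for grad, var in grads_and_vars:
    if grad is not None and var in gradient_multipliers:
      grad = grad * gradient_multipliers[var]  # scale only variables with an entry
    scaled.append((grad, var))
  return scaled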
Example #6
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _sync_variables_ops(ctx):
  """Create varriables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are ususally too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """

  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()] 
Example #7
Source File: tpu_estimator.py    From xlnet with Apache License 2.0
def _sync_variables_ops(ctx):
  """Create varriables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are ususally too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """

  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()] 
Example #8
Source File: federated_averaging_optimizer.py    From federated-averaging-tutorials with Apache License 2.0
def _generate_shared_variables(self):
    """Generate a global variable placed on ps for each trainable variable.

       This creates a new copy of each user-defined trainable variable and
       places it on ps_device. These copies store the averaged parameters.
    """
    # Only the chief should initialize the variables
    if self._is_chief:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES, "global_model"]
    else:
      collections = ["global_model"]

    # Generate new global variables dependent on trainable variables.
    with ops.device(self._device_setter):
      for v in variables.trainable_variables():
        _ = variable_scope.variable(
            name="%s/%s" % (self._name, v.op.name),
            initial_value=v.initialized_value(), trainable=False,
            collections=collections)

      # Place the global step in the ps so that all the workers can see it
      self._global_step = variables.Variable(0, name="%s_global_step" %
          self._name, trainable=False) 
Example #9
Source File: hybrid_model.py    From auto-alt-text-lambda-api with MIT License
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #10
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers) 
Example #11
Source File: learning_test.py    From tf-slim with Apache License 2.0
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_labels, tf_predictions)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers) 
Example #12
Source File: learning_test.py    From keras-lambda with MIT License
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers) 
Example #13
Source File: hybrid_model.py    From lambda-packs with MIT License
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #14
Source File: hybrid_model.py    From keras-lambda with MIT License
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Example #15
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
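The membership test above is the crux: a variable appears in trainable_variables() only if it was created with trainable=True, so the getter can sort weights into trainable and non-trainable buckets. A small illustration (the variable names below are hypothetical, and graph mode is assumed as in the sketch near the top of this page):

import tensorflow.compat.v1 as tf

kernel = tf.get_variable("kernel", shape=[3, 3])              # trainable by default
stats = tf.get_variable("stats", shape=[3], trainable=False)  # e.g. running statistics

assert any(v is kernel for v in tf.trainable_variables())
assert not any(v is stats for v in tf.trainable_variables())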
Example #16
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #17
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #18
Source File: rnn_dropout.py    From GtS with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #19
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #20
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #21
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #22
Source File: seq2seq_model.py    From DeepAffinity with GNU General Public License v3.0
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #23
Source File: rnn_cell.py    From Artificial-Neural-Network-THU-2018 with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #24
Source File: rnn_cell.py    From Artificial-Neural-Network-THU-2018 with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #25
Source File: rnn_cell.py    From Artificial-Neural-Network-THU-2018 with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    trainable = (variable in tf_variables.trainable_variables() or
                 (isinstance(variable, tf_variables.PartitionedVariable) and
                  list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #26
Source File: optimizer.py    From lambda-packs with MIT License
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but  could not locate source variable." % (str(v)))
  return v 
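A sketch of the mapping this helper performs (the setup below is hypothetical and assumes graph mode, as in the sketch near the top of this page): a ResourceVariable's handle tensor is produced by a "VarHandleOp", and _get_variable_for recovers the owning variable by comparing handle ops across the trainable variables.

import tensorflow.compat.v1 as tf

v = tf.get_variable("rv", shape=[2], use_resource=True)
assert v.handle.op.type == "VarHandleOp"
assert _get_variable_for(v.handle) is v  # the handle resolves back to its ResourceVariable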
Example #27
Source File: stochastic_weight_averaging.py    From swa-tf with BSD 2-Clause "Simplified" License
def variables_to_restore(self, moving_avg_variables=None):
        """Returns a map of names to `Variables` to restore.
        If a variable has a moving average, use the moving average variable name as
        the restore name; otherwise, use the variable name.
        For example,
        ```python
          variables_to_restore = ema.variables_to_restore()
          saver = tf.train.Saver(variables_to_restore)
        ```
        Below is an example of such mapping:
        ```
          conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
          conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
          global_step: global_step
        ```
        Args:
          moving_avg_variables: a list of variables that require the moving
            average variable name to be used when restoring. If None, it defaults
            to variables.moving_average_variables() + variables.trainable_variables()
        Returns:
          A map from restore_names to variables. The restore_name can be the
          moving_average version of the variable name if it exists, or the original
          variable name.
        """
        name_map = {}
        if moving_avg_variables is None:
            # Include trainable variables and variables which have been explicitly
            # added to the moving_average_variables collection.
            moving_avg_variables = variables.trainable_variables()
            moving_avg_variables += variables.moving_average_variables()
        # Remove duplicates
        moving_avg_variables = set(moving_avg_variables)
        # Collect all the variables with moving average,
        for v in moving_avg_variables:
            name_map[self.average_name(v)] = v
        # Make sure we restore variables without moving averages as well.
        moving_avg_variable_names = set([v.name for v in moving_avg_variables])
        for v in list(set(variables.global_variables())):
            if v.name not in moving_avg_variable_names and v.op.name not in name_map:
                name_map[v.op.name] = v
        return name_map 
Example #28
Source File: federated_averaging_optimizer.py    From federated-averaging-tutorials with Apache License 2.0
def begin(self):
    local_vars = variables.trainable_variables()
    global_vars = ops.get_collection_ref("global_model")
    self._variable_init_op = self._fed_avg_optimizer._assign_vars(
        local_vars,
        global_vars) 
Example #29
Source File: rnn_cell_impl.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    if context.in_graph_mode():
      trainable = (variable in tf_variables.trainable_variables() or
                   (isinstance(variable, tf_variables.PartitionedVariable) and
                    list(variable)[0] in tf_variables.trainable_variables()))
    else:
      trainable = variable._trainable  # pylint: disable=protected-access
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable 
Example #30
Source File: optimizer.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if context.in_eager_mode():
    return v
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but could not locate source variable." % (str(v)))
  return v