Python tensorflow.python.ops.state_ops.assign_add() Examples

The following are 30 code examples of tensorflow.python.ops.state_ops.assign_add(). You can go to the original project or source file by following the link above each example, or browse all available functions and classes of the module tensorflow.python.ops.state_ops.
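As a quick orientation before the examples, here is a minimal, self-contained sketch of the op's behavior (assuming TensorFlow 1.x graph mode; the variable name counter is illustrative): state_ops.assign_add(ref, value) adds value to a variable in place and returns a tensor holding the updated value.

import tensorflow as tf
from tensorflow.python.ops import state_ops

counter = tf.Variable(0, name="counter")
# assign_add returns a tensor reflecting the variable's updated value
inc = state_ops.assign_add(counter, 1, name="inc")

with tf.Session() as sess:
    sess.run(counter.initializer)
    print(sess.run(inc))  # 1
    print(sess.run(inc))  # 2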
Example #1
Source File: learning_rate_decay_test.py    From deep_image_model with Apache License 2.0
def testDecay(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32, 
        name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step,
                                                       k, decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr * math.exp(-i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
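For reference, the expected value in the loop follows the natural exponential decay schedule (here decay_steps = k):

\[ \mathrm{decayed\_lr}(t) = \mathrm{initial\_lr} \cdot e^{-\mathrm{decay\_rate} \cdot t / \mathrm{decay\_steps}} \]

With staircase=True (see Example #16 below), t / decay_steps is replaced by its floor.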
Example #2
Source File: utils.py    From keras-adamw with MIT License
def _update_t_cur_eta_t_v2(self, lr_t=None, var=None):  # tf.keras
    t_cur_update, eta_t_update = None, None  # in case not assigned

    # update `t_cur` if iterating last `(grad, var)`
    iteration_done = self._updates_processed == (self._updates_per_iter - 1)
    if iteration_done:
        t_cur_update = state_ops.assign_add(self.t_cur, 1,
                                            use_locking=self._use_locking)
        self._updates_processed = 0  # reset
    else:
        self._updates_processed += 1

    # Cosine annealing
    if self.use_cosine_annealing and iteration_done:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([t_cur_update]):
            eta_t_update = state_ops.assign(self.eta_t, _compute_eta_t(self),
                                            use_locking=self._use_locking)
        self.lr_t = lr_t * self.eta_t  # for external tracking

    return iteration_done, t_cur_update, eta_t_update 
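The control_dependencies block above is the key ordering device: eta_t must be recomputed only after t_cur has been incremented. A minimal sketch of the same pattern, with hypothetical variables:

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops

t_cur = tf.Variable(0, name="t_cur")
eta_t = tf.Variable(1.0, name="eta_t")

t_cur_update = state_ops.assign_add(t_cur, 1)
with ops.control_dependencies([t_cur_update]):
    # this assign runs only after t_cur has been incremented
    eta_t_update = state_ops.assign(eta_t, 0.5)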
Example #3
Source File: tpu_estimator.py    From xlnet with Apache License 2.0
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already increments the eval step by 1 per run,
  # so add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
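Note the math_ops.cast: assign_add requires the increment's dtype to match the variable's. A minimal sketch of the same pattern (eval_step and iterations_per_loop here are illustrative stand-ins):

import tensorflow as tf
from tensorflow.python.ops import math_ops, state_ops

eval_step = tf.Variable(0, dtype=tf.int64, name="eval_step")
iterations_per_loop = tf.constant(100)  # int32 by default
increase = state_ops.assign_add(
    eval_step,
    math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
    use_locking=True)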
Example #4
Source File: gmm.py    From lambda-packs with MIT License
def _model_builder(self):
    """Creates a model function."""

    def _model_fn(features, labels, mode):
      """Model function."""
      assert labels is None, labels
      (all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
          self._parse_tensor_or_dict(features), self._training_initial_clusters,
          self._num_clusters, self._random_seed, self._covariance_type,
          self._params)
      incr_step = state_ops.assign_add(variables.get_global_step(), 1)
      loss = math_ops.reduce_sum(losses)
      training_op = with_dependencies([training_op, incr_step], loss)
      predictions = {
          GMM.ALL_SCORES: all_scores[0],
          GMM.ASSIGNMENTS: model_predictions[0][0],
      }
      eval_metric_ops = {
          GMM.SCORES: _streaming_sum(loss),
      }
      return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                     eval_metric_ops=eval_metric_ops,
                                     loss=loss, train_op=training_op)

    return _model_fn 
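The incr_step/with_dependencies construction is a common pattern: the loss tensor is produced only after both the training op and the global-step increment have run. A self-contained sketch (tf.no_op stands in for a real training op):

import tensorflow as tf
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies

global_step = tf.Variable(0, trainable=False, name="global_step")
loss = tf.constant(1.0)
training_op = tf.no_op(name="train")

incr_step = state_ops.assign_add(global_step, 1)
# evaluating train_op yields loss, after training_op and incr_step have run
train_op = with_dependencies([training_op, incr_step], loss)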
Example #5
Source File: local_cli_wrapper_test.py    From keras-lambda with MIT License
def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.Variable(10.0, name="v")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sess = session.Session()

    # Initialize variable.
    self.sess.run(self.v.initializer) 
Example #6
Source File: local_cli_wrapper_test.py    From auto-alt-text-lambda-api with MIT License
def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.Variable(10.0, name="v")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sess = session.Session()

    # Initialize variable.
    self.sess.run(self.v.initializer) 
Example #7
Source File: local_cli_wrapper_test.py    From auto-alt-text-lambda-api with MIT License
def testRunsUnderDebugMode(self):
    # Test command sequence: run; run; run;
    wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
        [[], [], []], self.sess, dump_root=self._tmp_dir)

    # Run under debug mode twice.
    wrapped_sess.run(self.inc_v)
    wrapped_sess.run(self.inc_v)

    # Verify that the assign_add op did take effect.
    self.assertAllClose(12.0, self.sess.run(self.v))

    # Assert correct run call numbers for which the CLI has been launched at
    # run-start and run-end.
    self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
    self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])

    # Verify that the dumps have been generated and picked up during run-end.
    self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))

    # Verify that the TensorFlow runtime errors are picked up; in this case
    # they should both be None.
    self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
Example #8
Source File: span_metrics.py    From text with Apache License 2.0
def update_state(self, prediction_begin, prediction_end, label_begin,
                   label_end):
    """Updates metric given prediction and labelled spans.

    Args:
      prediction_begin: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the starting positions of the predicted spans.
      prediction_end: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the ending positions of the predicted spans.
      label_begin: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the starting positions of the golden labelled spans.
      label_end: A `RaggedTensor` w/ `ragged_rank`=1 of type int64. This
        contains the ending positions of the golden labelled spans.
    """
    tp = math_ops.cast(
        calculate_true_positive(prediction_begin, prediction_end, label_begin,
                                label_end), dtypes.float32)
    num_pred = math_ops.cast(
        ragged_array_ops.size(prediction_begin), dtypes.float32)
    num_gold = math_ops.cast(ragged_array_ops.size(label_begin), dtypes.float32)
    fp = num_pred - tp
    fn = num_gold - tp
    self.true_positive.assign_add(tp)
    self.false_positive.assign_add(fp)
    self.false_negative.assign_add(fn) 
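Here assign_add appears as a method on tf.Variable rather than as the free function state_ops.assign_add(var, delta); in TF2 eager mode the update applies immediately. A minimal sketch:

import tensorflow as tf

true_positive = tf.Variable(0.0)
false_positive = tf.Variable(0.0)

true_positive.assign_add(3.0)  # updates the variable in place
false_positive.assign_add(1.0)
precision = true_positive / (true_positive + false_positive)  # 0.75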
Example #9
Source File: span_metrics.py    From text with Apache License 2.0
def _update_confusion_matrix(pred_begin, pred_end, gold_begin, gold_end):
  """Updates internal variables of the confusion matrix."""
  with ops.name_scope("UpdateConfusionMatrix"):
    total_true_pos = metrics_impl.metric_variable([],
                                                  dtypes.int32,
                                                  name="total_true_pos")
    total_false_pos = metrics_impl.metric_variable([],
                                                   dtypes.int32,
                                                   name="total_false_pos")
    total_false_neg = metrics_impl.metric_variable([],
                                                   dtypes.int32,
                                                   name="total_false_neg")

    num_gold = ragged_array_ops.size(gold_begin)
    num_pred = ragged_array_ops.size(pred_begin)
    tp = calculate_true_positive(pred_begin, pred_end, gold_begin, gold_end)
    fp = num_pred - tp
    fn = num_gold - tp
    tp_op = state_ops.assign_add(total_true_pos, tp)
    fp_op = state_ops.assign_add(total_false_pos, fp)
    fn_op = state_ops.assign_add(total_false_neg, fn)
    return (total_true_pos, total_false_pos,
            total_false_neg), control_flow_ops.group(tp_op, fp_op, fn_op) 
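For context, the three accumulators updated here feed the usual span precision/recall/F1 definitions:

\[ P = \frac{tp}{tp + fp}, \qquad R = \frac{tp}{tp + fn}, \qquad F_1 = \frac{2PR}{P + R} \]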
Example #10
Source File: tpu_estimator.py    From transformer-xl with Apache License 2.0
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
        system before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already increments the eval step by 1 per run,
  # so add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Example #11
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already increments the eval step by 1 per run,
  # so add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Example #12
Source File: dnn_linear_combined.py    From deep_image_model with Apache License 2.0
def _get_train_ops(self, features, labels):
    """See base class."""

    features = self._get_feature_dict(features)
    features, labels = self._feature_engineering_fn(features, labels)
    logits = self._logits(features, is_training=True)

    def _make_training_op(training_loss):
      global_step = contrib_variables.get_global_step()
      assert global_step

      linear_train_step = self._linear_model.get_train_step(training_loss)
      dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
                        self._dnn_model else [])
      with ops.control_dependencies(linear_train_step + dnn_train_step):
        with ops.get_default_graph().colocate_with(global_step):
          return state_ops.assign_add(global_step, 1).op

    model_fn_ops = self._head.head_ops(features, labels,
                                       estimator.ModeKeys.TRAIN,
                                       _make_training_op,
                                       logits=logits)
    return model_fn_ops.training_op, model_fn_ops.loss 
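The colocate_with context manager places the increment op on the same device as global_step, so the read-modify-write stays local to the device that owns the variable. A hedged sketch of just that pattern:

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops

global_step = tf.Variable(0, trainable=False, name="global_step")
with ops.get_default_graph().colocate_with(global_step):
    incr_step = state_ops.assign_add(global_step, 1).op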
Example #13
Source File: learning_rate_decay_test.py    From deep_image_model with Apache License 2.0
def testStaircase(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32, 
        name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate,
                                                        staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr / (1 + decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example #14
Source File: learning_rate_decay_test.py    From deep_image_model with Apache License 2.0
def testDecay(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32, 
        name="step", container="", shared_name="")    
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr,
                                                        step,
                                                        k,
                                                        decay_rate)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr / (1 + i / k * decay_rate)
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
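Both inverse_time_decay tests (Examples #13 and #14) check the same schedule, with decay_steps = k:

\[ \mathrm{decayed\_lr}(t) = \frac{\mathrm{initial\_lr}}{1 + \mathrm{decay\_rate} \cdot t / \mathrm{decay\_steps}} \]

where staircase=True replaces t / decay_steps with its floor.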
Example #15
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already increments the eval step by 1 per run,
  # so add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Example #16
Source File: learning_rate_decay_test.py    From deep_image_model with Apache License 2.0
def testStaircase(self):
    initial_lr = 0.1
    k = 10
    decay_rate = 0.96
    step = gen_state_ops._variable(shape=[], dtype=dtypes.int32, 
        name="step", container="", shared_name="")
    assign_step = state_ops.assign(step, 0)
    increment_step = state_ops.assign_add(step, 1)
    decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr,
                                                       step,
                                                       k,
                                                       decay_rate,
                                                       staircase=True)
    with self.test_session():
      assign_step.op.run()
      for i in range(k+1):
        expected = initial_lr * math.exp(-decay_rate * (i // k))
        self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
        increment_step.op.run() 
Example #17
Source File: rf3.py    From deep-learning with MIT License
def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.
    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss 
Example #18
Source File: graph_builder.py    From object_detection_kitti with Apache License 2.0
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True) 
Example #19
Source File: adamw.py    From keras_imagenet with MIT License
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        # `iterations` is incremented exactly once per step, via the
        # control dependency below; initializing `updates` with another
        # K.update_add(self.iterations, 1) would double-count it.
        self.updates = []
        wd = self.wd * self.wd_normalizer  # decoupled weight decay (4/6)

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * math_ops.cast(
                self.iterations, K.dtype(self.decay))))
        eta_t = lr / self.init_lr    # decoupled weight decay (5/6)

        with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
            t = math_ops.cast(self.iterations, K.floatx())
        # Bias corrections according to the Adam paper.
        lr_t = lr * (K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
                     (1. - math_ops.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
            p_t -= eta_t * wd * p  # decoupled weight decay (6/6)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates 
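The lr_t line folds Adam's bias corrections into the learning rate, and the per-parameter update adds the decoupled weight-decay term:

\[ \mathrm{lr}_t = \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^{\,t}}}{1 - \beta_1^{\,t}}, \qquad p \leftarrow p - \mathrm{lr}_t \, \frac{m_t}{\sqrt{v_t} + \epsilon} - \eta_t \, wd \cdot p \]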
Example #20
Source File: graph_builder.py    From hands-detection with MIT License
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True) 
Example #21
Source File: session_debug_testlib.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def testGraphPathFindingReverseRefEdgeWorks(self):
    with session.Session(config=no_rewrite_session_config()) as sess:
      v = variables.Variable(10.0, name="v")
      delta = variables.Variable(1.0, name="delta")
      inc_v = state_ops.assign_add(v, delta, name="inc_v")

      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, inc_v)

      self.assertEqual(
          ["delta", "delta/read", "inc_v", "v"],
          dump.find_some_path("delta", "v", include_reversed_ref=True))
      self.assertIsNone(dump.find_some_path("delta", "v")) 
Example #22
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def update_add(x, increment):
  """Update the value of `x` by adding `increment`.

  Arguments:
      x: A Variable.
      increment: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign_add(x, increment) 
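A minimal usage sketch of this Keras backend wrapper (assuming TF 1.x; the backend import path varies across versions):

import tensorflow as tf
from tensorflow.python.keras import backend as K

v = K.variable(10.0)
op = K.update_add(v, tf.constant(1.0))  # thin wrapper over state_ops.assign_add

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(op))  # 11.0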
Example #23
Source File: variables.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def assign_add(self, delta, use_locking=False):
    """Adds a value to this variable.

    This is essentially a shortcut for `assign_add(self, delta)`.

    Args:
      delta: A `Tensor`. The value to add to this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the addition has completed.
    """
    return state_ops.assign_add(self._variable, delta, use_locking=use_locking) 
Example #24
Source File: training_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _increment_global_step(increment, graph=None):
  graph = graph or ops.get_default_graph()
  global_step_tensor = get_global_step(graph)
  if global_step_tensor is None:
    raise ValueError(
        'Global step tensor should be created by '
        'tf.train.get_or_create_global_step before calling increment.')
  global_step_read_tensor = _get_or_create_global_step_read(graph)
  with graph.as_default() as g, g.name_scope(None):
    with ops.control_dependencies([global_step_read_tensor]):
      return state_ops.assign_add(global_step_tensor, increment) 
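In user code, the equivalent public-API pattern looks like this (a hedged TF1 sketch using tf.train.get_or_create_global_step):

import tensorflow as tf
from tensorflow.python.ops import state_ops

global_step = tf.train.get_or_create_global_step()
incr = state_ops.assign_add(global_step, 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(incr)
    print(sess.run(global_step))  # 1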
Example #25
Source File: graph_builder.py    From multilabel-image-classification-tensorflow with MIT License
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True) 
Example #26
Source File: dumping_wrapper_test.py    From keras-lambda with MIT License
def setUp(self):
    self.session_root = tempfile.mkdtemp()

    self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
    self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
    self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
    self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")

    self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
    self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")

    self.sess = session.Session()
    self.sess.run(self.v.initializer) 
Example #27
Source File: variables.py    From deep_image_model with Apache License 2.0
def assign_add(self, delta, use_locking=False):
    """Adds a value to this variable.

    This is essentially a shortcut for `assign_add(self, delta)`.

    Args:
      delta: A `Tensor`. The value to add to this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the addition has completed.
    """
    return state_ops.assign_add(self._variable, delta, use_locking=use_locking) 
Example #28
Source File: graph_builder.py    From DOTA_models with Apache License 2.0
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True) 
Example #29
Source File: qhadam.py    From qhoptim with MIT License
def _apply_dense_shared(self, grad, var):
        beta1_weight, beta2_weight = self._get_beta_weights()

        learning_rate_tensor = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
        beta1_tensor = math_ops.cast(self._beta1_tensor, var.dtype.base_dtype)
        beta2_tensor = math_ops.cast(self._beta2_tensor, var.dtype.base_dtype)
        nu1_tensor = math_ops.cast(self._nu1_tensor, var.dtype.base_dtype)
        nu2_tensor = math_ops.cast(self._nu2_tensor, var.dtype.base_dtype)
        epsilon_tensor = math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype)

        beta1_weight = math_ops.cast(beta1_weight, var.dtype.base_dtype) * beta1_tensor + 1.0
        beta2_weight = math_ops.cast(beta2_weight, var.dtype.base_dtype) * beta2_tensor + 1.0

        beta1_adj = 1.0 - (1.0 / beta1_weight)
        beta2_adj = 1.0 - (1.0 / beta2_weight)

        exp_avg = self.get_slot(var, "exp_avg")
        exp_avg_sq = self.get_slot(var, "exp_avg_sq")

        grad_sq = grad * grad

        exp_avg_tensor = state_ops.assign(
            exp_avg, beta1_adj * exp_avg + (1.0 - beta1_adj) * grad, use_locking=self._use_locking
        )
        exp_avg_sq_tensor = state_ops.assign(
            exp_avg_sq, beta2_adj * exp_avg_sq + (1.0 - beta2_adj) * grad_sq, use_locking=self._use_locking
        )

        avg_grad_tensor = nu1_tensor * exp_avg_tensor + (1.0 - nu1_tensor) * grad
        avg_grad_sq_tensor = nu2_tensor * exp_avg_sq_tensor + (1.0 - nu2_tensor) * grad_sq
        avg_grad_rms_tensor = math_ops.sqrt(avg_grad_sq_tensor)

        var_update = state_ops.assign_add(
            var,
            -learning_rate_tensor * avg_grad_tensor / (avg_grad_rms_tensor + epsilon_tensor),
            use_locking=self._use_locking,
        )

        return control_flow_ops.group(*[var_update, exp_avg_tensor, exp_avg_sq_tensor]) 
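The assigns above implement the QHAdam update: the effective gradient and squared-gradient terms interpolate between the raw gradient and the exponential moving averages before the final assign_add applies the step:

\[ \bar g_t = \nu_1 \, m_t + (1 - \nu_1) \, g_t, \qquad \bar s_t = \nu_2 \, v_t + (1 - \nu_2) \, g_t^2, \qquad \theta \leftarrow \theta - \mathrm{lr} \cdot \frac{\bar g_t}{\sqrt{\bar s_t} + \epsilon} \]

where m_t and v_t are the exp_avg and exp_avg_sq slots.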
Example #30
Source File: graph_builder.py    From g-tensorflow-models with Apache License 2.0
def _IncrementCounter(self, counter):
    return state_ops.assign_add(counter, 1, use_locking=True)