Python tensorflow.contrib.framework.get_global_step() Examples

The following are 5 code examples of tensorflow.contrib.framework.get_global_step(). Where the same snippet appears verbatim in several projects, it is shown once, with every source project noted above it; you can follow the links above each example to the original project or source file, or check out the other functions and classes of the tensorflow.contrib.framework module. Note that get_global_step() belongs to the tf.contrib namespace, which exists only in TensorFlow 1.x: the call was deprecated in favor of tf.train.get_global_step(), and tf.contrib was removed entirely in TensorFlow 2.x.
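Before the project examples, here is a minimal self-contained sketch of the call itself. It assumes TensorFlow 1.x with tf.contrib available; the snippet is illustrative and not taken from any of the projects below:

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework

with tf.Graph().as_default():
    # No global step variable exists yet, so the lookup returns None.
    print(contrib_framework.get_global_step())  # None
    global_step = contrib_framework.create_global_step()
    # Subsequent lookups return the same variable.
    print(contrib_framework.get_global_step().name)  # global_step:0

The examples below all rely on this lookup behavior: an estimator creates the global step variable once, and library code retrieves it wherever the step has to be read or incremented.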
Example #1
Source File: random_forest.py    From lambda-packs with MIT License (the same hook method appears verbatim in auto-alt-text-lambda-api with MIT License, deep_image_model with Apache License 2.0, and keras-lambda with MIT License)
def before_run(self, run_context):
    # Ask the session to fetch the global step and the loss tensor
    # (looked up by its well-known name) alongside every training step.
    return session_run_hook.SessionRunArgs(
        {'global_step': contrib_framework.get_global_step(),
         'current_loss': run_context.session.graph.get_operation_by_name(
             LOSS_NAME).outputs[0]})
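On its own, before_run only requests fetches; the values arrive in a matching after_run call. A hypothetical, self-contained version of the same pattern using the public TF 1.x hook API (the class name, the logging, and the tensor name argument, e.g. 'training_loss:0', are illustrative):

import tensorflow as tf

class LossLoggingHook(tf.train.SessionRunHook):
    """Fetches the global step and a named loss tensor on every step."""

    def __init__(self, loss_tensor_name):
        self._loss_tensor_name = loss_tensor_name

    def begin(self):
        # Resolve the global step while the graph is still the default.
        self._global_step = tf.train.get_global_step()

    def before_run(self, run_context):
        # The requested fetches ride along with the training step.
        return tf.train.SessionRunArgs({
            'global_step': self._global_step,
            'current_loss': run_context.session.graph.get_tensor_by_name(
                self._loss_tensor_name)})

    def after_run(self, run_context, run_values):
        # run_values.results holds whatever before_run requested.
        print('step %d: loss %f' % (run_values.results['global_step'],
                                    run_values.results['current_loss']))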
Example #2
Source File: dynamic_rnn_estimator.py    From deep_image_model with Apache License 2.0
def _loss_to_train_op(self, loss):
    """Map `loss` to a training op."""
    with ops.name_scope('loss_to_train_op'):
      trainable_variables = ops.get_default_graph().get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      global_step = contrib_framework.get_global_step()
      gradients = self._optimizer.compute_gradients(
          loss=loss, var_list=trainable_variables)
      processed_gradients = self._process_gradients(gradients)
      # Passing global_step to apply_gradients makes the optimizer
      # increment it once per training step.
      return self._optimizer.apply_gradients(
          processed_gradients, global_step=global_step)
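_process_gradients is project-specific; a common choice for such a hook is gradient clipping. A hypothetical sketch of the whole loop using only public TF 1.x ops (the toy loss and the clip norm are illustrative, not the project's values):

import tensorflow as tf

def process_gradients(grads_and_vars, clip_norm=10.0):
    # Clip each gradient's norm, preserving the (gradient, variable)
    # pairing that apply_gradients expects.
    return [(tf.clip_by_norm(g, clip_norm), v)
            for g, v in grads_and_vars if g is not None]

x = tf.Variable(3.0)
loss = tf.square(x)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
global_step = tf.train.get_or_create_global_step()
train_op = optimizer.apply_gradients(
    process_gradients(optimizer.compute_gradients(loss)),
    global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(global_step))  # 1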
Example #3
Source File: rf3.py    From deep-learning with MIT License
def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.
    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    # Group the forest's training graph with a manual global-step
    # increment so that one run of `train` advances the counter by 1.
    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss 
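The group-with-assign_add idiom above recurs throughout these examples: training ops that are not built through an optimizer must advance the global step themselves. A standalone sketch with public TF 1.x ops (the no-op stands in for a real training graph):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
training_graph = tf.no_op(name='training')  # stand-in for real training ops

# One run of `train` executes the training ops and bumps the step.
train = tf.group(training_graph,
                 tf.assign_add(global_step, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train)
    print(sess.run(global_step))  # 1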
Example #4
Source File: random_forest.py    From auto-alt-text-lambda-api with MIT License (the same function appears verbatim in keras-lambda with MIT License)
def get_model_fn(params, graph_builder_class, device_assigner,
                 weights_name=None, keys_name=None, num_trainers=1,
                 trainer_id=0):
  """Return a model function given a way to construct a graph builder."""
  def _model_fn(features, labels):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)

    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {
        eval_metrics.INFERENCE_PROB_NAME:
            graph_builder.inference_graph(features)
    }
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    # `keys` is either None or a tensor; tensors cannot be truth-tested
    # in graph mode, so check against None explicitly.
    if keys is not None:
      inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if labels is not None:
      training_loss = graph_builder.training_loss(
          features, labels, name=LOSS_NAME)
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
  return _model_fn 
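The (predictions, loss, train_op) triple returned by _model_fn matches what the old tf.contrib.learn.Estimator accepted from a model_fn. A hypothetical wiring sketch (the parameter objects, model_dir, and input_fn are assumptions, not part of the example):

from tensorflow.contrib import learn

# `params`, `graph_builder_class`, and `device_assigner` come from the
# surrounding tensor-forest setup and are assumed to exist here.
model_fn = get_model_fn(params, graph_builder_class, device_assigner,
                        weights_name='example_weights')
estimator = learn.Estimator(model_fn=model_fn, model_dir='/tmp/forest_model')
estimator.fit(input_fn=my_input_fn, steps=100)  # my_input_fn: hypothetical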
Example #5
Source File: random_forest.py    From deep_image_model with Apache License 2.0
def get_model_fn(params, graph_builder_class, device_assigner,
                 weights_name=None, keys_name=None):
  """Return a model function given a way to construct a graph builder."""
  def _model_fn(features, labels):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)
    # Parse raw inputs into dense tensors plus a data spec; the forest
    # ops in this project require float32 features and labels.
    processed_features, spec = data_ops.ParseDataTensorOrDict(features)
    _assert_float32(processed_features)
    if labels is not None:
      labels = data_ops.ParseLabelTensorOrDict(labels)
      _assert_float32(labels)

    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {eval_metrics.INFERENCE_PROB_NAME:
                 graph_builder.inference_graph(processed_features,
                                               data_spec=spec)}
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    # As in Example #4, guard with an explicit None check; tensors
    # cannot be truth-tested in graph mode.
    if keys is not None:
      inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if labels is not None:
      training_loss = graph_builder.training_loss(processed_features, labels,
                                                  data_spec=spec,
                                                  name=LOSS_NAME)
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              processed_features, labels, data_spec=spec,
              input_weights=weights),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
  return _model_fn 
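This variant differs from Example #4 mainly in its input parsing and its float32 guard. A minimal sketch of that kind of dtype check (a hypothetical helper, not the project's own _assert_float32):

import tensorflow as tf

def assert_float32(tensors):
    # Accept a single tensor or a dict of tensors and fail fast on any
    # non-float32 dtype, mirroring the guard used in the example above.
    if isinstance(tensors, dict):
        tensors = list(tensors.values())
    else:
        tensors = [tensors]
    for t in tensors:
        if t.dtype != tf.float32:
            raise ValueError('Expected float32, got %s' % t.dtype)

assert_float32({'x': tf.constant([1.0, 2.0])})  # passes silently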