Python tensorflow.python.training.basic_session_run_hooks.StopAtStepHook() Examples

The following are 9 code examples of tensorflow.python.training.basic_session_run_hooks.StopAtStepHook(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tensorflow.python.training.basic_session_run_hooks module, or try the search function.
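Before the examples, here is a minimal self-contained sketch of how the hook is typically wired into a MonitoredTrainingSession. The toy graph and loop below are illustrative assumptions, not code from any of the projects listed; tf.train.StopAtStepHook is the public alias of basic_session_run_hooks.StopAtStepHook. The hook requires a global step tensor in the graph and takes exactly one of num_steps (stop after that many additional steps) or last_step (stop once the global step reaches that absolute value); passing both, or neither, raises a ValueError.

import tensorflow.compat.v1 as tf  # plain `import tensorflow as tf` on TF 1.x

tf.disable_eager_execution()

# Toy graph: a global step plus a train op that decays a dummy "loss".
global_step = tf.train.get_or_create_global_step()
loss = tf.Variable(1.0)
decay = tf.assign(loss, loss * 0.99)
with tf.control_dependencies([decay]):
    train_op = tf.assign_add(global_step, 1)

# Stop 100 steps from the current global step; StopAtStepHook(last_step=100)
# would instead stop once the global step reaches the absolute value 100.
hooks = [tf.train.StopAtStepHook(num_steps=100)]

with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
    while not sess.should_stop():
        sess.run(train_op)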
Example #1
Source File: evaluation_test.py    From tf-slim with Apache License 2.0
def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss = loss_ops.log_loss(tf_labels, tf_predictions)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      loss = training.train(
          train_op,
          checkpoint_dir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) 
Example #2
Source File: training_test.py    From tf-slim with Apache License 2.0
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1) 
Example #3
Source File: training_test.py    From tf-slim with Apache License 2.0
def testCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015) 
Example #4
Source File: training_test.py    From tf-slim with Apache License 2.0
def testTrainWithLocalVariable(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      local_multiplier = variables_lib.local_variable(1.0)

      tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015) 
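A note on the pattern in Examples #2 through #4: each passes None as the logdir argument of training.train and explicitly disables summaries (save_summaries_steps=None) and checkpointing (save_checkpoint_secs=None), which tf-slim requires when there is no logdir. Nothing is written to disk, so the StopAtStepHook is the only mechanism that ends the loop, and train returns the loss at the final step for the assertions to check.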
Example #5
Source File: estimator.py    From lambda-packs with MIT License
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Trainable`.

    Raises:
      ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
      ValueError: If both `steps` and `max_steps` are not `None`.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError('Can not provide both steps and max_steps.')
    _verify_input_args(x, y, input_fn, None, batch_size)
    if x is not None:
      SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
      return self

    if max_steps is not None:
      try:
        start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
        if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been saved.')
          return self
      except:  # pylint: disable=bare-except
        pass

    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    if steps is not None or max_steps is not None:
      hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))

    loss = self._train_model(input_fn=input_fn, hooks=hooks)
    logging.info('Loss for final step: %s.', loss)
    return self 
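A note on the positional call above: StopAtStepHook(steps, max_steps) maps onto the hook's keyword signature StopAtStepHook(num_steps=None, last_step=None), so steps becomes num_steps and max_steps becomes last_step. The ValueError check at the top of fit, together with the `if steps is not None or max_steps is not None` guard, guarantees that exactly one of the two is set, which is what the hook requires.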
Example #6
Source File: estimator.py    From auto-alt-text-lambda-api with MIT License
(Identical to Example #5.)
Example #7
Source File: evaluation_test.py    From tf-slim with Apache License 2.0
def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss = loss_ops.log_loss(tf_labels, tf_predictions)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      loss = training.train(
          train_op,
          checkpoint_dir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])

      if num_steps >= 300:
        assert loss < .015 
Example #8
Source File: estimator.py    From keras-lambda with MIT License
(Identical to Example #5.)
Example #9
Source File: training_test.py    From tf-slim with Apache License 2.0
def testTrainWithAlteredGradients(self):
    # Use the same learning rate but different gradient multipliers
    # to train two models. Model with equivalently larger learning
    # rate (i.e., learning_rate * gradient_multiplier) has smaller
    # training loss.
    multipliers = [1., 1000.]
    number_of_steps = 10
    learning_rate = 0.001

    # First, train the model with equivalently smaller learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[0])

      loss0 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss0)
      self.assertGreater(loss0, .5)

    # Second, train the model with equivalently larger learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[1])

      loss1 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss1)
      self.assertLess(loss1, .5)

    # The loss of the model trained with larger learning rate should
    # be smaller.
    self.assertGreater(loss0, loss1)