Python tensorflow.python.platform.tf_logging.fatal() Examples

The following are 24 code examples of tensorflow.python.platform.tf_logging.fatal(), collected from open-source projects. Each example notes the source file and project it was taken from. You may also want to check out all available functions and classes of the module tensorflow.python.platform.tf_logging.
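Before the project-specific examples, here is a minimal usage sketch (not taken from any of the projects below) showing the call pattern they all share: the module is imported under the alias logging, and fatal() is given a printf-style format string plus arguments, just like the standard library logging functions. The require_positive helper is hypothetical.

from tensorflow.python.platform import tf_logging as logging

def require_positive(value):
  # fatal() logs at the highest severity; the projects below use it to
  # abandon work on unrecoverable configuration errors.
  if value <= 0:
    logging.fatal('Expected a positive value, got: %s', value)
  return value

require_positive(3)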
Example #1
Source File: network_units.py    From DOTA_models with Apache License 2.0
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal(
          'Invalid layer name: "%s" Can only retrieve from "logits", '
          '"last_layer", and "layer_*".',
          layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
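This get_layer_size() implementation recurs verbatim in the next several examples under different project names. It resolves the symbolic names 'logits', 'last_layer', and 'layer_<index>' to sizes and treats any other name as a fatal configuration error. A self-contained sketch of the same pattern is shown below; the _FakeComponent class and the hidden layer sizes are hypothetical stand-ins for the real DRAGNN component and hyperparameters.

from tensorflow.python.platform import tf_logging as logging

class _FakeComponent(object):
  """Hypothetical stand-in for the component referenced above."""
  num_actions = 10

class _FakeNetwork(object):
  """Minimal harness around the get_layer_size() pattern shown above."""

  def __init__(self):
    self._component = _FakeComponent()
    self._hidden_layer_sizes = [256, 64]  # hypothetical hyperparameters

  def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions
    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]
    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s"', layer_name)
    return self._hidden_layer_sizes[int(layer_name.split('_')[1])]

network = _FakeNetwork()
assert network.get_layer_size('layer_1') == 64
assert network.get_layer_size('last_layer') == 64
assert network.get_layer_size('logits') == 10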
Example #2
Source File: network_units.py    From multilabel-image-classification-tensorflow with MIT License
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s" Can only retrieve from "logits", '
                    '"last_layer", and "layer_*".', layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #3
Source File: network_units.py    From g-tensorflow-models with Apache License 2.0
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s" Can only retrieve from "logits", '
                    '"last_layer", and "layer_*".', layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #4
Source File: network_units.py    From HumanRecognition with MIT License
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal(
          'Invalid layer name: "%s" Can only retrieve from "logits", '
          '"last_layer", and "layer_*".',
          layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #5
Source File: network_units.py    From object_detection_with_tensorflow with MIT License
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s" Can only retrieve from "logits", '
                    '"last_layer", and "layer_*".', layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #6
Source File: network_units.py    From object_detection_kitti with Apache License 2.0
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal(
          'Invalid layer name: "%s" Can only retrieve from "logits", '
          '"last_layer", and "layer_*".',
          layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #7
Source File: network_units.py    From hands-detection with MIT License
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal(
          'Invalid layer name: "%s" Can only retrieve from "logits", '
          '"last_layer", and "layer_*".',
          layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #8
Source File: network_units.py    From Gun-Detector with Apache License 2.0
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s" Can only retrieve from "logits", '
                    '"last_layer", and "layer_*".', layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #9
Source File: network_units.py    From yolo_v2 with Apache License 2.0
def get_layer_size(self, layer_name):
    if layer_name == 'logits':
      return self._component.num_actions

    if layer_name == 'last_layer':
      return self._hidden_layer_sizes[-1]

    if not layer_name.startswith('layer_'):
      logging.fatal('Invalid layer name: "%s" Can only retrieve from "logits", '
                    '"last_layer", and "layer_*".', layer_name)

    # NOTE(danielandor): Since get_layer_size is called before the
    # model has been built, we compute the layer size directly from
    # the hyperparameters rather than from self._layers.
    layer_index = int(layer_name.split('_')[1])
    return self._hidden_layer_sizes[layer_index] 
Example #10
Source File: tf_should_use.py    From lambda-packs with MIT License
def must_use_result_or_fatal(fn):
  """Function wrapper that ensures the function's output is used.

  If the output is not used, a `tf.logging.fatal` error is raised.

  An output is marked as used if any of its attributes are read, modified, or
  updated.  Examples when the output is a `Tensor` include:

  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
  - Accessing a property (e.g. getting `t.name` or `t.op`).

  Note, certain behaviors cannot be tracked - for these the object may not
  be marked as used.  Examples include:

  - `t != 0`.  In this case, comparison is done on types / ids.
  - `isinstance(t, tf.Tensor)`.  Similar to above.

  Args:
    fn: The function to wrap.

  Returns:
    The wrapped function.
  """
  def wrapped(*args, **kwargs):
    return _add_should_use_warning(fn(*args, **kwargs), fatal_error=True)
  return tf_decorator.make_decorator(
      fn, wrapped, 'must_use_result_or_fatal',
      ((fn.__doc__ or '') +
       ('\n\n  '
        '**NOTE** The output of this function must be used.  If it is not, '
        'a fatal error will be raised.  To mark the output as used, '
        'call its .mark_used() method.'))) 
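A hedged usage sketch of the decorator above, assuming the module is importable as tensorflow.python.util.tf_should_use (its location in TF 1.x); the compute_delta function is hypothetical. Per the docstring, an output that is never used triggers a fatal log message when it is garbage collected, and mark_used() is the explicit acknowledgement.

import tensorflow as tf
from tensorflow.python.util import tf_should_use

@tf_should_use.must_use_result_or_fatal
def compute_delta(ref, value):
  # Returns an op whose result the caller is expected to consume.
  return tf.assign_add(ref, value)

var = tf.Variable(1.0)
delta = compute_delta(var, 2.0)
# Either actually use `delta` (e.g. sess.run(delta)) or acknowledge it;
# otherwise a fatal error is logged when `delta` is garbage collected.
delta.mark_used()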
Example #11
Source File: tf_should_use.py    From lambda-packs with MIT License
def _deleted(obj_id, fatal_error):
  obj = _REF_INFO[obj_id]
  del _REF_INFO[obj_id]
  if not obj.object_used:
    if fatal_error:
      logger = tf_logging.fatal
    else:
      logger = tf_logging.error
    logger(
        '==================================\n'
        'Object was never used (type %s):\n%s\nIf you want to mark it as '
        'used call its "mark_used()" method.\nIt was originally created '
        'here:\n%s\n'
        '==================================' %
        (obj.type_, obj.repr_, obj.creation_stack)) 
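The helper above is a deletion callback: it checks whether the tracked object was ever used and, if not, picks tf_logging.fatal or tf_logging.error depending on the fatal_error flag before emitting a single pre-formatted message. A minimal sketch of that logger-selection pattern is shown below; report_unused and its arguments are hypothetical, not part of tf_should_use.

from tensorflow.python.platform import tf_logging

def report_unused(description, fatal_error=False):
  # Select the logging function first, then emit one pre-formatted message,
  # mirroring the _deleted() helper above.
  logger = tf_logging.fatal if fatal_error else tf_logging.error
  logger('Object was never used: %s' % description)

report_unused('tensor created in build_graph()')                   # ERROR severity
# report_unused('tensor created in build_graph()', fatal_error=True)  # FATAL severity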
Example #12
Source File: graph_builder.py    From Gun-Detector with Apache License 2.0
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
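This _create_optimizer() helper recurs in the later graph_builder.py examples. It dispatches on hyperparams.learning_method and reserves logging.fatal() for configuration errors: a missing step_var when building a CompositeOptimizer, and an unrecognized learning method in the final branch. A hedged usage sketch follows, assuming the function above is in scope; the SimpleNamespace stands in for the real GridPoint proto and its field values are hypothetical.

import types
import tensorflow as tf

# Hypothetical stand-in for the GridPoint proto described in the docstring.
momentum_spec = types.SimpleNamespace(learning_method='momentum', momentum=0.9)
learning_rate = tf.constant(0.01)

optimizer = _create_optimizer(momentum_spec, learning_rate)
# -> tf.train.MomentumOptimizer(learning_rate, 0.9, use_locking=True)

# An unrecognized method falls through to the final branch:
# _create_optimizer(types.SimpleNamespace(learning_method='adagrad'),
#                   learning_rate)  # logs 'Unknown learning method (optimizer)'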
Example #13
Source File: export.py    From deep_image_model with Apache License 2.0
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Creates logistic regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
      dict that contains the probabilities tensor as in
      {'probabilities', `Tensor`}.

  Returns:
    Tuple of default regression signature and named signature.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    predictions_tensor = predictions['probabilities']
  else:
    predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
  # while second column is P(Y=1|x). We are only interested in the second
  # column for inference.
  predictions_shape = predictions_tensor.get_shape()
  predictions_rank = len(predictions_shape)
  if predictions_rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
  if predictions_shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(predictions_shape[1], predictions_shape))

  positive_predictions = predictions_tensor[:, 1]
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=positive_predictions)
  return default_signature, {}


# pylint: disable=protected-access 
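A hedged usage sketch of logistic_regression_signature_fn() above, assuming the function is in scope within its module (which already imports exporter) and a TF 1.x graph is being built; the placeholder shapes are hypothetical. Predictions with the wrong rank, or a second dimension other than 2, trip the logging.fatal() branches instead of producing a signature.

import tensorflow as tf

serialized_examples = tf.placeholder(tf.string, shape=[None], name='input_example')
# Hypothetical [batch_size, 2] probabilities: column 0 is P(Y=0|x), column 1 is P(Y=1|x).
probabilities = tf.placeholder(tf.float32, shape=[None, 2], name='probabilities')

default_signature, named_signatures = logistic_regression_signature_fn(
    serialized_examples, unused_features={}, predictions=probabilities)

# A rank-1 predictions tensor would hit the first logging.fatal() branch:
# logistic_regression_signature_fn(
#     serialized_examples, {}, tf.placeholder(tf.float32, shape=[None]))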
Example #14
Source File: graph_builder.py    From hands-detection with MIT License
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #15
Source File: graph_builder.py    From yolo_v2 with Apache License 2.0
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #16
Source File: graph_builder.py    From object_detection_kitti with Apache License 2.0
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #17
Source File: export.py    From auto-alt-text-lambda-api with MIT License
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Creates logistic regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
      dict that contains the probabilities tensor as in
      {'probabilities', `Tensor`}.

  Returns:
    Tuple of default regression signature and named signature.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    predictions_tensor = predictions['probabilities']
  else:
    predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
  # while second column is P(Y=1|x). We are only interested in the second
  # column for inference.
  predictions_shape = predictions_tensor.get_shape()
  predictions_rank = len(predictions_shape)
  if predictions_rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
  if predictions_shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(predictions_shape[1], predictions_shape))

  positive_predictions = predictions_tensor[:, 1]
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=positive_predictions)
  return default_signature, {}


# pylint: disable=protected-access 
Example #18
Source File: graph_builder.py    From object_detection_with_tensorflow with MIT License
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #19
Source File: export.py    From lambda-packs with MIT License
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Creates logistic regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
      dict that contains the probabilities tensor as in
      {'probabilities', `Tensor`}.

  Returns:
    Tuple of default regression signature and named signature.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    predictions_tensor = predictions['probabilities']
  else:
    predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
  # while second column is P(Y=1|x). We are only interested in the second
  # column for inference.
  predictions_shape = predictions_tensor.get_shape()
  predictions_rank = len(predictions_shape)
  if predictions_rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
  if predictions_shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(predictions_shape[1], predictions_shape))

  positive_predictions = predictions_tensor[:, 1]
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=positive_predictions)
  return default_signature, {}


# pylint: disable=protected-access 
Example #20
Source File: graph_builder.py    From HumanRecognition with MIT License
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #21
Source File: graph_builder.py    From g-tensorflow-models with Apache License 2.0
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #22
Source File: graph_builder.py    From multilabel-image-classification-tensorflow with MIT License
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #23
Source File: graph_builder.py    From DOTA_models with Apache License 2.0
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)') 
Example #24
Source File: export.py    From keras-lambda with MIT License
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Creates logistic regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
      dict that contains the probabilities tensor as in
      {'probabilities', `Tensor`}.

  Returns:
    Tuple of default regression signature and named signature.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    predictions_tensor = predictions['probabilities']
  else:
    predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
  # while second column is P(Y=1|x). We are only interested in the second
  # column for inference.
  predictions_shape = predictions_tensor.get_shape()
  predictions_rank = len(predictions_shape)
  if predictions_rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
  if predictions_shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(predictions_shape[1], predictions_shape))

  positive_predictions = predictions_tensor[:, 1]
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=positive_predictions)
  return default_signature, {}


# pylint: disable=protected-access