Python tensorflow.python.training.optimizer.Optimizer() Examples

The following are 30 code examples of tensorflow.python.training.optimizer.Optimizer(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.python.training.optimizer.
Example #1
Source File: composite_optimizer.py    From object_detection_with_tensorflow with MIT License
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
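The snippet above only shows the constructor; a minimal usage sketch (assuming the CompositeOptimizer class above is in scope, together with standard TF 1.x tf.train optimizers) might look like this:

# Hypothetical usage sketch: switch between two tf.train optimizers based on the
# global step. Assumes the CompositeOptimizer class above is in scope (TF 1.x).
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
# Use Momentum for the first 10k steps, then fall back to plain SGD.
switch = tf.less(global_step, 10000)

optimizer = CompositeOptimizer(
    optimizer1=tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9),
    optimizer2=tf.train.GradientDescentOptimizer(learning_rate=0.01),
    switch=switch)
# train_op = optimizer.minimize(loss, global_step=global_step)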
Example #2
Source File: composite_optimizer.py    From multilabel-image-classification-tensorflow with MIT License
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #3
Source File: composite_optimizer.py    From DOTA_models with Apache License 2.0
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #4
Source File: trainer.py    From FRU with MIT License
def validate_trainop_names(self):
    """ Give names to all TrainOp, handle no names and duplicated names """
    t_len = len(self.train_ops)
    # Rename optimizers without name
    for i in range(t_len):
        if not self.train_ops[i].name:
            self.train_ops[i].name = 'Optimizer'
            self.train_ops[i].scope_name = 'Optimizer'
    # Handle duplicate names
    for i in range(t_len):
        dupl = 0
        for j in range(i+1, t_len):
            if not self.train_ops[i].name:
                break
            if self.train_ops[i].name == self.train_ops[j].name:
                if dupl == 0:
                    self.train_ops[i].name += '_' + str(dupl)
                    self.train_ops[i].scope_name = self.train_ops[i].name
                dupl += 1
                self.train_ops[j].name += '_' + str(dupl)
                self.train_ops[j].scope_name = self.train_ops[j].name
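To see the renaming behaviour in isolation, here is a hypothetical, self-contained demo using a minimal stand-in for tflearn's TrainOp and Trainer (assuming the validate_trainop_names function above is available at module level):

# Hypothetical, self-contained demo of the de-duplication behaviour above.
class _FakeTrainOp(object):
    def __init__(self, name=None):
        self.name = name
        self.scope_name = name

class _FakeTrainer(object):
    def __init__(self, train_ops):
        self.train_ops = train_ops

_FakeTrainer.validate_trainop_names = validate_trainop_names  # reuse the function above

trainer = _FakeTrainer([_FakeTrainOp(), _FakeTrainOp('adam'), _FakeTrainOp('adam')])
trainer.validate_trainop_names()
print([op.name for op in trainer.train_ops])  # ['Optimizer', 'adam_0', 'adam_1']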
Example #5
Source File: composite_optimizer.py    From Gun-Detector with Apache License 2.0
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #6
Source File: composite_optimizer.py    From yolo_v2 with Apache License 2.0
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #7
Source File: moving_average_optimizer.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, opt, average_decay=0.9999, num_updates=None,
               sequential_update=True):
    """Construct a new MovingAverageOptimizer.

    Args:
      opt: A tf.Optimizer that will be used to compute and apply gradients.
      average_decay: Float.  Decay to use to maintain the moving averages
                     of trained variables.
                     See tf.train.ExponentialMovingAverage for details.
      num_updates: Optional count of number of updates applied to variables.
                   See tf.train.ExponentialMovingAverage for details.
      sequential_update: Bool. If False, will compute the moving average at the
                         same time as the model is updated, potentially doing
                         benign data races.
                         If True, will update the moving average after gradient
                         updates.
    """
    self._optimizer = opt
    self._ema = moving_averages.ExponentialMovingAverage(
        average_decay, num_updates=num_updates)
    self._variable_map = None
    self._sequential_update = sequential_update 
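A typical usage sketch for this wrapper, assuming TF 1.x with tf.contrib available; the swapping_saver method (not shown above) is what lets you checkpoint the averaged values in place of the raw variables:

# Minimal sketch, assuming TF 1.x and tf.contrib.opt.MovingAverageOptimizer.
import tensorflow as tf

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf.contrib.opt.MovingAverageOptimizer(opt, average_decay=0.999)
# train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())
# saver = opt.swapping_saver()  # saves the moving-average values into checkpoints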
Example #8
Source File: composite_optimizer.py    From hands-detection with MIT License
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #9
Source File: composite_optimizer.py    From object_detection_kitti with Apache License 2.0
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #10
Source File: moving_average_optimizer.py    From lambda-packs with MIT License
def __init__(self, opt, average_decay=0.9999, num_updates=None,
               sequential_update=True):
    """Construct a new MovingAverageOptimizer.

    Args:
      opt: A tf.Optimizer that will be used to compute and apply gradients.
      average_decay: Float.  Decay to use to maintain the moving averages
                     of trained variables.
                     See tf.train.ExponentialMovingAverage for details.
      num_updates: Optional count of number of updates applied to variables.
                   See tf.train.ExponentialMovingAverage for details.
      sequential_update: Bool. If False, will compute the moving average at the
                         same time as the model is updated, potentially doing
                         benign data races.
                         If True, will update the moving average after gradient
                         updates.
    """
    self._optimizer = opt
    self._ema = moving_averages.ExponentialMovingAverage(
        average_decay, num_updates=num_updates)
    self._variable_map = None
    self._sequential_update = sequential_update 
Example #11
Source File: composite_optimizer.py    From HumanRecognition with MIT License
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #12
Source File: drop_stale_gradient_optimizer.py    From lambda-packs with MIT License
def __init__(self,
               opt,
               staleness,
               use_locking=False,
               name="DropStaleGradient"):
    """Constructs a new DropStaleGradientOptimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
           gradients. Must be one of the Optimizer classes.
      staleness: The maximum staleness allowed for the optimizer.
      use_locking: If `True` use locks for clip update operations.
      name: Optional name prefix for the operations created when applying
            gradients. Defaults to "DropStaleGradient".
    """
    super(DropStaleGradientOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    self._staleness = staleness 
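A brief usage sketch, assuming TF 1.x with tf.contrib.opt available; the wrapper simply drops gradients computed against parameters older than `staleness` steps instead of applying them:

# Minimal sketch, assuming TF 1.x and tf.contrib.opt.DropStaleGradientOptimizer.
import tensorflow as tf

base_opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf.contrib.opt.DropStaleGradientOptimizer(base_opt, staleness=10)
# train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())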
Example #13
Source File: composite_optimizer.py    From g-tensorflow-models with Apache License 2.0
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Example #14
Source File: moving_average_optimizer.py    From keras-lambda with MIT License
def __init__(self, opt, average_decay=0.9999, num_updates=None,
               sequential_update=True):
    """Construct a new MovingAverageOptimizer.

    Args:
      opt: A tf.Optimizer that will be used to compute and apply gradients.
      average_decay: Float.  Decay to use to maintain the moving averages
                     of trained variables.
                     See tf.train.ExponentialMovingAverage for details.
      num_updates: Optional count of number of updates applied to variables.
                   See tf.train.ExponentialMovingAverage for details.
      sequential_update: Bool. If False, will compute the moving average at the
                         same time as the model is updated, potentially doing
                         benign data races.
                         If True, will update the moving average after gradient
                         updates.
    """
    self._optimizer = opt
    self._ema = moving_averages.ExponentialMovingAverage(
        average_decay, num_updates=num_updates)
    self._variable_map = None
    self._sequential_update = sequential_update 
Example #15
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Wraps the original apply_gradient of the optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Default to the
        name passed to the `Optimizer` constructor.
    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.
    """
    pre_op = self._before_apply_gradients(grads_and_vars)
    with ops.control_dependencies([pre_op]):
      # Call this to create slots.
      _ = self._optimizer.apply_gradients(
          grads_and_vars, global_step=global_step, name=name)
      def apply_gradient_op():
        optimizer_update = self._optimizer.apply_gradients(
            grads_and_vars, global_step=global_step, name=name)
        return optimizer_update
      # We get the default one after calling the super.apply_gradient(), since
      # we want to preserve original behavior of the optimizer: don't increment
      # anything if no global_step is passed. But we need the global step for
      # the mask_update.
      global_step = (global_step if global_step is not None
                     else training_util.get_or_create_global_step())
      self._global_step = global_step
      return self.cond_mask_update_op(global_step, apply_gradient_op) 
Example #16
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Wraps the original apply_gradient of the optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Default to the
        name passed to the `Optimizer` constructor.
    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.
    """
    pre_op = self._before_apply_gradients(grads_and_vars)
    with ops.control_dependencies([pre_op]):
      optimizer_update = self._optimizer.apply_gradients(
          grads_and_vars, global_step=global_step, name=name)
    # We get the default one after calling the super.apply_gradient(), since
    # we want to preserve original behavior of the optimizer: don't increment
    # anything if no global_step is passed. But we need the global step for
    # the mask_update.
    global_step = (global_step if global_step is not None
                   else training_util.get_or_create_global_step())
    self._global_step = global_step
    with ops.control_dependencies([optimizer_update]):
      return self.cond_mask_update_op(global_step, control_flow_ops.no_op) 
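Examples #15 and #16 follow the same wrapping pattern: a preparatory op, then the inner optimizer's update, then a follow-up op gated on global_step, all sequenced with control dependencies. The hypothetical skeleton below only illustrates that ordering; it is not the rigl implementation, and the no_op placeholders stand in for _before_apply_gradients and the conditional mask update:

# Hypothetical skeleton of a wrapping optimizer's apply_gradients, showing only
# the ordering used above: pre_op -> inner update -> follow-up op (TF 1.x).
import tensorflow as tf


class WrappingOptimizer(tf.train.Optimizer):
    def __init__(self, optimizer, name='Wrapping'):
        super(WrappingOptimizer, self).__init__(False, name)
        self._optimizer = optimizer

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        # 1) Preparatory op (stands in for _before_apply_gradients).
        pre_op = tf.no_op(name='before_apply_gradients')
        with tf.control_dependencies([pre_op]):
            # 2) The wrapped optimizer's own update.
            update = self._optimizer.apply_gradients(
                grads_and_vars, global_step=global_step, name=name)
        if global_step is None:
            # The follow-up op needs a step counter even if the caller passed none.
            global_step = tf.train.get_or_create_global_step()
        with tf.control_dependencies([update]):
            # 3) Follow-up op (stands in for the conditional mask update).
            return tf.no_op(name='post_update')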
Example #17
Source File: optimizers.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def get(identifier):
  """Retrieves a Keras Optimizer instance.

  Arguments:
      identifier: Optimizer identifier, one of
          - String: name of an optimizer
          - Dictionary: configuration dictionary.
          - Keras Optimizer instance (it will be returned unchanged).
          - TensorFlow Optimizer instance
              (it will be wrapped as a Keras Optimizer).

  Returns:
      A Keras Optimizer instance.

  Raises:
      ValueError: If `identifier` cannot be interpreted.
  """
  # Wrap TF optimizer instances
  if isinstance(identifier, tf_optimizer_module.Optimizer):
    return TFOptimizer(identifier)
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    config = {'class_name': str(identifier), 'config': {}}
    return deserialize(config)
  if isinstance(identifier, Optimizer):
    return identifier
  else:
    raise ValueError('Could not interpret optimizer identifier:', identifier) 
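For reference, a sketch of the identifier forms this resolver accepts, shown via the equivalent tf.keras entry point (assumed to behave like the contrib module above; exact behaviour can vary across TF versions):

# Minimal sketch using the tf.keras equivalent of the get() shown above.
import tensorflow as tf

sgd = tf.keras.optimizers.get('sgd')                                  # by name
adam = tf.keras.optimizers.get({'class_name': 'Adam', 'config': {}})  # config dict
# A tf.train optimizer would be wrapped as a TFOptimizer rather than rejected:
# wrapped = tf.keras.optimizers.get(tf.train.AdamOptimizer(0.001))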
Example #18
Source File: optimizers.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def get_optimizer_instance(opt, learning_rate=None):
  """Returns an optimizer instance.

  Supports the following types for the given `opt`:
  * An `Optimizer` instance: Returns the given `opt`.
  * A string: Creates an `Optimizer` subclass with the given `learning_rate`.
    Supported strings:
    * 'Adagrad': Returns an `AdagradOptimizer`.
    * 'Adam': Returns an `AdamOptimizer`.
    * 'Ftrl': Returns an `FtrlOptimizer`.
    * 'RMSProp': Returns an `RMSPropOptimizer`.
    * 'SGD': Returns a `GradientDescentOptimizer`.

  Args:
    opt: An `Optimizer` instance, or string, as discussed above.
    learning_rate: A float. Only used if `opt` is a string.

  Returns:
    An `Optimizer` instance.

  Raises:
    ValueError: If `opt` is an unsupported string.
    ValueError: If `opt` is a supported string but `learning_rate` was not
      specified.
    ValueError: If `opt` is none of the above types.
  """
  if isinstance(opt, six.string_types):
    if opt in six.iterkeys(_OPTIMIZER_CLS_NAMES):
      if not learning_rate:
        raise ValueError('learning_rate must be specified when opt is string.')
      return _OPTIMIZER_CLS_NAMES[opt](learning_rate=learning_rate)
    raise ValueError(
        'Unsupported optimizer name: {}. Supported names are: {}'.format(
            opt, tuple(sorted(six.iterkeys(_OPTIMIZER_CLS_NAMES)))))
  if not isinstance(opt, optimizer_lib.Optimizer):
    raise ValueError(
        'The given object is not an Optimizer instance. Given: {}'.format(opt))
  return opt 
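A sketch of the accepted forms, assuming the get_optimizer_instance function above (together with its module-level _OPTIMIZER_CLS_NAMES table) is importable from its module:

# Sketch of the accepted forms (assumes the function above and its module are
# importable; names here mirror the snippet, not a public TF API path).
import tensorflow as tf

opt1 = get_optimizer_instance('Adagrad', learning_rate=0.05)   # by name
opt2 = get_optimizer_instance(tf.train.AdamOptimizer(0.001))   # already an instance
# get_optimizer_instance('Adagrad')  # ValueError: learning_rate must be specified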
Example #19
Source File: optimizers.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the optimizer (i.e. it should match the
            output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError('Optimizer weight shape ' + str(pv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples) 
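set_weights is the counterpart of get_weights, and the usual application is copying optimizer state between identically shaped setups. A minimal sketch with tf.keras, assumed to behave like the contrib Keras code above; note the optimizer must have been built (e.g. by one training step) before it has weights:

# Minimal sketch (tf.keras): copy optimizer state after it has been built.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.SGD(momentum=0.9), loss='mse')
model.train_on_batch(np.zeros((2, 4)), np.zeros((2, 1)))  # creates the slot weights

state = model.optimizer.get_weights()  # list of Numpy arrays
model.optimizer.set_weights(state)     # shapes must match, otherwise ValueError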
Example #20
Source File: variable_clipping_optimizer.py    From deep_image_model with Apache License 2.0
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
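A usage sketch, assuming TF 1.x with tf.contrib.opt available: clip each row of an embedding matrix to a maximum L2 norm along dimension 1, while leaving all other variables untouched:

# Minimal sketch, assuming TF 1.x and tf.contrib.opt.VariableClippingOptimizer.
import tensorflow as tf

embeddings = tf.get_variable('embeddings', shape=[1000, 64])
base_opt = tf.train.AdagradOptimizer(learning_rate=0.1)
opt = tf.contrib.opt.VariableClippingOptimizer(
    base_opt, {embeddings: [1]}, max_norm=1.0)
# train_op = opt.minimize(loss)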
Example #21
Source File: sync_replicas_optimizer.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs) 
Example #22
Source File: optimizers.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping
          names (strings) to custom objects
          (classes and functions)
          to be considered during deserialization.

  Returns:
      A Keras Optimizer instance.
  """
  all_classes = {
      'sgd': SGD,
      'rmsprop': RMSprop,
      'adagrad': Adagrad,
      'adadelta': Adadelta,
      'adam': Adam,
      'adamax': Adamax,
      'nadam': Nadam,
      'tfoptimizer': TFOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer') 
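A sketch of round-tripping an optimizer through its config dictionary, shown with the equivalent tf.keras entry points (serialize is the inverse mentioned in the docstring and is assumed to behave like the contrib code above):

# Minimal sketch (tf.keras): optimizer -> config dict -> optimizer.
import tensorflow as tf

opt = tf.keras.optimizers.RMSprop()
config = tf.keras.optimizers.serialize(opt)        # {'class_name': 'RMSprop', ...}
same_opt = tf.keras.optimizers.deserialize(config)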
Example #23
Source File: sync_replicas_optimizer.py    From deep_image_model with Apache License 2.0
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs) 
Example #24
Source File: sync_replicas_optimizer.py    From lambda-packs with MIT License
def get_slot(self, *args, **kwargs):
    """Return a slot named "name" created for "var" by the Optimizer.

    This simply wraps the get_slot() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    return self._opt.get_slot(*args, **kwargs) 
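This get_slot, like the get_slot_names wrappers in Examples #21, #23, #25, and #30, simply forwards to the wrapped optimizer. A sketch of slot inspection, shown directly on a plain tf.train.MomentumOptimizer since the SyncReplicas wrapper only delegates:

# Minimal sketch (TF 1.x): slot inspection on a MomentumOptimizer; a
# SyncReplicasOptimizer wrapping it would return the same results through the
# delegating get_slot/get_slot_names shown above.
import tensorflow as tf

var = tf.get_variable('w', shape=[3])
opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
train_op = opt.minimize(tf.reduce_sum(var * var), var_list=[var])

print(opt.get_slot_names())                    # ['momentum']
momentum_slot = opt.get_slot(var, 'momentum')  # accumulator Variable paired with w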
Example #25
Source File: sync_replicas_optimizer.py    From lambda-packs with MIT License
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs) 
Example #26
Source File: optimizers.py    From lambda-packs with MIT License
def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the optimizer (i.e. it should match the
            output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError('Optimizer weight shape ' + str(pv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples) 
Example #27
Source File: optimizers.py    From lambda-packs with MIT License
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping
          names (strings) to custom objects
          (classes and functions)
          to be considered during deserialization.

  Returns:
      A Keras Optimizer instance.
  """
  all_classes = {
      'sgd': SGD,
      'rmsprop': RMSprop,
      'adagrad': Adagrad,
      'adadelta': Adadelta,
      'adam': Adam,
      'adamax': Adamax,
      'nadam': Nadam,
      'tfoptimizer': TFOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer') 
Example #28
Source File: variable_clipping_optimizer.py    From lambda-packs with MIT License
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
Example #29
Source File: variable_clipping_optimizer.py    From keras-lambda with MIT License
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
Example #30
Source File: sync_replicas_optimizer.py    From keras-lambda with MIT License
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs)