Python tensorflow.python.training.optimizer.minimize() Examples

The following are 14 code examples of tensorflow.python.training.optimizer.minimize(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.training.optimizer, or try the search function.
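Before the project excerpts, here is a minimal, self-contained sketch of how Optimizer.minimize() is typically called in TF1-style graph code (the toy model, names, and values below are illustrative and not taken from any of the projects listed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Toy least-squares problem: fit w so that w * 3.0 == 6.0.
x = tf.constant(3.0)
y = tf.constant(6.0)
w = tf.Variable(1.0)
loss = tf.square(w * x - y)

global_step = tf.train.get_or_create_global_step()
opt = tf.train.GradientDescentOptimizer(learning_rate=0.05)
# minimize() wires up compute_gradients() + apply_gradients() and, because
# global_step is passed, also increments the step counter on each run.
train_op = opt.minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(train_op)
    print(sess.run([w, global_step]))  # w approaches 2.0, global_step == 10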
Example #1
Source File: adamW.py    From Conditional_Density_Estimation with MIT License    6 votes
def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=optimizer.Optimizer.GATE_OP,
               aggregation_method=None, colocate_gradients_with_ops=False,
               name=None, grad_loss=None, decay_var_list=None):
    """Add operations to minimize `loss` by updating `var_list` with decay.
    This function is the same as Optimizer.minimize except that it allows one to
    specify the variables that should be decayed using decay_var_list.
    If decay_var_list is None, all variables in var_list are decayed.
    For more information see the documentation of Optimizer.minimize.
    """
    self._decay_var_list = set(decay_var_list) if decay_var_list else False
    return super(DecoupledWeightDecayExtension, self).minimize(
        loss, global_step=global_step, var_list=var_list,
        gate_gradients=gate_gradients, aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
        grad_loss=grad_loss) 
Example #2
Source File: adam_weight_decay_exclude_utils.py    From BERT with Apache License 2.0    5 votes
def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=optimizer.Optimizer.GATE_OP,
               aggregation_method=None, colocate_gradients_with_ops=False,
               name=None, grad_loss=None):
    """Add operations to minimize `loss` by updating `var_list` with decay.

    This function is the same as Optimizer.minimize except that it allows one to
    specify the variables that should be decayed using decay_var_list.
    If decay_var_list is None, all variables in var_list are decayed.

    For more information see the documentation of Optimizer.minimize.

    Args:
      loss: A `Tensor` containing the value to minimize.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      var_list: Optional list or tuple of `Variable` objects to update to
        minimize `loss`.  Defaults to the list of variables collected in
        the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      name: Optional name for the returned operation.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      decay_var_list: Optional list of decay variables.

    Returns:
      An Operation that updates the variables in `var_list`.  If `global_step`
      was not `None`, that operation also increments `global_step`.

    """

    return super(DecoupledWeightDecayExtension, self).minimize(
        loss, global_step=global_step, var_list=var_list,
        gate_gradients=gate_gradients, aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
        grad_loss=grad_loss) 
Example #3
Source File: adam_weight_decay_utils.py    From BERT with Apache License 2.0    5 votes
def __init__(self, weight_decay, **kwargs):
		"""Construct the extension class that adds weight decay to an optimizer.
		Args:
			weight_decay: A `Tensor` or a floating point value, the factor by which
				a variable is decayed in the update step.
			**kwargs: Keyword arguments, passed on to the constructor of the
				base optimizer.
		"""
		self._decay_var_list = None  # is set in minimize or apply_gradients
		self._weight_decay = weight_decay
		# The tensors are initialized in call to _prepare
		self._weight_decay_tensor = None
		super(DecoupledWeightDecayExtension, self).__init__(**kwargs) 
Example #4
Source File: adam_weight_decay_utils.py    From BERT with Apache License 2.0    5 votes
def minimize(self, loss, global_step=None, var_list=None,
							 gate_gradients=optimizer.Optimizer.GATE_OP,
							 aggregation_method=None, colocate_gradients_with_ops=False,
							 name=None, grad_loss=None, decay_var_list=None):
		"""Add operations to minimize `loss` by updating `var_list` with decay.
		This function is the same as Optimizer.minimize except that it allows one to
		specify the variables that should be decayed using decay_var_list.
		If decay_var_list is None, all variables in var_list are decayed.
		For more information see the documentation of Optimizer.minimize.
		Args:
			loss: A `Tensor` containing the value to minimize.
			global_step: Optional `Variable` to increment by one after the
				variables have been updated.
			var_list: Optional list or tuple of `Variable` objects to update to
				minimize `loss`.  Defaults to the list of variables collected in
				the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
			gate_gradients: How to gate the computation of gradients.  Can be
				`GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
			aggregation_method: Specifies the method used to combine gradient terms.
				Valid values are defined in the class `AggregationMethod`.
			colocate_gradients_with_ops: If True, try colocating gradients with
				the corresponding op.
			name: Optional name for the returned operation.
			grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
			decay_var_list: Optional list of decay variables.
		Returns:
			An Operation that updates the variables in `var_list`.  If `global_step`
			was not `None`, that operation also increments `global_step`.
		"""
		self._decay_var_list = set(decay_var_list) if decay_var_list else False
		return super(DecoupledWeightDecayExtension, self).minimize(
				loss, global_step=global_step, var_list=var_list,
				gate_gradients=gate_gradients, aggregation_method=aggregation_method,
				colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
				grad_loss=grad_loss) 
Example #5
Source File: optimizer.py    From QANet_dureader with MIT License    5 votes
def __init__(self, weight_decay, **kwargs):
    """Construct the extension class that adds weight decay to an optimizer.
    Args:
      weight_decay: A `Tensor` or a floating point value, the factor by which
        a variable is decayed in the update step.
      **kwargs: Keyword arguments, passed on to the constructor of the
        base optimizer.
    """
    self._decay_var_list = None  # is set in minimize or apply_gradients
    self._weight_decay = weight_decay
    # The tensors are initialized in call to _prepare
    self._weight_decay_tensor = None
    super(DecoupledWeightDecayExtension, self).__init__(**kwargs) 
Example #6
Source File: optimizer.py    From QANet_dureader with MIT License    5 votes
def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=optimizer.Optimizer.GATE_OP,
               aggregation_method=None, colocate_gradients_with_ops=False,
               name=None, grad_loss=None, decay_var_list=None):
    """Add operations to minimize `loss` by updating `var_list` with decay.
    This function is the same as Optimizer.minimize except that it allows one to
    specify the variables that should be decayed using decay_var_list.
    If decay_var_list is None, all variables in var_list are decayed.
    For more information see the documentation of Optimizer.minimize.
    Args:
      loss: A `Tensor` containing the value to minimize.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      var_list: Optional list or tuple of `Variable` objects to update to
        minimize `loss`.  Defaults to the list of variables collected in
        the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      name: Optional name for the returned operation.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      decay_var_list: Optional list of decay variables.
    Returns:
      An Operation that updates the variables in `var_list`.  If `global_step`
      was not `None`, that operation also increments `global_step`.
    """
    self._decay_var_list = set(decay_var_list) if decay_var_list else False
    return super(DecoupledWeightDecayExtension, self).minimize(
        loss, global_step=global_step, var_list=var_list,
        gate_gradients=gate_gradients, aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
        grad_loss=grad_loss) 
Example #7
Source File: adamW.py    From Conditional_Density_Estimation with MIT License    5 votes
def __init__(self, weight_decay, **kwargs):
    """Construct the extension class that adds weight decay to an optimizer.
    Args:
      weight_decay: A `Tensor` or a floating point value, the factor by which
        a variable is decayed in the update step.
      **kwargs: Keyword arguments, passed on to the constructor of the
        base optimizer.
    """
    self._decay_var_list = None  # is set in minimize or apply_gradients
    self._weight_decay = weight_decay
    # The tensors are initialized in call to _prepare
    self._weight_decay_tensor = None
    super(DecoupledWeightDecayExtension, self).__init__(**kwargs) 
Example #8
Source File: weight_decay_optimizers.py    From robust_audio_ae with BSD 2-Clause "Simplified" License    5 votes
def __init__(self, weight_decay, **kwargs):
    """Construct the extension class that adds weight decay to an optimizer.

    Args:
      weight_decay: A `Tensor` or a floating point value, the factor by which
        a variable is decayed in the update step.
      **kwargs: Keyword arguments, passed on to the constructor of the
        base optimizer.
    """
    self._decay_var_list = None  # is set in minimize or apply_gradients
    self._weight_decay = weight_decay
    # The tensors are initialized in call to _prepare
    self._weight_decay_tensor = None
    super(DecoupledWeightDecayExtension, self).__init__(**kwargs) 
Example #9
Source File: weight_decay_optimizers.py    From robust_audio_ae with BSD 2-Clause "Simplified" License    5 votes
def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=optimizer.Optimizer.GATE_OP,
               aggregation_method=None, colocate_gradients_with_ops=False,
               name=None, grad_loss=None, decay_var_list=None):
    """Add operations to minimize `loss` by updating `var_list` with decay.

    This function is the same as Optimizer.minimize except that it allows one to
    specify the variables that should be decayed using decay_var_list.
    If decay_var_list is None, all variables in var_list are decayed.

    For more information see the documentation of Optimizer.minimize.

    Args:
      loss: A `Tensor` containing the value to minimize.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      var_list: Optional list or tuple of `Variable` objects to update to
        minimize `loss`.  Defaults to the list of variables collected in
        the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      name: Optional name for the returned operation.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      decay_var_list: Optional list of decay variables.

    Returns:
      An Operation that updates the variables in `var_list`.  If `global_step`
      was not `None`, that operation also increments `global_step`.

    """
    self._decay_var_list = set(decay_var_list) if decay_var_list else False
    return super(DecoupledWeightDecayExtension, self).minimize(
        loss, global_step=global_step, var_list=var_list,
        gate_gradients=gate_gradients, aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
        grad_loss=grad_loss) 
Example #10
Source File: adam_weight_decay_exclude_utils.py    From BERT with Apache License 2.0    4 votes
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of `var` in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and
    additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay 
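For intuition about the "decays weights BEFORE applying the update" note, here is a tiny framework-free sketch (plain Python, illustrative values) contrasting decoupled weight decay with classic L2 regularization for a single SGD step; the real decay is performed inside the optimizer's apply-gradients machinery, which these excerpts do not show:

def decoupled_sgd_step(w, grad, lr=0.1, weight_decay=0.01):
    # Decoupled (SGDW/AdamW-style): decay the weight first, independently of
    # the gradient, then let the base optimizer apply its usual update.
    w = w - weight_decay * w
    return w - lr * grad

def l2_sgd_step(w, grad, lr=0.1, weight_decay=0.01):
    # Classic L2 regularization: the decay term is folded into the gradient,
    # so adaptive optimizers rescale it together with everything else.
    return w - lr * (grad + weight_decay * w)

print(decoupled_sgd_step(w=1.0, grad=0.5))  # ~0.94
print(l2_sgd_step(w=1.0, grad=0.5))         # ~0.949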
Example #11
Source File: adam_weight_decay_utils.py    From BERT with Apache License 2.0    4 votes
def extend_with_decoupled_weight_decay(base_optimizer):
	"""Factory function returning an optimizer class with decoupled weight decay.
	Returns an optimizer class. An instance of the returned class computes the
	update step of `base_optimizer` and additionally decays the weights.
	E.g., the class returned by
	`extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
	`tf.contrib.opt.AdamWOptimizer`.
	The API of the new optimizer class slightly differs from the API of the
	base optimizer:
	- The first argument to the constructor is the weight decay rate.
	- `minimize` and `apply_gradients` accept the optional keyword argument
		`decay_var_list`, which specifies the variables that should be decayed.
		If `None`, all variables that are optimized are decayed.
	Usage example:
	```python
	# MyAdamW is a new class
	MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
	# Create a MyAdamW object
	optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
	sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
	```
	Note that this extension decays weights BEFORE applying the update based
	on the gradient, i.e. this extension only has the desired behaviour for
	optimizers which do not depend on the value of `var` in the update step!
	Args:
		base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
	Returns:
		A new optimizer class that inherits from DecoupledWeightDecayExtension
		and base_optimizer.
	"""

	class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
																					base_optimizer):
		"""Base_optimizer with decoupled weight decay.
		This class computes the update step of `base_optimizer` and
		additionally decays the variable with the weight decay being decoupled from
		the optimization steps w.r.t. the loss function, as described by
		Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
		For SGD variants, this simplifies hyperparameter search since
		it decouples the settings of weight decay and learning rate.
		For adaptive gradient algorithms, it regularizes variables with large
		gradients more than L2 regularization would, which was shown to yield
		better training loss and generalization error in the paper above.
		"""

		def __init__(self, weight_decay, *args, **kwargs):
			# super delegation is necessary here
			# pylint: disable=useless-super-delegation
			super(OptimizerWithDecoupledWeightDecay, self).__init__(
					weight_decay, *args, **kwargs)
			# pylint: enable=useless-super-delegation

	return OptimizerWithDecoupledWeightDecay


# @tf_export("contrib.opt.MomentumWOptimizer") 
Example #12
Source File: optimizer.py    From QANet_dureader with MIT License    4 votes
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.
  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.
  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.
  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```
  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of `var` in the update step!
  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.
    This class computes the update step of `base_optimizer` and
    additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay


# @tf_export("contrib.opt.MomentumWOptimizer") 
Example #13
Source File: adamW.py    From Conditional_Density_Estimation with MIT License    4 votes
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.
  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.
  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.
  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```
  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of `var` in the update step!
  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """
  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.
    This class computes the update step of `base_optimizer` and
    additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay 
Example #14
Source File: weight_decay_optimizers.py    From robust_audio_ae with BSD 2-Clause "Simplified" License    4 votes
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of `var` in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and
    additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay