Python tensorflow.python.ops.clip_ops.clip_by_norm() Examples
The following are 28 code examples of tensorflow.python.ops.clip_ops.clip_by_norm().
Each example is taken from an open-source project; the originating project, source file, and license are noted above each example.
You may also want to check out all available functions and classes of the module tensorflow.python.ops.clip_ops.
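Before the project examples, here is a minimal, self-contained sketch of what clip_by_norm does: it rescales a tensor so that its L2 norm does not exceed a given maximum, optionally computing that norm along specific axes. The snippet below uses the public tf.clip_by_norm alias in TF 2.x eager mode; the tensor values are purely illustrative.

import tensorflow as tf

# A tensor whose L2 norm is 5.0 (a 3-4-5 triangle).
t = tf.constant([[3.0, 4.0]])

# Rescale so the overall L2 norm is at most 1.0: t is multiplied by 1/5.
clipped = tf.clip_by_norm(t, clip_norm=1.0)
print(clipped.numpy())  # approximately [[0.6, 0.8]]

# With axes, the norm is computed per row rather than over the whole tensor.
m = tf.constant([[3.0, 4.0], [6.0, 8.0]])
per_row = tf.clip_by_norm(m, clip_norm=1.0, axes=[1])
print(per_row.numpy())  # each row rescaled to unit L2 norm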
Example #1
Source File: learning.py From tf-slim with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
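As a usage note, here is a hedged sketch of how clip_gradient_norms is typically wired into a TF1-style training step. The variable, loss, and max_norm value are illustrative and not part of the tf-slim source; clip_gradient_norms from the example above is assumed to be in scope.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A toy variable and loss so the sketch is runnable end to end.
w = tf.get_variable("w", initializer=[3.0, 4.0])
loss = tf.reduce_sum(tf.square(w))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)

# Clip each gradient to an L2 norm of at most 1.0 before applying it,
# using clip_gradient_norms from the example above (assumed in scope).
train_op = optimizer.apply_gradients(
    clip_gradient_norms(grads_and_vars, max_norm=1.0))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(w))  # w moves only a small step, since the gradient was clipped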
Example #2
Source File: variable_clipping_optimizer.py From keras-lambda with MIT License

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var, grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)
Example #3
Source File: training.py From keras-lambda with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #4
Source File: learning.py From keras-lambda with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #5
Source File: learning.py From mtl-ssl with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #6
Source File: variable_clipping_optimizer.py From deep_image_model with Apache License 2.0

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var.ref(), grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)
Example #7
Source File: training.py From deep_image_model with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #8
Source File: learning.py From deep_image_model with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #9
Source File: training.py From tf-slim with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #10
Source File: learning.py From ctw-baseline with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #11
Source File: learning.py From CVTron with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #12
Source File: learning.py From CVTron with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #13
Source File: learning.py From CVTron with Apache License 2.0

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #14
Source File: variable_clipping_optimizer.py From auto-alt-text-lambda-api with MIT License

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var, grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)
Example #15
Source File: training.py From auto-alt-text-lambda-api with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #16
Source File: learning.py From auto-alt-text-lambda-api with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #17
Source File: variable_clipping_optimizer.py From lambda-packs with MIT License

def _clip_sparse(self, grad, var):
  assert isinstance(grad, ops.IndexedSlices)
  clip_dims = self._vars_to_clip_dims[var]
  if 0 in clip_dims:
    logging.warning("Clipping norm across dims %s for %s is inefficient "
                    "when including sparse dimension 0.",
                    clip_dims, var.op.name)
    return self._clip_dense(var)

  with ops.colocate_with(var):
    var_subset = array_ops.gather(var, grad.indices)
  with self._maybe_colocate_with(var):
    normalized_var_subset = clip_ops.clip_by_norm(
        var_subset, self._max_norm, clip_dims)
    delta = ops.IndexedSlices(
        var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
  with ops.colocate_with(var):
    return var.scatter_sub(delta, use_locking=self._use_locking)
Example #18
Source File: training.py From lambda-packs with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #19
Source File: learning.py From lambda-packs with MIT License

def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars
Example #20
Source File: variable_clipping_optimizer.py From auto-alt-text-lambda-api with MIT License

def _clip_dense(self, var):
  with self._maybe_colocate_with(var):
    updated_var_value = var._ref()  # pylint: disable=protected-access
    normalized_var = clip_ops.clip_by_norm(
        updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
    delta = updated_var_value - normalized_var
  with ops.colocate_with(var):
    return var.assign_sub(delta, use_locking=self._use_locking)
Example #21
Source File: variable_clipping_optimizer.py From auto-alt-text-lambda-api with MIT License

def __init__(self,
             opt,
             vars_to_clip_dims,
             max_norm,
             use_locking=False,
             colocate_clip_ops_with_vars=False,
             name="VariableClipping"):
  """Construct a new clip-norm optimizer.

  Args:
    opt: The actual optimizer that will be used to compute and apply the
      gradients. Must be one of the Optimizer classes.
    vars_to_clip_dims: A dict with keys as Variables and values as lists
      of dimensions along which to compute the L2-norm. See
      `tf.clip_by_norm` for more details.
    max_norm: The L2-norm to clip to, for all variables specified.
    use_locking: If `True` use locks for clip update operations.
    colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
      ops with the corresponding variable.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to "VariableClipping".
  """
  super(VariableClippingOptimizer, self).__init__(use_locking, name)
  self._opt = opt
  # Defensive copy of input dict
  self._vars_to_clip_dims = {
      var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
  self._max_norm = max_norm
  self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
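For context, here is a hedged sketch of constructing this optimizer. In TF 1.x the class was reachable through tf.contrib.opt; the wrapped optimizer, embedding variable, and clip dimensions below are illustrative assumptions rather than part of the original file.

import tensorflow as tf  # TF 1.x
# TF 1.x only; the class was exposed via tf.contrib.opt.
from tensorflow.contrib.opt import VariableClippingOptimizer

embeddings = tf.get_variable("embeddings", shape=[1000, 64])

base_opt = tf.train.AdagradOptimizer(learning_rate=0.1)
# After each update, clip every embedding row (norm over dimension 1)
# back to an L2 norm of at most 2.0.
clipping_opt = VariableClippingOptimizer(
    opt=base_opt,
    vars_to_clip_dims={embeddings: [1]},
    max_norm=2.0)

Calling minimize or apply_gradients on the wrapper then applies the base optimizer's update and follows it with the clipping ops implemented by the _clip_dense and _clip_sparse methods shown in these examples.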
Example #22
Source File: variable_clipping_optimizer.py From deep_image_model with Apache License 2.0

def __init__(self,
             opt,
             vars_to_clip_dims,
             max_norm,
             use_locking=False,
             colocate_clip_ops_with_vars=False,
             name="VariableClipping"):
  """Construct a new clip-norm optimizer.

  Args:
    opt: The actual optimizer that will be used to compute and apply the
      gradients. Must be one of the Optimizer classes.
    vars_to_clip_dims: A dict with keys as Variables and values as lists
      of dimensions along which to compute the L2-norm. See
      `tf.clip_by_norm` for more details.
    max_norm: The L2-norm to clip to, for all variables specified.
    use_locking: If `True` use locks for clip update operations.
    colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
      ops with the corresponding variable.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to "VariableClipping".
  """
  super(VariableClippingOptimizer, self).__init__(use_locking, name)
  self._opt = opt
  # Defensive copy of input dict
  self._vars_to_clip_dims = {
      var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
  self._max_norm = max_norm
  self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
Example #23
Source File: variable_clipping_optimizer.py From deep_image_model with Apache License 2.0

def _clip_dense(self, var):
  with self._maybe_colocate_with(var):
    updated_var_value = array_ops.identity(var.ref())
    normalized_var = clip_ops.clip_by_norm(
        updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
    delta = updated_var_value - normalized_var
  with ops.colocate_with(var):
    return var.assign_sub(delta, use_locking=self._use_locking)
Example #24
Source File: variable_clipping_optimizer.py From lambda-packs with MIT License

def _clip_dense(self, var):
  with self._maybe_colocate_with(var):
    updated_var_value = var._ref()  # pylint: disable=protected-access
    normalized_var = clip_ops.clip_by_norm(
        updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
    delta = updated_var_value - normalized_var
  with ops.colocate_with(var):
    return var.assign_sub(delta, use_locking=self._use_locking)
Example #25
Source File: variable_clipping_optimizer.py From lambda-packs with MIT License

def __init__(self,
             opt,
             vars_to_clip_dims,
             max_norm,
             use_locking=False,
             colocate_clip_ops_with_vars=False,
             name="VariableClipping"):
  """Construct a new clip-norm optimizer.

  Args:
    opt: The actual optimizer that will be used to compute and apply the
      gradients. Must be one of the Optimizer classes.
    vars_to_clip_dims: A dict with keys as Variables and values as lists
      of dimensions along which to compute the L2-norm. See
      `tf.clip_by_norm` for more details.
    max_norm: The L2-norm to clip to, for all variables specified.
    use_locking: If `True` use locks for clip update operations.
    colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
      ops with the corresponding variable.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to "VariableClipping".
  """
  super(VariableClippingOptimizer, self).__init__(use_locking, name)
  self._opt = opt
  # Defensive copy of input dict
  self._vars_to_clip_dims = {
      var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
  self._max_norm = max_norm
  self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
Example #26
Source File: variable_clipping_optimizer.py From keras-lambda with MIT License

def __init__(self,
             opt,
             vars_to_clip_dims,
             max_norm,
             use_locking=False,
             colocate_clip_ops_with_vars=False,
             name="VariableClipping"):
  """Construct a new clip-norm optimizer.

  Args:
    opt: The actual optimizer that will be used to compute and apply the
      gradients. Must be one of the Optimizer classes.
    vars_to_clip_dims: A dict with keys as Variables and values as lists
      of dimensions along which to compute the L2-norm. See
      `tf.clip_by_norm` for more details.
    max_norm: The L2-norm to clip to, for all variables specified.
    use_locking: If `True` use locks for clip update operations.
    colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
      ops with the corresponding variable.
    name: Optional name prefix for the operations created when applying
      gradients. Defaults to "VariableClipping".
  """
  super(VariableClippingOptimizer, self).__init__(use_locking, name)
  self._opt = opt
  # Defensive copy of input dict
  self._vars_to_clip_dims = {
      var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
  self._max_norm = max_norm
  self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
Example #27
Source File: variable_clipping_optimizer.py From keras-lambda with MIT License

def _clip_dense(self, var):
  with self._maybe_colocate_with(var):
    updated_var_value = var._ref()  # pylint: disable=protected-access
    normalized_var = clip_ops.clip_by_norm(
        updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
    delta = updated_var_value - normalized_var
  with ops.colocate_with(var):
    return var.assign_sub(delta, use_locking=self._use_locking)
Example #28
Source File: embedding_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _clip(params, ids, max_norm):
  """Helper function for _embedding_lookup_and_transform.

  This function optionally clips embeddings to an l2-norm of max_norm.

  Args:
    params: A `Tensor` of embeddings retrieved by `_gather`.
    ids: The `ids` argument that was passed to `_gather`.
    max_norm: If provided, the embeddings are l2-normalized to the value of
      max_norm.

  Returns:
    A `Tensor` with the same type as `params`.
  """

  def _rank(x):
    """Helper function to retrieve the rank of a tensor.

    Args:
      x: Something convertible to `Tensor`.

    Returns:
      Either a pair `(rank, True)` where `rank` is an integer or a pair
      `(rank, False)` where `rank` is an integer `Tensor`. In either case,
      `rank` is the rank of `x`.
    """
    rank = ops.convert_to_tensor(x).get_shape().ndims
    if rank:
      return rank, True
    else:
      return array_ops.rank(x), False

  if max_norm is None:
    return params
  ids_rank, ids_static = _rank(ids)
  params_rank, params_static = _rank(params)
  return clip_ops.clip_by_norm(
      params, max_norm,
      axes=(list(range(ids_rank, params_rank))
            if ids_static and params_static
            else math_ops.range(ids_rank, params_rank)))
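This helper is what backs the max_norm argument of the public embedding lookup ops, so the user-visible effect can be sketched as below (TF 2.x eager style; the values are illustrative).

import tensorflow as tf

# Two embedding rows with L2 norms of 5.0 and 10.0.
params = tf.constant([[3.0, 4.0], [6.0, 8.0]])
ids = tf.constant([0, 1, 1])

# Each looked-up embedding vector is clipped to an L2 norm of at most 1.0.
looked_up = tf.nn.embedding_lookup(params, ids, max_norm=1.0)
print(looked_up.numpy())  # every returned row is approximately [0.6, 0.8]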