Python tensorflow.python.ops.array_ops.check_numerics() Examples
The following are 19 code examples of tensorflow.python.ops.array_ops.check_numerics(), collected from open-source projects; each example is labeled with the project and source file it was taken from. check_numerics(tensor, message) is the low-level op behind TensorFlow's NaN/Inf guards: it returns its input tensor unchanged, but fails with an InvalidArgumentError (prefixed with message) if the tensor contains any NaN or Inf values.
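As a quick orientation before the examples, here is a minimal sketch (my addition, not taken from any of the projects below, assuming TensorFlow 2.x eager execution) using the public wrapper tf.debugging.check_numerics, which dispatches to the same underlying op:

import tensorflow as tf

# Minimal sketch: check a tensor for NaN/Inf values.
x = tf.constant([1.0, 2.0, float("nan")])
try:
    tf.debugging.check_numerics(x, message="x contains non-finite values")
except tf.errors.InvalidArgumentError as e:
    print(e.message)  # the error text echoes our message and reports the NaN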
Example #1
Source File: numerics.py From deep_image_model with Apache License 2.0
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Returns: A `group` op depending on all `check_numerics` ops added. """ check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
Example #2
Source File: numerics.py From keras-lambda with MIT License
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Returns: A `group` op depending on all `check_numerics` ops added. """ check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
Example #3
Source File: numerics.py From keras-lambda with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
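A short usage sketch (my addition, not from the keras-lambda project) via the public alias tf.compat.v1.verify_tensor_all_finite; in TF 2.x the equivalent is tf.debugging.assert_all_finite:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

t = tf.constant([1.0, float("inf")])
# The returned tensor is `t` itself, gated on the check.
checked = tf.verify_tensor_all_finite(t, msg="t must be finite")

with tf.Session() as sess:
    sess.run(checked)  # raises InvalidArgumentError carrying our message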
Example #4
Source File: numerics.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
Example #5
Source File: tpu_estimator.py From xlnet with Apache License 2.0
def _sync_variables_ops(ctx): """Create varriables synchronization ops. Gets the variables back from TPU nodes. This means the variables updated by TPU will now be *synced* to host memory. In BROADCAST mode, we skip this sync since the variables are ususally too big to transmit via RPC. Args: ctx: A `_InternalTPUContext` instance with mode. Returns: A list of sync ops. """ if not ctx.is_input_broadcast_with_iterators(): return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] else: return [control_flow_ops.no_op()]
Example #6
Source File: tpu_estimator.py From embedding-as-service with MIT License
def _sync_variables_ops(ctx): """Create varriables synchronization ops. Gets the variables back from TPU nodes. This means the variables updated by TPU will now be *synced* to host memory. In BROADCAST mode, we skip this sync since the variables are ususally too big to transmit via RPC. Args: ctx: A `_InternalTPUContext` instance with mode. Returns: A list of sync ops. """ if not ctx.is_input_broadcast_with_iterators(): return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] else: return [control_flow_ops.no_op()]
Example #7
Source File: numerics.py From deep_image_model with Apache License 2.0
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
Example #8
Source File: tpu_estimator.py From Chinese-XLNet with Apache License 2.0
def _sync_variables_ops(ctx): """Create varriables synchronization ops. Gets the variables back from TPU nodes. This means the variables updated by TPU will now be *synced* to host memory. In BROADCAST mode, we skip this sync since the variables are ususally too big to transmit via RPC. Args: ctx: A `_InternalTPUContext` instance with mode. Returns: A list of sync ops. """ if not ctx.is_input_broadcast_with_iterators(): return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] else: return [control_flow_ops.no_op()]
Example #9
Source File: numerics.py From auto-alt-text-lambda-api with MIT License
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Returns: A `group` op depending on all `check_numerics` ops added. """ check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
Example #10
Source File: numerics.py From auto-alt-text-lambda-api with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
Example #11
Source File: numerics.py From lambda-packs with MIT License
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Returns: A `group` op depending on all `check_numerics` ops added. """ check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
Example #12
Source File: numerics.py From lambda-packs with MIT License
def verify_tensor_all_finite(t, msg, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `t`.
  """
  with ops.name_scope(name, "VerifyFinite", [t]) as name:
    t = ops.convert_to_tensor(t, name="t")
    with ops.colocate_with(t):
      verify_input = array_ops.check_numerics(t, message=msg)
      out = control_flow_ops.with_dependencies([verify_input], t)
  return out
Example #13
Source File: array_grad.py From deep_image_model with Apache License 2.0
def _CheckNumericsGrad(_, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
Example #14
Source File: array_grad.py From lambda-packs with MIT License
def _CheckNumericsGrad(_, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
Example #15
Source File: tpu_estimator.py From transformer-xl with Apache License 2.0
def _sync_variables_ops():
  # Gets the variables back from TPU nodes. This means the variables updated
  # by TPU will now be *synced* to host memory.
  return [
      array_ops.check_numerics(v.read_value(),
                               'Gradient for %s is NaN' % v.name).op
      for v in variables.trainable_variables()
  ]
Example #16
Source File: array_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _CheckNumericsGrad(_, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
Example #17
Source File: array_grad.py From auto-alt-text-lambda-api with MIT License
def _CheckNumericsGrad(_, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
Example #18
Source File: numerics.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Note: This API is not compatible with the use of @{tf.cond} or @{tf.while_loop}, and will raise a `ValueError` if you attempt to call it in such a graph. Returns: A `group` op depending on all `check_numerics` ops added. Raises: ValueError: If the graph contains any numeric operations in a control flow structure. """ check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: if op._get_control_flow_context() is not None: # pylint: disable=protected-access raise ValueError("`tf.add_check_numerics_ops() is not compatible " "with TensorFlow control flow operations such as " "`tf.cond()` or `tf.while_loop()`.") message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
Example #19
Source File: array_grad.py From keras-lambda with MIT License
def _CheckNumericsGrad(_, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")