Python tensorflow.python.ops.check_ops.assert_integer() Examples
The following are 5 code examples of tensorflow.python.ops.check_ops.assert_integer().
You can go to the original project or source file by following the attribution above each example.
You may also want to check out all the other functions and classes available in the tensorflow.python.ops.check_ops module.
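For context, check_ops.assert_integer() is a static check: it returns a no-op when the tensor's dtype is an integer type and raises TypeError at graph-construction time otherwise. Below is a minimal sketch of how it is typically gated onto downstream ops with a control dependency; the use of the TF 1.x compat API and the placeholder/tensor names are illustrative assumptions, not part of the examples that follow.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import check_ops

tf.disable_eager_execution()

# Illustrative integer-typed labels tensor.
labels = tf.placeholder(tf.int64, shape=[None], name="labels")

# assert_integer checks the dtype statically: it returns a no-op for integer
# dtypes and raises TypeError during graph construction otherwise.
assertion = check_ops.assert_integer(labels)

# Gate a downstream op on the check, as the examples below do via
# control_dependencies / with_dependencies.
with tf.control_dependencies([assertion]):
    checked_labels = tf.identity(labels)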
Example #1
Source File: hyperspherical_uniform.py From s-vae-tf with MIT License
def __init__(self, dim, dtype=dtypes.float32, validate_args=False,
             allow_nan_stats=True, name="HypersphericalUniform"):
    """Initialize a batch of Hyperspherical Uniform distributions.

    Args:
      dim: Integer tensor, dimensionality of the distribution(s). Must be
        `dim > 0`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      InvalidArgumentError: if `validate_args=True` and `dim` is not a
        positive scalar integer.
    """
    parameters = locals()
    with ops.name_scope(name, values=[dim]):
        with ops.control_dependencies(
                [check_ops.assert_positive(dim),
                 check_ops.assert_integer(dim),
                 check_ops.assert_scalar(dim)] if validate_args else []):
            self._dim = dim

        super(HypersphericalUniform, self).__init__(
            dtype=dtype,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[],
            name=name)
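The validation above only enters the graph when validate_args=True. The following sketch reproduces just that conditional gating with core TF ops, outside of the HypersphericalUniform class; it assumes the TF 1.x compat API, uses with_dependencies instead of the context manager for brevity, and the constant dim is illustrative.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import check_ops, control_flow_ops

tf.disable_eager_execution()

dim = tf.constant(3)  # illustrative: a positive, scalar, integer dimensionality
validate_args = True

# Same conditional gating as in __init__ above: the asserts are only added
# to the graph when validation is requested.
checks = ([check_ops.assert_positive(dim),
           check_ops.assert_integer(dim),
           check_ops.assert_scalar(dim)] if validate_args else [])
checked_dim = control_flow_ops.with_dependencies(checks, dim)

with tf.Session() as sess:
    print(sess.run(checked_dim))  # 3; a non-positive dim would fail at run time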
Example #2
Source File: sampling_ops.py From lambda-packs with MIT License
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since the number of classes shouldn't change at runtime, the
    # probabilities shape should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Labels must be integers in the range 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels),
      check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)
  return tensor_list, labels, checked_probs_list
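The part of _verify_input that involves assert_integer is the final block: labels must be integer-typed, non-negative, and strictly less than the number of classes. Here is a self-contained sketch of just that check, assuming the TF 1.x compat API; the toy labels and class count are illustrative.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import check_ops, control_flow_ops, math_ops

tf.disable_eager_execution()

num_classes = 4                                      # illustrative
labels = tf.constant([0, 2, 3, 1], dtype=tf.int64)   # illustrative

# Labels must be integer typed (a static dtype check), non-negative, and less
# than num_classes (runtime checks), mirroring the end of _verify_input above.
checked_labels = control_flow_ops.with_dependencies([
    check_ops.assert_integer(labels),
    check_ops.assert_non_negative(labels),
    check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
], labels)

with tf.Session() as sess:
    print(sess.run(checked_labels))  # [0 2 3 1]; a label >= 4 would raise here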
Example #3
Source File: sampling_ops.py From auto-alt-text-lambda-api with MIT License
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since the number of classes shouldn't change at runtime, the
    # probabilities shape should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Labels must be integers in the range 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels),
      check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)
  return tensor_list, labels, checked_probs_list
Example #4
Source File: sampling_ops.py From deep_image_model with Apache License 2.0
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since the number of classes shouldn't change at runtime, the
    # probabilities shape should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(probs),
         check_ops.assert_less(prob_sum, 1.0 + tol),
         check_ops.assert_less(1.0 - tol, prob_sum)], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [control_flow_ops.with_dependencies(
      [lbl_assert,
       check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)],
      x) for x in tensor_list]

  # Labels must be integers in the range 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies(
      [check_ops.assert_integer(labels),
       check_ops.assert_non_negative(labels),
       check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))],
      labels)
  return tensor_list, labels, checked_probs_list
Example #5
Source File: sampling_ops.py From keras-lambda with MIT License
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since the number of classes shouldn't change at runtime, the
    # probabilities shape should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Labels must be integers in the range 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels),
      check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)
  return tensor_list, labels, checked_probs_list