Python tensorflow.python.ops.math_ops.reduce_any() Examples
The following are 18 code examples of tensorflow.python.ops.math_ops.reduce_any(), collected from open-source projects. The source file, project, and license are noted above each example.
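
Before the project examples, here is a minimal sketch of what reduce_any computes: a logical OR reduction over a boolean tensor. It uses the public TensorFlow 2.x API; the examples below reach the same op through the internal math_ops module.

import tensorflow as tf

x = tf.constant([[True, False],
                 [False, False]])
tf.reduce_any(x)          # True: logical OR over all elements
tf.reduce_any(x, axis=1)  # [True, False]: OR along each row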
Example #1
Source File: topn.py From auto-alt-text-lambda-api with MIT License
def insert(self, ids, scores):
  """Insert the ids and scores into the TopN."""
  with ops.control_dependencies(self.last_ops):
    scatter_op = state_ops.scatter_update(self.id_to_score, ids, scores)
    larger_scores = math_ops.greater(scores, self.sl_scores[0])

    def shortlist_insert():
      larger_ids = array_ops.boolean_mask(
          math_ops.to_int64(ids), larger_scores)
      larger_score_values = array_ops.boolean_mask(scores, larger_scores)
      shortlist_ids, new_ids, new_scores = tensor_forest_ops.top_n_insert(
          self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
      u1 = state_ops.scatter_update(self.sl_ids, shortlist_ids, new_ids)
      u2 = state_ops.scatter_update(self.sl_scores, shortlist_ids, new_scores)
      return control_flow_ops.group(u1, u2)

    # We only need to insert into the shortlist if there are any
    # scores larger than the threshold.
    cond_op = control_flow_ops.cond(
        math_ops.reduce_any(larger_scores), shortlist_insert,
        control_flow_ops.no_op)
    with ops.control_dependencies([cond_op]):
      self.last_ops = [scatter_op, cond_op]
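
The detail worth noting in this example is the guard: reduce_any over the comparison mask lets tf.cond skip the shortlist update entirely when no score beats the current threshold. Below is a stripped-down sketch of the same guard, written against the TensorFlow 2.x public API rather than the internal modules used above.

import tensorflow as tf

scores = tf.constant([0.1, 0.9, 0.3])
qualifies = tf.greater(scores, 0.5)

# Run the (possibly expensive) branch only if at least one element qualifies.
result = tf.cond(
    tf.reduce_any(qualifies),
    lambda: tf.boolean_mask(scores, qualifies),
    lambda: tf.zeros([0], dtype=scores.dtype))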
Example #2
Source File: topn.py From keras-lambda with MIT License
def insert(self, ids, scores):
  """Insert the ids and scores into the TopN."""
  with ops.control_dependencies(self.last_ops):
    scatter_op = state_ops.scatter_update(self.id_to_score, ids, scores)
    larger_scores = math_ops.greater(scores, self.sl_scores[0])

    def shortlist_insert():
      larger_ids = array_ops.boolean_mask(
          math_ops.to_int64(ids), larger_scores)
      larger_score_values = array_ops.boolean_mask(scores, larger_scores)
      shortlist_ids, new_ids, new_scores = tensor_forest_ops.top_n_insert(
          self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
      u1 = state_ops.scatter_update(self.sl_ids, shortlist_ids, new_ids)
      u2 = state_ops.scatter_update(self.sl_scores, shortlist_ids, new_scores)
      return control_flow_ops.group(u1, u2)

    # We only need to insert into the shortlist if there are any
    # scores larger than the threshold.
    cond_op = control_flow_ops.cond(
        math_ops.reduce_any(larger_scores), shortlist_insert,
        control_flow_ops.no_op)
    with ops.control_dependencies([cond_op]):
      self.last_ops = [scatter_op, cond_op]
Example #3
Source File: backend.py From lambda-packs with MIT License
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  axis = _normalize_axis(axis, ndim(x))
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
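
Note the cast to bool before the reduction, which is what lets numeric inputs through: any nonzero entry becomes True. The public tf.keras.backend.any mirrors this wrapper; a small usage sketch, assuming TensorFlow 2.x:

import tensorflow as tf

x = tf.constant([[0, 1],
                 [0, 0]])
tf.keras.backend.any(x, axis=1)  # [True, False]; nonzero entries count as True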
Example #4
Source File: ops_test.py From keras-lambda with MIT License
def test(self):
  result_lt = ops.reduce_any(self.bool_lt, {'channel'})
  golden_lt = core.LabeledTensor(
      math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
  self.assertLabeledTensorsEqual(result_lt, golden_lt)
Example #5
Source File: ops_test.py From keras-lambda with MIT License
def test_name(self):
  result_lt = ops.reduce_any(self.bool_lt, {'channel'})
  self.assertIn('lt_reduce_any', result_lt.name)
Example #6
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, axis=axis, keep_dims=keepdims)
Example #7
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_reduce_any(data, keep_dims=None):
  """One iteration of reduce_any"""
  return _test_reduce(math_ops.reduce_any, data, keep_dims)
Example #8
Source File: von_mises_fisher.py From s-vae-tf with MIT License
def __while_loop(self, b, a, d, n, seed):

  def __cond(w, e, bool_mask, b, a, d):
    return math_ops.reduce_any(bool_mask)

  def __body(w_, e_, bool_mask, b, a, d):
    e = math_ops.cast(Beta((self.__mf - 1) / 2, (self.__mf - 1) / 2).sample(
        shape, seed=seed), dtype=self.dtype)
    u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)

    w = (1 - (1 + b) * e) / (1 - (1 - b) * e)
    t = (2 * a * b) / (1 - (1 - b) * e)

    accept = gen_math_ops.greater(((self.__mf - 1) * math_ops.log(t) - t + d),
                                  math_ops.log(u))
    reject = gen_math_ops.logical_not(accept)

    w_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept), w, w_)
    e_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept), e, e_)
    bool_mask = array_ops.where(gen_math_ops.logical_and(bool_mask, accept),
                                reject, bool_mask)

    return w_, e_, bool_mask, b, a, d

  shape = array_ops.concat([[n], self.batch_shape_tensor()[:-1], [1]], 0)
  b, a, d = [
      gen_array_ops.tile(array_ops.expand_dims(e, axis=0),
                         [n] + [1] * len(e.shape))
      for e in (b, a, d)
  ]

  w, e, bool_mask, b, a, d = control_flow_ops.while_loop(
      __cond, __body,
      [array_ops.zeros_like(b, dtype=self.dtype),
       array_ops.zeros_like(b, dtype=self.dtype),
       array_ops.ones_like(b, dtypes.bool),
       b, a, d])

  return e, w
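
Here reduce_any drives a rejection-sampling loop: the while_loop keeps iterating as long as any element of the boolean mask is still waiting to be accepted. The sketch below strips that structure to its bones on the TensorFlow 2.x public API, with a deliberately simple stand-in acceptance rule (a uniform draw above 0.5) rather than the von Mises-Fisher test used above.

import tensorflow as tf

def sample_until_accepted(shape):
  """Redraw uniforms until every slot has produced a value above 0.5."""
  samples = tf.zeros(shape)
  pending = tf.ones(shape, dtype=tf.bool)  # True = slot still needs a sample

  def cond(samples, pending):
    return tf.reduce_any(pending)          # loop while anything is pending

  def body(samples, pending):
    draw = tf.random.uniform(shape)
    accept = draw > 0.5                    # hypothetical acceptance rule
    newly_done = tf.logical_and(pending, accept)
    samples = tf.where(newly_done, draw, samples)
    pending = tf.logical_and(pending, tf.logical_not(accept))
    return samples, pending

  samples, _ = tf.while_loop(cond, body, [samples, pending])
  return samples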
Example #9
Source File: metric_loss_ops.py From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the nearest centroids.

  Tensorflow has numerical instability and doesn't always choose
  the data point with theoretically zero distance as its nearest neighbor.
  Thus, for each centroid in centroid_ids, explicitly assign
  the centroid itself as the nearest centroid.
  This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(
      array_ops.one_hot(
          centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
      axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed
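
The mask construction deserves a second look: one_hot produces one boolean row per centroid, and reduce_any ORs the rows together along axis 0 into a single mask that flags exactly the centroid positions. The step in isolation, using the TensorFlow 2.x public API:

import tensorflow as tf

centroid_ids = tf.constant([1, 3])
batch_size = 5
mask = tf.reduce_any(
    tf.one_hot(centroid_ids, batch_size, True, False, axis=-1, dtype=tf.bool),
    axis=0)
# mask -> [False, True, False, True, False]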
Example #10
Source File: metric_learning.py From tf-slim with Apache License 2.0
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the nearest centroids.

  Tensorflow has numerical instability and doesn't always choose
  the data point with theoretically zero distance as its nearest neighbor.
  Thus, for each centroid in centroid_ids, explicitly assign
  the centroid itself as the nearest centroid.
  This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(
      array_ops.one_hot(
          centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
      axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.cast(math_ops.range(array_ops.shape(centroid_ids)[0]),
                    dtypes.int64))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed
Example #11
Source File: ops_test.py From auto-alt-text-lambda-api with MIT License
def test(self):
  result_lt = ops.reduce_any(self.bool_lt, {'channel'})
  golden_lt = core.LabeledTensor(
      math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
  self.assertLabeledTensorsEqual(result_lt, golden_lt)
Example #12
Source File: ops_test.py From auto-alt-text-lambda-api with MIT License
def test_name(self):
  result_lt = ops.reduce_any(self.bool_lt, {'channel'})
  self.assertIn('lt_reduce_any', result_lt.name)
Example #13
Source File: bijector.py From deep_image_model with Apache License 2.0
def _process_scale(self, scale, event_ndims):
  """Helper to __init__ which gets scale in batch-ready form.

  This function expands dimensions of `scale` according to the following
  table:

                       event_ndims
  scale.ndims      0               1
            0      [1]+S+[1,1]     "silent error"
            1      [ ]+S+[1,1]     "silent error"
            2      [ ]+S+[1,1]     [1]+S+[ ]
            3      [ ]+S+[1,1]     [ ]+S+[ ]
          ...      (same)          (same)

  The idea is that we want to convert `scale` into something which can
  always work for, say, the left-hand argument of `batch_matmul`.

  Args:
    scale: `Tensor`.
    event_ndims: `Tensor` (0D, `int32`).

  Returns:
    scale: `Tensor` with dims expanded according to [above] table.
    batch_ndims: `Tensor` (0D, `int32`).  The ndims of the `batch` portion.
  """
  ndims = array_ops.rank(scale)
  left = math_ops.select(
      math_ops.reduce_any([
          math_ops.reduce_all([
              math_ops.equal(ndims, 0),
              math_ops.equal(event_ndims, 0)
          ]),
          math_ops.reduce_all([
              math_ops.equal(ndims, 2),
              math_ops.equal(event_ndims, 1)
          ])]), 1, 0)
  right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
  pad = array_ops.concat(0, (
      array_ops.ones([left], dtype=dtypes.int32),
      array_ops.shape(scale),
      array_ops.ones([right], dtype=dtypes.int32)))
  scale = array_ops.reshape(scale, pad)
  batch_ndims = ndims - 2 + right
  # For safety, explicitly zero-out the upper triangular part.
  scale = array_ops.matrix_band_part(scale, -1, 0)
  if self.validate_args:
    # matrix_band_part will fail if scale is not at least rank 2.
    shape = array_ops.shape(scale)
    assert_square = check_ops.assert_equal(
        shape[-2], shape[-1],
        message="Input must be a (batch of) square matrix.")
    # Assuming lower-triangular means we only need check diag != 0.
    diag = array_ops.matrix_diag_part(scale)
    is_non_singular = math_ops.logical_not(
        math_ops.reduce_any(
            math_ops.equal(diag, ops.convert_to_tensor(0, dtype=diag.dtype))))
    assert_non_singular = control_flow_ops.Assert(
        is_non_singular, ["Singular matrix encountered", diag])
    scale = control_flow_ops.with_dependencies(
        [assert_square, assert_non_singular], scale)
  return scale, batch_ndims
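
In the validation branch above, reduce_any implements the singularity test: a lower-triangular matrix is singular exactly when some diagonal entry is zero. The check in isolation, as a sketch on the TensorFlow 2.x public API:

import tensorflow as tf

scale = tf.constant([[2.0, 0.0],
                     [1.0, 0.0]])  # zero on the diagonal -> singular
diag = tf.linalg.diag_part(scale)
is_non_singular = tf.logical_not(tf.reduce_any(tf.equal(diag, 0.0)))
# is_non_singular -> False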
Example #14
Source File: kullback_leibler.py From deep_image_model with Apache License 2.0
def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  If there is no KL method registered specifically for `type(dist_a)` and
  `type(dist_b)`, then the class hierarchies of these types are searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(dist_a)`).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _registered_kl(type(dist_a), type(dist_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % (type(dist_a).__name__, type(dist_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False). Values:"
             % (dist_a.name, dist_b.name), kl_t])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")
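
This example and the four that follow all share one guard: an Assert whose condition is logical_not(reduce_any(is_nan(...))), attached through a control dependency so the check runs before the result is consumed. Condensed into a helper, as a sketch on the TensorFlow 2.x public API (checked is a hypothetical name, not part of the original file):

import tensorflow as tf

def checked(t, message="tensor contains NaN values"):
  """Return t, raising at run time if any entry is NaN."""
  assert_op = tf.debugging.Assert(
      tf.logical_not(tf.reduce_any(tf.math.is_nan(t))), [message, t])
  with tf.control_dependencies([assert_op]):
    return tf.identity(t)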
Example #15
Source File: kullback_leibler.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
  """Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  """
  kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(distribution_a || distribution_b) registered for "
        "distribution_a type %s and distribution_b type %s"
        % (type(distribution_a).__name__, type(distribution_b).__name__))

  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(distribution_a, distribution_b, name=name)
    if allow_nan_stats:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan_stats=False). Values:"
             % (distribution_a.name, distribution_b.name), kl_t])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")
Example #16
Source File: kullback_leibler.py From keras-lambda with MIT License
def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  If there is no KL method registered specifically for `type(dist_a)` and
  `type(dist_b)`, then the class hierarchies of these types are searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(dist_a)`).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _registered_kl(type(dist_a), type(dist_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % (type(dist_a).__name__, type(dist_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False). Values:"
             % (dist_a.name, dist_b.name), kl_t])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")
Example #17
Source File: kullback_leibler.py From auto-alt-text-lambda-api with MIT License
def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  If there is no KL method registered specifically for `type(dist_a)` and
  `type(dist_b)`, then the class hierarchies of these types are searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(dist_a)`).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _registered_kl(type(dist_a), type(dist_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % (type(dist_a).__name__, type(dist_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False). Values:"
             % (dist_a.name, dist_b.name), kl_t])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")
Example #18
Source File: kullback_leibler.py From lambda-packs with MIT License
def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
  """Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  """
  kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(distribution_a || distribution_b) registered for "
        "distribution_a type %s and distribution_b type %s"
        % (type(distribution_a).__name__, type(distribution_b).__name__))

  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(distribution_a, distribution_b, name=name)
    if allow_nan_stats:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan_stats=False). Values:"
             % (distribution_a.name, distribution_b.name), kl_t])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")