Python tensorflow.python.ops.math_ops.reduced_shape() Examples
The following are 26 code examples of tensorflow.python.ops.math_ops.reduced_shape(), collected from several open-source projects. Each example lists its original source file, project, and license. You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
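
For orientation, reduced_shape(input_shape, axes) returns the shape that the corresponding reduction would produce with keep_dims=True: every axis listed in axes is collapsed to 1 and the remaining axes are left unchanged. The snippet below is a minimal illustrative sketch, not taken from any of the projects above, and assumes a TF 1.x graph session:

import tensorflow as tf
from tensorflow.python.ops import math_ops

with tf.Session() as sess:
    # Reducing axes 1 and 2 of a [2, 3, 5, 7] tensor keeps those dims as size 1.
    kept = math_ops.reduced_shape([2, 3, 5, 7], axes=[1, 2])
    print(sess.run(kept))  # => [2 1 1 7]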
Example #1
Source File: math_grad.py From lambda-packs with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
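
The gradient of a sum is 1 for every input element, so the incoming gradient only has to be broadcast back to the input shape: reduced_shape supplies the keep_dims-style shape to reshape into, and tiling by input_shape / output_shape_kept_dims replicates it along the reduced axes. A rough NumPy analogue of that reshape-and-tile step, illustrative only, with a hypothetical [2, 3, 4] input reduced over axis 1:

import numpy as np

input_shape = np.array([2, 3, 4])      # shape of op.inputs[0]
axes = [1]                             # reduction axes (op.inputs[1])
grad = np.ones((2, 4))                 # incoming gradient of reduce_sum(x, axis=1)

kept = input_shape.copy()
kept[axes] = 1                         # what reduced_shape computes: [2, 1, 4]
tile_scaling = input_shape // kept     # [1, 3, 1]

full_grad = np.tile(grad.reshape(kept), tile_scaling)
assert full_grad.shape == (2, 3, 4)    # gradient now matches the input shape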
Example #2
Source File: math_grad.py From lambda-packs with MIT License
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
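
Only elements equal to the reduced min/max receive gradient, and when several entries tie for the extremum the gradient is shared evenly among them (num_selected). A small NumPy sketch of that indicator logic for a 1-D reduce_max, illustrative only:

import numpy as np

x = np.array([3., 7., 7., 1.])
g = 2.0                                 # incoming gradient of reduce_max(x)

y = x.max()                             # forward output
indicators = (x == y).astype(x.dtype)   # [0., 1., 1., 0.]
num_selected = indicators.sum()         # two entries tie for the maximum
dx = indicators / num_selected * g      # each tied entry gets g / 2
print(dx)                               # [0. 1. 1. 0.]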
Example #3
Source File: math_grad.py From keras-lambda with MIT License
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
Example #4
Source File: math_grad.py From keras-lambda with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
Example #5
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
Example #6
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
Example #7
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
Example #8
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if op.inputs[0].get_shape().ndims is not None:
    axes = tensor_util.constant_value(op.inputs[1])
    if axes is not None:
      rank = op.inputs[0].get_shape().ndims
      if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
        grad = array_ops.reshape(grad, [1] * rank)
        # If shape is not fully defined (but rank is), we use Shape.
        if op.inputs[0].get_shape().is_fully_defined():
          input_shape = op.inputs[0].get_shape().as_list()
        else:
          input_shape = array_ops.shape(op.inputs[0])
        return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  # TODO(apassos) remove this once device placement for eager ops makes more
  # sense.
  with ops.colocate_with(input_shape):
    output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
    tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
Example #9
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
Example #10
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
Example #11
Source File: math_grad.py From keras-lambda with MIT License
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
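
The gradient of a product with respect to x[i] is the product of every other entry; dividing the total product by x[i] breaks down when x[i] is zero, which is why the code builds the result from two exclusive cumulative products instead. A 1-D NumPy sketch of that trick, illustrative only and not part of the source:

import numpy as np

x = np.array([2., 0., 5.])

# Exclusive cumulative products from the left and from the right:
# left[i]  = prod(x[:i])   -> [1., 2., 0.]
# right[i] = prod(x[i+1:]) -> [0., 5., 1.]
left = np.concatenate(([1.0], np.cumprod(x)[:-1]))
right = np.concatenate((np.cumprod(x[::-1])[:-1][::-1], [1.0]))

# Product of all entries except x[i], well defined even with the zero present.
print(left * right)   # [ 0. 10.  0.]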
Example #12
Source File: metrics_impl.py From lambda-packs with MIT License
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
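
Here reduced_shape is used purely for shape bookkeeping: collapsing the last dimension of ids yields a shape like [batch, 1], which is then filled with selected_id so that set_intersection can match it row-wise against ids. A minimal sketch of just that shape step, assuming a TF 1.x session and a hypothetical [2, 3] batch of IDs:

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops, math_ops

with tf.Session() as sess:
    ids = tf.constant([[1, 2, 7], [7, 7, 3]], dtype=tf.int64)
    ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
    ids_last_dim = array_ops.size(ids_shape) - 1
    # Same shape as `ids` with the last dimension collapsed to 1.
    filled_shape = math_ops.reduced_shape(
        ids_shape, array_ops.reshape(ids_last_dim, [1]))
    print(sess.run(filled_shape))  # => [2 1]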
Example #13
Source File: sparse_grad.py From keras-lambda with MIT License
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)
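
For the sparse case, the reshaped gradient is routed back to each stored element with gather_nd: dividing a sparse index by scale = sp_shape // output_shape_kept_dims sends every reduced coordinate to 0 while leaving the kept coordinates alone. A small NumPy illustration of that index mapping, not from the source, with axes chosen for illustration:

import numpy as np

sp_shape = np.array([4, 3])      # dense shape of the SparseTensor
kept = np.array([4, 1])          # reduced_shape(sp_shape, axes=[1])
scale = sp_shape // kept         # [1, 3]

sp_indices = np.array([[0, 2], [3, 1]])
print(sp_indices // scale)       # [[0, 0], [3, 0]]: reduced axis collapsed to 0, row index kept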
Example #14
Source File: metrics_impl.py From keras-lambda with MIT License
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
Example #15
Source File: sparse_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)
Example #16
Source File: metrics_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
Example #17
Source File: metric_ops.py From deep_image_model with Apache License 2.0
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, shape=ids_shape)
Example #18
Source File: sparse_grad.py From deep_image_model with Apache License 2.0
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)
Example #19
Source File: reduction_ops_test.py From deep_image_model with Apache License 2.0
def testZeros(self):
  """Check that reduced_shape does the right thing with zero dimensions."""
  with self.test_session():
    self._check([0], [], [0])
    self._check([0], [0], [1])
    self._check([0, 3], [], [0, 3])
    self._check([0, 3], [0], [1, 3])
    self._check([0, 3], [1], [0, 1])
    self._check([0, 3], [0, 1], [1, 1])
    self._check([3, 0], [], [3, 0])
    self._check([3, 0], [0], [1, 0])
    self._check([3, 0], [1], [3, 1])
    self._check([3, 0], [0, 1], [1, 1])
Example #20
Source File: reduction_ops_test.py From deep_image_model with Apache License 2.0
def _check(self, shape, axes, result):
  output = math_ops.reduced_shape(shape, axes=axes)
  self.assertAllEqual(output.eval(), result)
Example #21
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
Example #22
Source File: sparse_grad.py From auto-alt-text-lambda-api with MIT License
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)
Example #23
Source File: metrics_impl.py From auto-alt-text-lambda-api with MIT License
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
Example #24
Source File: math_grad.py From lambda-packs with MIT License
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
Example #25
Source File: sparse_grad.py From lambda-packs with MIT License
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)
Example #26
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    rank = array_ops.rank(op.inputs[0])
    reduction_indices = (reduction_indices + rank) % rank
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, rank)
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None