Python tensorflow.python.ops.math_ops.not_equal() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.not_equal(), drawn from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
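As a quick orientation before the examples, here is a minimal sketch of the op through its public alias tf.math.not_equal (assuming TensorFlow 2.x eager execution); the internal math_ops.not_equal used throughout the examples below behaves the same way:

import tensorflow as tf

a = tf.constant([1, 0, 2, 0])
b = tf.constant([1, 1, 2, 2])

# Element-wise comparison with the usual broadcasting rules; returns a bool tensor.
mask = tf.math.not_equal(a, b)
print(mask.numpy())  # [False  True False  True]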
Example #1
Source File: math_grad.py From lambda-packs with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
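For intuition, here is a small sketch (not part of the library) that checks the two analytic gradients _PowGrad returns, dz/dx = y*x^(y-1) and dz/dy = z*log(x), against automatic differentiation; it assumes TensorFlow 2.x with tf.GradientTape:

import tensorflow as tf

x = tf.constant(3.0)
y = tf.constant(2.0)
with tf.GradientTape(persistent=True) as tape:
    tape.watch([x, y])
    z = tf.pow(x, y)

# Autodiff results should match the closed forms used in _PowGrad.
print(tape.gradient(z, x).numpy(), (y * x ** (y - 1)).numpy())    # 6.0 6.0
print(tape.gradient(z, y).numpy(), (z * tf.math.log(x)).numpy())  # ~9.8875 ~9.8875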
Example #2
Source File: layers.py From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones so that they
  can be fed to the ctc_loss.

  Args:
    tensor: An `int` `Tensor` to be converted to a `Sparse`.
    eos_token: An integer. It is part of the target label that signifies the
      end of a sentence.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse',
                                     [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
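The pattern above (not_equal to locate real entries, where for their indices, gather_nd for their values) also works with the public TF 2.x API. A hedged sketch, treating 0 as the eos/padding token:

import tensorflow as tf

dense = tf.constant([[5, 7, 0], [3, 0, 0]])
indices = tf.where(tf.math.not_equal(dense, 0))  # coordinates of non-padding entries
values = tf.gather_nd(dense, indices)            # their values, in row-major order
sparse = tf.SparseTensor(indices, values, tf.shape(dense, out_type=tf.int64))
print(tf.sparse.to_dense(sparse).numpy())        # round-trips to the original tensor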
Example #3
Source File: math_grad.py From keras-lambda with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #4
Source File: math_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #5
Source File: math_grad.py From auto-alt-text-lambda-api with MIT License
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #6
Source File: layers.py From tf-slim with Apache License 2.0
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones so that they
  can be fed to the ctc_loss.

  Args:
    tensor: An `int` `Tensor` to be converted to a `Sparse`.
    eos_token: An integer. It is part of the target label that signifies the
      end of a sentence.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse',
                                     [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
Example #7
Source File: sparse_ops.py From tf-slim with Apache License 2.0
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      `dense_tensor` dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_tensor = ops.convert_to_tensor(dense_tensor)
    ignore_value = _ignore_value_tensor(dense_tensor.dtype, ignore_value)
    indices = array_ops.where(
        math_ops.not_equal(dense_tensor, ignore_value), name="indices")
    return sparse_tensor.SparseTensor(
        indices=indices,
        values=array_ops.gather_nd(dense_tensor, indices, name="values"),
        dense_shape=array_ops.shape(
            dense_tensor, out_type=dtypes.int64, name="dense_shape"))
Example #8
Source File: math_grad.py From deep_image_model with Apache License 2.0
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = math_ops.select(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example #9
Source File: core_test.py From keras-lambda with MIT License
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
Example #10
Source File: sequence_queueing_state_saver.py From keras-lambda with MIT License
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)), [
                  string_ops.string_join([
                      "Tensor %s should be a multiple of: " % value.name,
                      string_ops.as_string(multiple_of), ", but saw value: ",
                      string_ops.as_string(value),
                      ". Consider setting pad=True."
                  ])
              ])
  ]):
    new_value = array_ops.identity(value, name="multiple_of_checked")
    return new_value
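A minimal eager-mode sketch of the same guard using public TF 2.x APIs (the wrapper name here is hypothetical, not the library's):

import tensorflow as tf

def check_multiple_of(value, multiple_of):
    # Non-zero AND evenly divisible, exactly as the Assert above requires.
    ok = tf.logical_and(tf.equal(value % multiple_of, 0),
                        tf.math.not_equal(value, 0))
    tf.debugging.Assert(ok, ["value must be a non-zero multiple of", multiple_of])
    return value

print(check_multiple_of(tf.constant(8), tf.constant(4)).numpy())  # 8
# check_multiple_of(tf.constant(6), tf.constant(4))  # raises InvalidArgumentError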
Example #11
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
Example #12
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def test_forward_rel_ops():
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    _test_forward_rel_op([t1, t2], math_ops.less)
    _test_forward_rel_op([t1, t2], math_ops.greater)
    _test_forward_rel_op([t1, t2], math_ops.less_equal)
    _test_forward_rel_op([t1, t2], math_ops.greater_equal)
    _test_forward_rel_op([t1, t2], math_ops.equal)
    _test_forward_rel_op([t1, t2], math_ops.not_equal)


#######################################################################
# ExpandDims
# ----------
Example #13
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_not_equal(data):
    """ One iteration of not_equal"""
    return _test_elemwise(math_ops.not_equal, data)


#######################################################################
# Squared_difference
# ------------------
Example #14
Source File: embeddings.py From tavolo with MIT License
def compute_mask(self, inputs, mask=None):
    if not self.mask_zero:
        return None

    return math_ops.not_equal(inputs, 0)
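What this mask looks like in practice, as a sketch: with mask_zero semantics, token id 0 is padding and everything else is kept (TF 2.x shown):

import tensorflow as tf

token_ids = tf.constant([[4, 9, 0, 0],
                         [7, 0, 0, 0]])
mask = tf.math.not_equal(token_ids, 0)  # True where there is a real token
print(mask.numpy())
# [[ True  True False False]
#  [ True False False False]]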
Example #15
Source File: embeddings.py From tavolo with MIT License
def compute_mask(self, inputs, mask=None):
    if not self.mask_zero:
        return None

    return math_ops.not_equal(inputs, 0)
Example #16
Source File: decoder_conv.py From conv-ensemble-str with Apache License 2.0
def finalize(self, output, final_state, sequence_lengths):
  # Gather according to beam search result
  # now predicted_ids is [M, N/B]
  predicted_ids = beam_search.gather_tree(output.predicted_ids,
                                          output.beam_parent_ids)
  # TODO(Shancheng): pay attention
  beam_width = output.beam_parent_ids.get_shape().as_list()
  parent_ids = tf.concat([tf.zeros([1, beam_width[-1]], dtype=tf.int32),
                          output.beam_parent_ids[:-1, :]], 0)
  # now logits is [M, N/B, C]
  logits = beam_search.gather_tree(output.logits, parent_ids)
  # now attention scores is [M, N/B, L, H, W]
  attention_scores = beam_search.gather_tree(output.attention_scores,
                                             parent_ids)
  # original length is the length of ungathered logits
  sequence_lengths = math_ops.not_equal(predicted_ids, self.end_token)
  sequence_lengths = tf.to_int32(sequence_lengths)
  sequence_lengths = tf.reduce_sum(sequence_lengths, axis=0) + 1
  # choose the top score item
  predicted_ids = predicted_ids[:, 0:1]
  logits = logits[:, 0:1]
  attention_scores = attention_scores[:, 0:1]
  # mask out
  length = sequence_lengths[0]
  logits = logits[0:length, :]
  attention_scores = attention_scores[0:length, :]
  final_outputs = DecoderOutput(logits=self._padding(logits),
                                predicted_ids=self._padding(predicted_ids),
                                attention_scores=attention_scores)
  return final_outputs, final_state
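The length computation in the middle of finalize is worth isolating: it counts non-end-token steps along the time axis, then adds one for the end token itself. A hedged TF 2.x sketch:

import tensorflow as tf

end_token = 0
predicted_ids = tf.constant([[7, 4, 9],    # shape [time, beams]
                             [5, 0, 0],
                             [0, 0, 0]])
lengths = tf.reduce_sum(
    tf.cast(tf.math.not_equal(predicted_ids, end_token), tf.int32), axis=0) + 1
print(lengths.numpy())  # [3 2 2] -- each beam's length including its end token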
Example #17
Source File: sequence_queueing_state_saver.py From deep_image_model with Apache License 2.0
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)),
          [string_ops.string_join(
              ["Tensor %s should be a multiple of: " % value.name,
               string_ops.as_string(multiple_of), ", but saw value: ",
               string_ops.as_string(value), ". Consider setting pad=True."])])]):
    new_value = array_ops.identity(
        value, name="multiple_of_checked")
    return new_value
Example #18
Source File: test_forward.py From training_results_v0.6 with Apache License 2.0
def test_forward_rel_ops():
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    _test_forward_rel_op([t1, t2], math_ops.less)
    _test_forward_rel_op([t1, t2], math_ops.greater)
    _test_forward_rel_op([t1, t2], math_ops.less_equal)
    _test_forward_rel_op([t1, t2], math_ops.greater_equal)
    _test_forward_rel_op([t1, t2], math_ops.equal)
    _test_forward_rel_op([t1, t2], math_ops.not_equal)


#######################################################################
# Main
# ----
Example #19
Source File: core_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
Example #20
Source File: sequence_queueing_state_saver.py From auto-alt-text-lambda-api with MIT License
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)), [
                  string_ops.string_join([
                      "Tensor %s should be a multiple of: " % value.name,
                      string_ops.as_string(multiple_of), ", but saw value: ",
                      string_ops.as_string(value),
                      ". Consider setting pad=True."
                  ])
              ])
  ]):
    new_value = array_ops.identity(value, name="multiple_of_checked")
    return new_value
Example #21
Source File: sequence_queueing_state_saver.py From lambda-packs with MIT License
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)), [
                  string_ops.string_join([
                      "Tensor %s should be a multiple of: " % value.name,
                      string_ops.as_string(multiple_of), ", but saw value: ",
                      string_ops.as_string(value),
                      ". Consider setting pad=True."
                  ])
              ])
  ]):
    new_value = array_ops.identity(value, name="multiple_of_checked")
    return new_value
Example #22
Source File: backend.py From lambda-packs with MIT License
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
Example #23
Source File: sparse_ops.py From deep_image_model with Apache License 2.0
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Exception: TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t, math_ops.cast(ignore_value,
                                                  dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.pack(
          _multiplier_helper(array_ops.unpack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.mul(higher_dims, shape_multipliers),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)
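The shape-multiplier logic above is plain row-major index flattening: for a tensor of shape [D0, D1, D2], index (i, j, k) lands at flat position i*D1*D2 + j*D2 + k. A small NumPy sketch of that arithmetic:

import numpy as np

shape = (2, 3, 4)
i, j, k = 1, 2, 3
multipliers = (shape[1] * shape[2], shape[2])        # strides for dims 0 and 1
flat = i * multipliers[0] + j * multipliers[1] + k   # 1*12 + 2*4 + 3 = 23
assert flat == np.ravel_multi_index((i, j, k), shape)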
Example #24
Source File: sparse_ops.py From auto-alt-text-lambda-api with MIT License
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Exception: TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t, math_ops.cast(ignore_value,
                                                  dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.stack(
          _multiplier_helper(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_multipliers),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)
Example #25
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License
def decode_image(contents, channels=None, name=None):
  """Convenience function for `decode_gif`, `decode_jpeg`, and `decode_png`.

  Detects whether an image is a GIF, JPEG, or PNG, and performs the
  appropriate operation to convert the input bytes `string` into a `Tensor`
  of type `uint8`.

  Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
  opposed to `decode_jpeg` and `decode_png`, which return 3-D arrays
  `[height, width, num_channels]`. Make sure to take this into account when
  constructing your graph if you are intermixing GIF files with JPEG and/or
  PNG files.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels
      for the decoded image.
    name: A name for the operation (optional)

  Returns:
    `Tensor` with type `uint8` with shape `[height, width, num_channels]` for
      JPEG and PNG images and shape `[num_frames, height, width, 3]` for GIF
      images.
  """
  with ops.name_scope(name, 'decode_image') as scope:
    if channels not in (None, 0, 1, 3):
      raise ValueError('channels must be in (None, 0, 1, 3)')
    substr = string_ops.substr(contents, 0, 4)

    def _gif():
      # Create assert op to check that bytes are GIF decodable
      is_gif = math_ops.equal(substr, b'\x47\x49\x46\x38', name='is_gif')
      decode_msg = 'Unable to decode bytes as JPEG, PNG, or GIF'
      assert_decode = control_flow_ops.Assert(is_gif, [decode_msg])
      # Create assert to make sure that channels is not set to 1
      # Already checked above that channels is in (None, 0, 1, 3)
      gif_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(gif_channels, 1,
                                         name='check_channels')
      channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_decode, assert_channels]):
        return gen_image_ops.decode_gif(contents)

    def _png():
      return gen_image_ops.decode_png(contents, channels)

    def check_png():
      is_png = math_ops.equal(substr, b'\211PNG', name='is_png')
      return control_flow_ops.cond(is_png, _png, _gif, name='cond_png')

    def _jpeg():
      return gen_image_ops.decode_jpeg(contents, channels)

    is_jpeg = math_ops.equal(substr, b'\xff\xd8\xff\xe0', name='is_jpeg')
    return control_flow_ops.cond(is_jpeg, _jpeg, check_png, name='cond_jpeg')
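The dispatch in decode_image keys off the first four bytes of the file. A hedged pure-Python sketch of the same magic-byte checks (the helper name is ours, not TensorFlow's):

def sniff_image_format(contents: bytes) -> str:
    if contents[:4] == b'\xff\xd8\xff\xe0':  # JPEG/JFIF header
        return 'jpeg'
    if contents[:4] == b'\x89PNG':           # same bytes as b'\211PNG' above
        return 'png'
    if contents[:4] == b'GIF8':              # same bytes as b'\x47\x49\x46\x38'
        return 'gif'
    return 'unknown'

assert sniff_image_format(b'GIF89a' + b'\x00' * 10) == 'gif'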
Example #26
Source File: lookup_ops.py From lambda-packs with MIT License
def lookup(self, keys, name=None):
  """Looks up `keys` in the table, outputs the corresponding values.

  It assigns out-of-vocabulary keys to buckets based on their hashes.

  Args:
    keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

  Raises:
    TypeError: when `keys` doesn't match the table key data type.
  """
  if keys.dtype != self._key_dtype:
    raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                    (self._key_dtype, keys.dtype))
  values = keys
  if isinstance(keys, sparse_tensor.SparseTensor):
    values = keys.values
  if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
    values = math_ops.to_int64(values)

  if self._num_oov_buckets == 0:
    ids = self._table.lookup(values, name=name)
  else:
    # TODO(yleon): Consider moving this functionality to its own kernel.
    with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
      str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
          self._hasher_spec)
      buckets = str_to_hash_bucket(
          _as_string(values),
          num_buckets=self._num_oov_buckets,
          name="hash_bucket")
      if self._table:
        ids = self._table.lookup(values)
        buckets = math_ops.add(buckets, self._table.size())
        is_id_non_default = math_ops.not_equal(ids,
                                               self._table.default_value)
        ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
      else:
        ids = buckets
  if isinstance(keys, sparse_tensor.SparseTensor):
    return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
  return ids
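The same in-vocabulary-or-hash-bucket behavior is available through the public TF 2.x lookup API; a hedged sketch:

import tensorflow as tf

init = tf.lookup.KeyValueTensorInitializer(
    keys=tf.constant(['cat', 'dog']),
    values=tf.constant([0, 1], dtype=tf.int64))
table = tf.lookup.StaticVocabularyTable(init, num_oov_buckets=3)

# 'dog' is in the vocabulary; 'platypus' hashes into one of ids 2..4.
print(table.lookup(tf.constant(['dog', 'platypus'])).numpy())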
Example #27
Source File: lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def lookup(self, keys, name=None):
  """Looks up `keys` in the table, outputs the corresponding values.

  It assigns out-of-vocabulary keys to buckets based on their hashes.

  Args:
    keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

  Raises:
    TypeError: when `keys` doesn't match the table key data type.
  """
  if keys.dtype != self._key_dtype:
    raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                    (self._key_dtype, keys.dtype))
  values = keys
  if isinstance(keys, sparse_tensor.SparseTensor):
    values = keys.values
  if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
    values = math_ops.to_int64(values)

  if self._num_oov_buckets == 0:
    ids = self._table.lookup(values, name=name)
  else:
    # TODO(yleon): Consider moving this functionality to its own kernel.
    with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
      str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
          self._hasher_spec)
      buckets = str_to_hash_bucket(
          _as_string(values),
          num_buckets=self._num_oov_buckets,
          name="hash_bucket")
      if self._table:
        ids = self._table.lookup(values)
        buckets = math_ops.add(buckets, self._table.size())
        is_id_non_default = math_ops.not_equal(ids,
                                               self._table.default_value)
        ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
      else:
        ids = buckets
  if isinstance(keys, sparse_tensor.SparseTensor):
    return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
  return ids
Example #28
Source File: feature_column.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _to_sparse_input(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

  If `input_tensor` is already a `SparseTensor`, just return it.

  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.

  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
      input_tensor)
  if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
    return input_tensor
  with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
    input_rank = input_tensor.get_shape().ndims
    if input_rank is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError('Undefined input_tensor shape.')
    if ignore_value is None:
      ignore_value = '' if input_tensor.dtype == dtypes.string else -1
    dense_shape = math_ops.cast(array_ops.shape(input_tensor), dtypes.int64)
    indices = array_ops.where(math_ops.not_equal(
        input_tensor, math_ops.cast(ignore_value, input_tensor.dtype)))
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(input_tensor, [-1])
    flat_indices = indices[:, input_rank - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if input_rank > 1:
      higher_dims = indices[:, :input_rank - 1]
      shape_offsets = array_ops.stack(
          _shape_offsets(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_offsets),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor_lib.SparseTensor(indices, values, dense_shape)
Example #29
Source File: image_ops_impl.py From keras-lambda with MIT License
def decode_image(contents, channels=None, name=None):
  """Convenience function for `decode_gif`, `decode_jpeg`, and `decode_png`.

  Detects whether an image is a GIF, JPEG, or PNG, and performs the
  appropriate operation to convert the input bytes `string` into a `Tensor`
  of type `uint8`.

  Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
  opposed to `decode_jpeg` and `decode_png`, which return 3-D arrays
  `[height, width, num_channels]`. Make sure to take this into account when
  constructing your graph if you are intermixing GIF files with JPEG and/or
  PNG files.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels
      for the decoded image.
    name: A name for the operation (optional)

  Returns:
    `Tensor` with type `uint8` with shape `[height, width, num_channels]` for
      JPEG and PNG images and shape `[num_frames, height, width, 3]` for GIF
      images.
  """
  with ops.name_scope(name, 'decode_image') as scope:
    if channels not in (None, 0, 1, 3):
      raise ValueError('channels must be in (None, 0, 1, 3)')
    substr = string_ops.substr(contents, 0, 4)

    def _gif():
      # Create assert op to check that bytes are GIF decodable
      is_gif = math_ops.equal(substr, b'\x47\x49\x46\x38', name='is_gif')
      decode_msg = 'Unable to decode bytes as JPEG, PNG, or GIF'
      assert_decode = control_flow_ops.Assert(is_gif, [decode_msg])
      # Create assert to make sure that channels is not set to 1
      # Already checked above that channels is in (None, 0, 1, 3)
      gif_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(gif_channels, 1,
                                         name='check_channels')
      channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_decode, assert_channels]):
        return gen_image_ops.decode_gif(contents)

    def _png():
      return gen_image_ops.decode_png(contents, channels)

    def check_png():
      is_png = math_ops.equal(substr, b'\211PNG', name='is_png')
      return control_flow_ops.cond(is_png, _png, _gif, name='cond_png')

    def _jpeg():
      return gen_image_ops.decode_jpeg(contents, channels)

    is_jpeg = math_ops.equal(substr, b'\xff\xd8\xff\xe0', name='is_jpeg')
    return control_flow_ops.cond(is_jpeg, _jpeg, check_png, name='cond_jpeg')
Example #30
Source File: feature_column.py From lambda-packs with MIT License
def _to_sparse_input(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

  If `input_tensor` is already a `SparseTensor`, just return it.

  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.

  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
      input_tensor)
  if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
    return input_tensor
  with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
    input_rank = input_tensor.get_shape().ndims
    if input_rank is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError('Undefined input_tensor shape.')
    if ignore_value is None:
      ignore_value = '' if input_tensor.dtype == dtypes.string else -1
    dense_shape = math_ops.cast(array_ops.shape(input_tensor), dtypes.int64)
    indices = array_ops.where(math_ops.not_equal(
        input_tensor, math_ops.cast(ignore_value, input_tensor.dtype)))
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(input_tensor, [-1])
    flat_indices = indices[:, input_rank - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if input_rank > 1:
      higher_dims = indices[:, :input_rank - 1]
      shape_offsets = array_ops.stack(
          _shape_offsets(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_offsets),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor_lib.SparseTensor(indices, values, dense_shape)