Python tensorflow.uint32() Examples

The following are 12 code examples of tensorflow.uint32(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: decoder.py    From im2latex with Apache License 2.0
def get_embeddings(formula, E, dim, start_token, batch_size):
    """Returns the embedding of the n-1 first elements in the formula concat
    with the start token

    Args:
        formula: (tf.placeholder) tf.uint32
        E: tf.Variable (matrix)
        dim: (int) dimension of embeddings
        start_token: tf.Variable
        batch_size: tf variable extracted from placeholder

    Returns:
        embeddings_train: tensor

    """
    formula_ = tf.nn.embedding_lookup(E, formula)
    start_token_ = tf.reshape(start_token, [1, 1, dim])
    start_tokens = tf.tile(start_token_, multiples=[batch_size, 1, 1])
    embeddings = tf.concat([start_tokens, formula_[:, :-1, :]], axis=1)

    return embeddings 
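
To make the shapes concrete, here is a minimal usage sketch (hypothetical sizes, run eagerly under TF 2.x; note that tf.nn.embedding_lookup requires int32/int64 ids, so the sketch uses int32 rather than the tf.uint32 mentioned in the docstring):

import tensorflow as tf

vocab_size, dim, batch_size, seq_len = 100, 8, 4, 6
E = tf.Variable(tf.random.uniform([vocab_size, dim]))  # embedding matrix
start_token = tf.Variable(tf.zeros([dim]))              # learned start-token vector
formula = tf.random.uniform([batch_size, seq_len], maxval=vocab_size, dtype=tf.int32)

embeddings = get_embeddings(formula, E, dim, start_token, batch_size)
print(embeddings.shape)  # (4, 6, 8): start token followed by the first seq_len-1 embeddings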
Example #2
Source File: tfrecord_test.py    From nobrainer with Apache License 2.0
def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]

    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar" 
Example #3
Source File: tensorflow_util.py    From MedicalDataAugmentationTool with GNU General Public License v3.0
def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0, lambda: tf.reduce_mean(input, keepdims=keepdims), lambda: tf.zeros_like(input))
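
A quick demonstration of the guard (TF 2.x eager mode; tf.reduce_mean of an empty tensor would otherwise return NaN):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
print(reduce_mean_support_empty(x))      # tf.Tensor(2.0, ...)

empty = tf.constant([], dtype=tf.float32)
print(reduce_mean_support_empty(empty))  # an empty zeros tensor instead of NaN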


# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.uint8:
#         num_bits = 8
#     elif input.dtype == tf.uint16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0)
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors 
Example #4
Source File: mod.py    From onnx-tensorflow with Apache License 2.0
def args_check(cls, node, **kwargs):
    unsupported_dtype = [
        tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]
    if x.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                      "Tensorflow")
    if y.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                      "Tensorflow") 
Example #5
Source File: clip.py    From onnx-tensorflow with Apache License 2.0
def _common(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    x_dtype = x.dtype

    if cls.SINCE_VERSION < 11:
      # min/max were required and passed as attributes
      clip_value_min = node.attrs.get("min", tf.reduce_min(x))
      clip_value_max = node.attrs.get("max", tf.reduce_max(x))
    else:
      # min/max are optional and passed as inputs
      clip_value_min = tensor_dict[node.inputs[1]] if len(
          node.inputs) > 1 and node.inputs[1] != "" else x_dtype.min
      clip_value_max = tensor_dict[node.inputs[2]] if len(
          node.inputs) > 2 and node.inputs[2] != "" else x_dtype.max

    # tf.clip_by_value doesn't support uint8, uint16, uint32, int8 and int16
    # dtypes for x, so we need to upcast it to tf.int32 or tf.int64
    if x_dtype in [tf.uint8, tf.uint16, tf.uint32, tf.int8, tf.int16]:
      cast_to = tf.int64 if x_dtype == tf.uint32 else tf.int32
      x = tf.cast(x, cast_to)
      clip_value_min = tf.cast(clip_value_min, cast_to)
      clip_value_max = tf.cast(clip_value_max, cast_to)
      y = tf.clip_by_value(x, clip_value_min, clip_value_max)
      y = tf.cast(y, x_dtype)
    else:
      y = tf.clip_by_value(x, clip_value_min, clip_value_max)

    return [y] 
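
For intuition, the upcast-clip-downcast path behaves like this short sketch (illustrative values):

import tensorflow as tf

x = tf.constant([0, 100, 200, 255], dtype=tf.uint8)
y = tf.cast(tf.clip_by_value(tf.cast(x, tf.int32), 50, 150), tf.uint8)
print(y)  # [ 50 100 150 150]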
Example #6
Source File: tf_utils.py    From transform with Apache License 2.0
def reduce_batch_minus_min_and_max(x, reduce_instance_dims):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor`.
    reduce_instance_dims: A bool indicating whether this should collapse the
      batch and instance dimensions to arrive at a single scalar output, or
      only collapse the batch dimension and output a vector of the same shape
      as the input.

  Returns:
    The computed `tf.Tensor`s (batch -min, batch max) pair.
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  if reduce_instance_dims:
    if isinstance(x, tf.SparseTensor):
      x = x.values

    x_batch_max = tf.reduce_max(input_tensor=x)
    x_batch_minus_min = tf.reduce_max(input_tensor=tf.zeros_like(x) - x)
    x_batch_minus_min, x_batch_max = assert_same_shape(x_batch_minus_min,
                                                       x_batch_max)
  elif isinstance(x, tf.SparseTensor):
    x_batch_minus_min, x_batch_max = (
        _sparse_minus_reduce_min_and_reduce_max(x))
  else:
    x_batch_max = tf.reduce_max(input_tensor=x, axis=0)
    x_batch_minus_min = tf.reduce_max(input_tensor=0 - x, axis=0)

  # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
  # NaNs produces -inf.
  return (_inf_to_nan(x_batch_minus_min, output_dtype),
          _inf_to_nan(x_batch_max, output_dtype)) 
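
The -min trick in isolation: max(0 - x) equals -min(x), so both statistics go through the same reduce_max reduction (illustrative values):

import tensorflow as tf

x = tf.constant([[3.0, -1.0], [2.0, 5.0]])
minus_min = tf.reduce_max(tf.zeros_like(x) - x)  # 1.0, i.e. -min(x)
batch_max = tf.reduce_max(x)                     # 5.0
print(minus_min.numpy(), batch_max.numpy())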
Example #7
Source File: analyzers.py    From transform with Apache License 2.0
def sum(x, reduce_instance_dims=True, name=None):  # pylint: disable=redefined-builtin
  """Computes the sum of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor` or `SparseTensor`. Its type must be floating point
        (float{16|32|64}), integral (int{8|16|32|64}), or
        unsigned integral (uint{8|16}).
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
    have the same type as `x`. If `x` is float16, the output is cast to float32.
    If `x` is integral, the output is cast to [u]int64. If `x` is sparse and
    reduce_instance_dims is False, the result is 0 wherever a column has no
    values across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      if isinstance(x, tf.SparseTensor):
        x = x.values
      x = tf.reduce_sum(input_tensor=x)
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    else:
      x = tf.reduce_sum(input_tensor=x, axis=0)
    output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
    return _numeric_combine([x], sum_fn, reduce_instance_dims,
                            [output_dtype])[0] 
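
Typical usage is inside a preprocessing_fn, where the analyzer's result is a constant computed over the full dataset (a sketch assuming a numeric feature named 'x'):

import tensorflow as tf
import tensorflow_transform as tft

def preprocessing_fn(inputs):
    total = tft.sum(inputs['x'])
    # Each instance's share of the dataset-wide total.
    return {'x_fraction_of_total':
                tf.cast(inputs['x'], tf.float32) / tf.cast(total, tf.float32)}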
Example #8
Source File: utils.py    From seed_rl with Apache License 2.0
def __init__(self, encoded_shape, original_shape):
    self._value_specs = (tf.TensorSpec(encoded_shape, tf.uint32),)
    self.original_shape = original_shape 
Example #9
Source File: tensorflow_util.py    From MedicalDataAugmentationTool with GNU General Public License v3.0
def masked_bit(input, bit_index):
    """
    Returns a boolean tensor whose values are True where the bit at bit_index is set.
    :param input: The input tensor to check.
    :param bit_index: The bit index to test with bitwise and (LSB 0 order).
    :return: The boolean tensor.
    """
    assert input.dtype in [tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be *int*'
    current_bit = tf.bitwise.left_shift(tf.constant(1, dtype=input.dtype), tf.cast(bit_index, dtype=input.dtype))
    return tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0) 
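
For example, testing bit 1 (value 2) of a uint32 tensor:

import tensorflow as tf

x = tf.constant([1, 2, 3, 4], dtype=tf.uint32)
print(masked_bit(x, 1))  # [False  True  True False]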
Example #10
Source File: tensor_adapter_test.py    From tfx-bsl with Apache License 2.0
def test2DSparseTensor(self):
    tensor_representation = text_format.Parse(
        """
        sparse_tensor {
          value_column_name: "values"
          index_column_names: ["d0", "d1"]
          dense_shape {
            dim {
              size: 10
            }
            dim {
              size: 20
            }
          }
        }
        """, schema_pb2.TensorRepresentation())
    record_batch = pa.RecordBatch.from_arrays([
        pa.array([[1], None, [2], [3, 4, 5], []], type=pa.list_(pa.int64())),
        # Also test that the index column can be of an integral type other
        # than int64.
        pa.array([[9], None, [9], [7, 8, 9], []], type=pa.list_(pa.uint32())),
        pa.array([[0], None, [0], [0, 1, 2], []], type=pa.list_(pa.int64()))
    ], ["values", "d0", "d1"])
    adapter = tensor_adapter.TensorAdapter(
        tensor_adapter.TensorAdapterConfig(record_batch.schema,
                                           {"output": tensor_representation}))
    converted = adapter.ToBatchTensors(record_batch)
    self.assertLen(converted, 1)
    self.assertIn("output", converted)
    actual_output = converted["output"]
    self.assertIsInstance(actual_output,
                          (tf.SparseTensor, tf.compat.v1.SparseTensorValue))
    self.assertSparseAllEqual(
        tf.compat.v1.SparseTensorValue(
            dense_shape=[5, 10, 20],
            indices=[[0, 9, 0], [2, 9, 0], [3, 7, 0], [3, 8, 1], [3, 9, 2]],
            values=tf.convert_to_tensor([1, 2, 3, 4, 5], dtype=tf.int64)),
        actual_output)

    self.assertAdapterCanProduceNonEagerInEagerMode(adapter, record_batch) 
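
Written out by hand, the SparseTensor the adapter is expected to produce looks like this (note the uint32 "d0" column is widened into the int64 indices):

import tensorflow as tf

expected = tf.sparse.SparseTensor(
    indices=[[0, 9, 0], [2, 9, 0], [3, 7, 0], [3, 8, 1], [3, 9, 2]],
    values=tf.constant([1, 2, 3, 4, 5], dtype=tf.int64),
    dense_shape=[5, 10, 20])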
Example #11
Source File: tf_utils.py    From transform with Apache License 2.0
def reduce_batch_minus_min_and_max_per_key(x, key):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor` or `SparseTensor`.
    key: A `Tensor` or `SparseTensor`.
        Must meet one of the following conditions:
        1. Both x and key are dense,
        2. Both x and key are sparse and `key` must exactly match `x` in
        everything except values,
        3. The axis=1 index of each x matches its index of dense key.
  Returns:
    A 3-tuple containing the `Tensor`s (key_vocab, min_per_key, max_per_key).
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  x, key = _validate_and_get_dense_value_key_inputs(x, key)

  def get_batch_max_per_key(tensor, key_uniques, dtype):  # pylint: disable=missing-docstring
    if tensor.get_shape().ndims < 2:
      row_maxes = tensor
    else:
      row_maxes = tf.reduce_max(
          tensor, axis=tf.range(1, tensor.get_shape().ndims))
    batch_max = tf.math.unsorted_segment_max(
        row_maxes, key_uniques.idx, tf.size(input=key_uniques.y))

    # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
    # NaNs produces -inf.
    return _inf_to_nan(batch_max, dtype)

  unique = tf.unique_with_counts(key, out_idx=tf.int64)
  x_batch_maxes = get_batch_max_per_key(x, unique, output_dtype)
  x_batch_minus_mins = get_batch_max_per_key(-x, unique, output_dtype)

  x_batch_minus_mins, x_batch_maxes = assert_same_shape(x_batch_minus_mins,
                                                        x_batch_maxes)

  return (unique.y, x_batch_minus_mins, x_batch_maxes) 
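
The core of the per-key reduction is the pairing of tf.unique_with_counts (which maps each key to a segment id) with tf.math.unsorted_segment_max, as in this small sketch:

import tensorflow as tf

x = tf.constant([1.0, 5.0, 2.0, 7.0])
key = tf.constant(['a', 'b', 'a', 'b'])
unique = tf.unique_with_counts(key, out_idx=tf.int64)
max_per_key = tf.math.unsorted_segment_max(x, unique.idx, tf.size(unique.y))
print(unique.y.numpy(), max_per_key.numpy())  # [b'a' b'b'] [2. 7.]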
Example #12
Source File: utils.py    From seed_rl with Apache License 2.0
def tpu_encode(ts):
  """Encodes a nest of Tensors in a suitable way for TPUs.

  TPUs do not support tf.uint8, tf.uint16 and other data types. Furthermore,
  the speed of transfer and device reshapes depend on the shape of the data.
  This function tries to optimize the data encoding for a number of use cases.

  Should be used on CPU before sending data to TPU and in conjunction with
  `tpu_decode` after the data is transferred.

  Args:
    ts: A tf.nest of Tensors.

  Returns:
    A tf.nest of encoded Tensors.
  """

  def visit(t):  
    num_elements = t.shape.num_elements()
    # We need a multiple of 128 elements: encoding reduces the number of
    # elements by a factor of 4 (packing uint8s into uint32s), and the first
    # thing decode does is reshape with a 32 minor-most dimension.
    if (t.dtype == tf.uint8 and num_elements is not None and
        num_elements % 128 == 0):
      # For details of these transformations, see b/137182262.
      x = tf.xla.experimental.compile(
          lambda x: tf.transpose(x, list(range(1, t.shape.rank)) + [0]), [t])[0]
      x = tf.reshape(x, [-1, 4])
      x = tf.bitcast(x, tf.uint32)
      x = tf.reshape(x, [-1])
      return TPUEncodedUInt8(x, t.shape)
    elif t.dtype == tf.uint8:
      logging.warning('Inefficient uint8 transfer with shape: %s', t.shape)
      return tf.cast(t, tf.bfloat16)
    elif t.dtype == tf.uint16:
      return tf.cast(t, tf.int32)
    elif (t.dtype == tf.float32 and t.shape.rank > 1 and not
          (num_divisible(t.shape.dims, 128) >= 1 and
           num_divisible(t.shape.dims, 8) >= 2)):
      x = tf.reshape(t, [-1])
      return TPUEncodedF32(x, t.shape)
    else:
      return t

  return tf.nest.map_structure(visit, ts)
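
The packing step in isolation (a sketch, without the XLA transpose): 128 uint8 elements are bitcast four at a time into 32 uint32 elements, the minor-most layout the decoder expects:

import tensorflow as tf

t = tf.cast(tf.range(128), tf.uint8)
packed = tf.bitcast(tf.reshape(t, [-1, 4]), tf.uint32)  # shape (32,)
print(packed.shape)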