Python tensorflow.compat.v2.split() Examples

The following are 10 code examples of tensorflow.compat.v2.split(), taken from open-source projects. The source file and project for each example are noted above it. You may also want to check out all available functions and classes of the module tensorflow.compat.v2, or try the search function.
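Before the project examples, here is a minimal standalone sketch of the basic call (the tensor and split sizes below are purely illustrative): tf.split accepts either a number of equal-sized pieces or an explicit list of sizes.

import tensorflow.compat.v2 as tf

x = tf.reshape(tf.range(12), (2, 6))

# Three equal pieces of size 2 along the last axis.
equal_parts = tf.split(x, 3, axis=-1)

# Explicit sizes 1, 2 and 3 along the last axis.
sized_parts = tf.split(x, [1, 2, 3], axis=-1)

print([p.shape.as_list() for p in equal_parts])  # [[2, 2], [2, 2], [2, 2]]
print([p.shape.as_list() for p in sized_parts])  # [[2, 1], [2, 2], [2, 3]]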
Example #1
Source File: dual_quaternion_test.py    From graphics with Apache License 2.0
def test_conjugate_preset(self):
    """Tests if the conjugate function is providing correct results."""
    x_init = test_helpers.generate_preset_test_dual_quaternions()
    x = tf.convert_to_tensor(value=x_init)
    y = tf.convert_to_tensor(value=x_init)

    x = dual_quaternion.conjugate(x)
    x_real, x_dual = tf.split(x, (4, 4), axis=-1)

    y_real, y_dual = tf.split(y, (4, 4), axis=-1)
    xyz_y_real, w_y_real = tf.split(y_real, (3, 1), axis=-1)
    xyz_y_dual, w_y_dual = tf.split(y_dual, (3, 1), axis=-1)
    y_real = tf.concat((-xyz_y_real, w_y_real), axis=-1)
    y_dual = tf.concat((-xyz_y_dual, w_y_dual), axis=-1)

    self.assertAllEqual(x_real, y_real)
    self.assertAllEqual(x_dual, y_dual) 
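The reference computation in this test negates the xyz components of each quaternion and keeps w, which can be reproduced in isolation (the quaternion below is arbitrary and only meant to show the split/concat pattern):

import tensorflow.compat.v2 as tf

q = tf.constant([[1.0, 2.0, 3.0, 4.0]])  # components (x, y, z, w)
xyz, w = tf.split(q, (3, 1), axis=-1)
q_conjugate = tf.concat((-xyz, w), axis=-1)
print(q_conjugate.numpy())  # [[-1. -2. -3.  4.]]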
Example #2
Source File: extensions.py    From trax with Apache License 2.0
def split(state, num):
  """Creates new independent RNG states from an existing state.

  Args:
    state: the existing state.
    num: the number of new states to create.

  Returns:
    A tuple of new states.
  """
  state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)
  state = _key2seed(state)
  try:
    states = tf.random.experimental.stateless_split(state, num)
  except AttributeError as e:  # pylint: disable=unused-variable
    # TODO(afrozm): For TF < 2.3 we need to do this. Delete once 2.3 launches.
    states = stateless_split(state, num)
  states = tf.unstack(states, num)
  states = tf.nest.map_structure(_seed2key, states)
  return states 
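This wrapper ultimately relies on the stock TensorFlow primitive for splitting stateless RNG seeds, which can be called directly on TF 2.3 or newer (the seed values below are arbitrary):

import tensorflow.compat.v2 as tf

seed = tf.constant([1, 2], dtype=tf.int64)  # a stateless RNG seed: two integers

# Derive three new, independent seeds from the original one.
new_seeds = tf.random.experimental.stateless_split(seed, num=3)
print(new_seeds.shape)  # (3, 2)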
Example #3
Source File: dual_quaternion.py    From graphics with Apache License 2.0
def conjugate(dual_quaternion, name=None):
  """Computes the conjugate of a dual quaternion.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    dual_quaternion: A tensor of shape `[A1, ..., An, 8]`, where the last
    dimension represents a normalized dual quaternion.
    name: A name for this op that defaults to "dual_quaternion_conjugate".

  Returns:
    A tensor of shape `[A1, ..., An, 8]`, where the last dimension represents
    a normalized dual quaternion.

  Raises:
    ValueError: If the shape of `dual_quaternion` is not supported.
  """
  with tf.compat.v1.name_scope(name, "dual_quaternion_conjugate",
                               [dual_quaternion]):
    dual_quaternion = tf.convert_to_tensor(value=dual_quaternion)

    shape.check_static(
        tensor=dual_quaternion, tensor_name="dual_quaternion",
        has_dim_equals=(-1, 8))

    quaternion_real, quaternion_dual = tf.split(
        dual_quaternion, (4, 4), axis=-1)

    quaternion_real = asserts.assert_normalized(quaternion_real)

    return tf.concat((quaternion.conjugate(quaternion_real),
                      quaternion.conjugate(quaternion_dual)),
                     axis=-1) 
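A possible usage sketch for this function follows; the import path is an assumption (adjust it to wherever dual_quaternion lives in your checkout), and the identity dual quaternion is used because the real part must be normalized:

import tensorflow.compat.v2 as tf
# Assumed import path for the graphics project; may differ from the actual layout.
from tensorflow_graphics.geometry.transformation import dual_quaternion

# Identity dual quaternion: real part (0, 0, 0, 1), dual part (0, 0, 0, 0).
dq = tf.constant([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
print(dual_quaternion.conjugate(dq).numpy())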
Example #4
Source File: array_ops.py    From trax with Apache License 2.0
def _boundaries_to_sizes(a, boundaries, axis):
  """Converting boundaries of splits to sizes of splits.

  Args:
    a: the array to be split.
    boundaries: the boundaries, as in np.split.
    axis: the axis along which to split.

  Returns:
    A list of sizes of the splits, as in tf.split.
  """
  if axis >= len(a.shape):
    raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
  total_size = a.shape[axis]
  sizes = []
  sizes_sum = 0
  prev = 0
  for i, b in enumerate(boundaries):
    size = b - prev
    if size < 0:
      raise ValueError('The %s-th boundary %s is smaller than the previous '
                       'boundary %s' % (i, b, prev))
    size = min(size, max(0, total_size - sizes_sum))
    sizes.append(size)
    sizes_sum += size
    prev = b
  sizes.append(max(0, total_size - sizes_sum))
  return sizes 
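A quick worked example of what this helper returns, following the np.split convention in which boundaries are cut indices rather than piece sizes (the numbers are arbitrary): for an array of length 7 and boundaries [2, 5], the pieces have lengths 2, 3 and 2.

import numpy as np

a = np.arange(7)
boundaries = [2, 5]  # cut before indices 2 and 5

# _boundaries_to_sizes(a, boundaries, axis=0) would return [2, 3, 2],
# which is exactly the size list tf.split expects.
print(np.split(a, boundaries))  # [array([0, 1]), array([2, 3, 4]), array([5, 6])]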
Example #5
Source File: array_ops.py    From trax with Apache License 2.0
def split(ary, indices_or_sections, axis=0):
  ary = asarray(ary)
  if not isinstance(indices_or_sections, six.integer_types):
    indices_or_sections = _boundaries_to_sizes(ary, indices_or_sections, axis)
  result = tf.split(ary.data, indices_or_sections, axis=axis)
  return [utils.tensor_to_ndarray(a) for a in result] 
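For comparison, the same split expressed directly with np.split (boundary indices) and tf.split (explicit sizes, i.e. the output of _boundaries_to_sizes); the input array is arbitrary:

import numpy as np
import tensorflow.compat.v2 as tf

x = np.arange(10).reshape(2, 5)

np_parts = np.split(x, [1, 3], axis=1)                   # boundaries -> widths 1, 2, 2
tf_parts = tf.split(tf.constant(x), [1, 2, 2], axis=1)   # explicit sizes

print([p.shape for p in np_parts])                       # [(2, 1), (2, 2), (2, 2)]
print([p.shape.as_list() for p in tf_parts])             # [[2, 1], [2, 2], [2, 2]]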
Example #6
Source File: array_ops.py    From trax with Apache License 2.0
def _split_on_axis(np_fun, axis):
  @utils.np_doc(np_fun)
  def f(ary, indices_or_sections):
    return split(ary, indices_or_sections, axis=axis)
  return f 
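In the trax source this factory is presumably used to build axis-specialized variants that mirror numpy's names; a sketch of that usage, assuming numpy is imported as np and the helpers above are in scope:

# Hypothetical module-level definitions built from the factory above.
vsplit = _split_on_axis(np.vsplit, axis=0)  # split along rows
hsplit = _split_on_axis(np.hsplit, axis=1)  # split along columns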
Example #7
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _apply_sigmoid_gating(x):
    """Apply the sigmoid gating in Figure 2 of [2]."""
    activation_tensor, gate_tensor = tf.split(x, 2, axis=-1)
    sigmoid_gate = tf.sigmoid(gate_tensor)
    return tf.keras.layers.multiply([sigmoid_gate, activation_tensor], dtype=x.dtype) 
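The gating itself needs nothing project-specific and can be reproduced with stock ops; the input below is arbitrary, with an even-sized feature dimension so it can be halved:

import tensorflow.compat.v2 as tf

x = tf.random.normal((1, 8, 8, 64))

# One half of the features drives the activation, the other half the gate.
activation, gate = tf.split(x, 2, axis=-1)
gated = tf.sigmoid(gate) * activation
print(gated.shape)  # (1, 8, 8, 32)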
Example #8
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _sample_channels(self, component_logits, locs, scales, coeffs=None, seed=None):
        """Sample a single pixel-iteration and apply channel conditioning.
        Args:
          component_logits: 4D `Tensor` of logits for the Categorical distribution
            over Quantized Logistic mixture components. Dimensions are `[batch_size,
            height, width, num_logistic_mix]`.
          locs: 4D `Tensor` of location parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
          scales: 4D `Tensor` of scale parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
          coeffs: 4D `Tensor` of coefficients for the linear dependence among color
            channels, or `None` if there is only one channel. Dimensions are
            `[batch_size, height, width, num_logistic_mix, num_coeffs]`, where
            `num_coeffs = num_channels * (num_channels - 1) // 2`.
          seed: `int`, random seed.
        Returns:
          samples: 4D `Tensor` of sampled image data with autoregression among
            channels. Dimensions are `[batch_size, height, width, num_channels]`.
        """
        num_channels = self.event_shape[-1]

        # sample mixture components once for the entire pixel
        component_dist = categorical.Categorical(logits=component_logits)
        mask = tf.one_hot(indices=component_dist.sample(seed=seed), depth=self._num_logistic_mix)
        mask = tf.cast(mask[..., tf.newaxis], self.dtype)

        # apply mixture component mask and separate out RGB parameters
        masked_locs = tf.reduce_sum(locs * mask, axis=-2)
        loc_tensors = tf.split(masked_locs, num_channels, axis=-1)
        masked_scales = tf.reduce_sum(scales * mask, axis=-2)
        scale_tensors = tf.split(masked_scales, num_channels, axis=-1)

        if coeffs is not None:
            num_coeffs = num_channels * (num_channels - 1) // 2
            masked_coeffs = tf.reduce_sum(coeffs * mask, axis=-2)
            coef_tensors = tf.split(masked_coeffs, num_coeffs, axis=-1)

        channel_samples = []
        coef_count = 0
        for i in range(num_channels):
            loc = loc_tensors[i]
            for c in channel_samples:
                loc += c * coef_tensors[coef_count]
                coef_count += 1

            logistic_samp = logistic.Logistic(loc=loc, scale=scale_tensors[i]).sample(seed=seed)
            logistic_samp = tf.clip_by_value(logistic_samp, -1., 1.)
            channel_samples.append(logistic_samp)

        return tf.concat(channel_samples, axis=-1) 
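The per-channel separation used above is just an equal split along the last axis; in isolation, with made-up shapes for an RGB image batch:

import tensorflow.compat.v2 as tf

locs = tf.random.normal((4, 32, 32, 3))  # e.g. per-pixel location parameters, 3 channels

# One tensor of shape (4, 32, 32, 1) per color channel.
loc_tensors = tf.split(locs, 3, axis=-1)
print(len(loc_tensors), loc_tensors[0].shape)  # 3 (4, 32, 32, 1)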
Example #9
Source File: continuous_batched.py    From compression with Apache License 2.0
def compress(self, bottleneck):
    """Compresses a floating-point tensor.

    Compresses the tensor to bit strings. `bottleneck` is first quantized
    as in `quantize()`, and then compressed using the probability tables derived
    from `self.prior`. The quantized tensor can later be recovered by
    calling `decompress()`.

    The innermost `self.coding_rank` dimensions are treated as one coding unit,
    i.e. are compressed into one string each. Any additional dimensions to the
    left are treated as batch dimensions.

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
        least `self.coding_rank` dimensions, and the innermost dimensions must
        be broadcastable to `self.prior_shape`.

    Returns:
      A `tf.Tensor` having the same shape as `bottleneck` without the
      `self.coding_rank` innermost dimensions, containing a string for each
      coding unit.
    """
    input_shape = tf.shape(bottleneck)
    input_rank = tf.shape(input_shape)[0]
    batch_shape, coding_shape = tf.split(
        input_shape, [input_rank - self.coding_rank, self.coding_rank])
    broadcast_shape = coding_shape[
        :self.coding_rank - len(self.prior_shape)]

    indexes = self._compute_indexes(broadcast_shape)
    if self._quantization_offset is not None:
      bottleneck -= self._quantization_offset
    symbols = tf.cast(tf.round(bottleneck), tf.int32)
    symbols = tf.reshape(symbols, tf.concat([[-1], coding_shape], 0))

    # Prevent tensors from bouncing back and forth between host and GPU.
    with tf.device("/cpu:0"):
      cdf = self.cdf
      cdf_length = self.cdf_length
      cdf_offset = self.cdf_offset
      def loop_body(symbols):
        return range_coding_ops.unbounded_index_range_encode(
            symbols, indexes, cdf, cdf_length, cdf_offset,
            precision=self.range_coder_precision,
            overflow_width=4, debug_level=1)

      # TODO(jonycgn,ssjhv): Consider switching to Python control flow.
      strings = tf.map_fn(
          loop_body, symbols, dtype=tf.string, name="compress")

    strings = tf.reshape(strings, batch_shape)
    return strings 
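tf.split also works on 1-D shape tensors, which is how the batch and coding dimensions are separated above; a minimal sketch with a made-up rank-4 tensor and a coding rank of 2:

import tensorflow.compat.v2 as tf

x = tf.zeros((3, 5, 7, 11))
coding_rank = 2

input_shape = tf.shape(x)
input_rank = tf.shape(input_shape)[0]
batch_shape, coding_shape = tf.split(
    input_shape, [input_rank - coding_rank, coding_rank])
print(batch_shape.numpy(), coding_shape.numpy())  # [3 5] [ 7 11]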
Example #10
Source File: lingunet.py    From valan with Apache License 2.0
def call(self,
           image_embed,
           instructions,
           instruction_lengths,
           training=False):
    assert self.num_channels == image_embed.shape[3]

    text_embed = self.text_embedder(instructions)
    text_embed = self.rnn(text_embed, instruction_lengths, training)
    text_embed_1, text_embed_2 = tf.split(text_embed, 2, axis=-1)
    batch_size = text_embed.shape[0]

    # Compute 1x1 convolution weights
    kern1 = self.dense_k1(text_embed_1)
    kern2 = self.dense_k2(text_embed_2)
    kern1 = tf.reshape(kern1, (
        batch_size, 1, 1, self.num_channels, self.num_channels))
    kern2 = tf.reshape(kern2, (
        batch_size, 1, 1, self.num_channels, self.num_channels))

    f0 = image_embed
    f1 = self.conv1(f0)
    f2 = self.conv2(f1)

    # Filter encoded image features to produce language-conditioned features.

    g1 = utils.parallel_conv2d(f1, kern1, 1, "SAME")
    g2 = utils.parallel_conv2d(f2, kern2, 1, "SAME")

    h2 = self.deconv2(g2)
    h2_g1 = tf.concat([h2, g1], axis=3)  # Assuming NHWC

    h1 = self.deconv1(h2_g1)

    out1 = self.dense1(h1)
    out2 = self.dense2(out1)
    out = tf.squeeze(self.out_dense(out2), -1)

    out_flat = tf.reshape(out, [batch_size, -1])
    # So that the output forms a prob distribution.
    out_flat = tf.nn.softmax(out_flat)
    return out_flat