Python tensorflow.compat.v2.range() Examples

The following are 30 code examples of tensorflow.compat.v2.range(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
Example #1
Source File: runnable.py    From models with Apache License 2.0
def train(self,
            num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """Implements model training with multiple steps.

    In training, it is common to break the total training steps into several
    training loops, so users can do checkpointing, write summaries and run some
    Python callbacks. This is necessary for good performance in TPU training,
    as the overhead of launching a multi-worker tf.function may be large in
    eager mode. In the TPU case it is usually encouraged to create a host
    training loop (e.g. using `tf.range` to wrap `strategy.run` inside a
    `tf.function`). For cases that don't require a host training loop to
    achieve peak performance, users can just implement a simple Python loop
    to drive each step.

    Args:
      num_steps: A guideline for how many training steps to run. Note that it is
        up to the model what constitutes a "step" (this may involve more than
        one update to model parameters, e.g. if training a GAN).

    Returns:
      The function may return a dictionary of `Tensors`, which will be
      written to logs and as TensorBoard summaries.
    """
    pass 
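The host training loop that this docstring recommends can be sketched briefly. This is a minimal illustration, not the models-repo implementation; `strategy`, `step_fn`, and `iterator` are hypothetical names assumed to exist.

import tensorflow.compat.v2 as tf

@tf.function
def host_training_loop(iterator, num_steps):
  # tf.range keeps the loop inside the compiled graph, so the
  # (potentially expensive) multi-worker function launch happens once
  # per loop rather than once per step.
  for _ in tf.range(num_steps):
    strategy.run(step_fn, args=(next(iterator),))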
Example #2
Source File: array_ops.py    From trax with Apache License 2.0
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = tf.rank(m)
  ax1, ax2 = utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access

  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    return flip(flip(m, ax1), ax2)
  else:
    perm = tf.range(m_rank)
    perm = tf.tensor_scatter_nd_update(perm, [[ax1], [ax2]], [ax2, ax1])

    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2) 
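The permutation logic above mirrors `np.rot90`, so it can be sanity-checked against NumPy. A small illustration of the branches for k=1 and k=2:

import numpy as np

m = np.arange(6).reshape(2, 3)   # [[0, 1, 2], [3, 4, 5]]
np.rot90(m, k=1)   # [[2, 5], [1, 4], [0, 3]] -- a flip followed by a transpose
np.rot90(m, k=2)   # [[5, 4, 3], [2, 1, 0]]   -- flip along both axes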
Example #3
Source File: math_ops.py    From trax with Apache License 2.0
def diff(a, n=1, axis=-1):
  def f(a):
    nd = a.shape.rank
    if (axis + nd if axis < 0 else axis) >= nd:
      raise ValueError("axis %s is out of bounds for array of dimension %s" %
                       (axis, nd))
    if n < 0:
      raise ValueError("order must be non-negative but got %s" % n)
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    op = tf.not_equal if a.dtype == tf.bool else tf.subtract
    for _ in range(n):
      a = op(a[slice1], a[slice2])
    return a
  return _scalar(f, a) 
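Since this mirrors `np.diff`, the effect of the repeated slice-and-subtract loop can be illustrated with NumPy:

import numpy as np

a = np.array([1, 2, 4, 7, 0])
np.diff(a)         # [ 1,  2,  3, -7] -- first-order differences
np.diff(a, n=2)    # [ 1,  1, -10]    -- the same operation applied twice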
Example #4
Source File: halton_test.py    From tf-quant-finance with Apache License 2.0
def test_many_small_batches_same_as_one_big_batch(self):
    dim = 2
    num_results_per_batch = 1
    num_batches = 3
    seed = 1925
    sample1, _ = random.halton.sample(
        dim, num_results_per_batch * num_batches, seed=seed)
    batch_indices = (
        tf.range(i * num_results_per_batch, (i + 1) * num_results_per_batch)
        for i in range(num_batches))
    sample2 = (
        random.halton.sample(dim, sequence_indices=sequence_indices, seed=seed)
        for sequence_indices in batch_indices)
    result_set1 = set(tuple(row) for row in self.evaluate(sample1))
    result_set2 = set()
    for batch, _ in sample2:
      result_set2.update(tuple(row) for row in self.evaluate(batch))
    self.assertEqual(result_set1, result_set2) 
Example #5
Source File: stateless_test.py    From tf-quant-finance with Apache License 2.0
def testOutputIsPermutation(self):
    """Checks that stateless_random_shuffle outputs a permutation."""
    for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
      identity_permutation = tf.range(10, dtype=dtype)
      random_shuffle_seed_1 = tff_rnd.stateless_random_shuffle(
          identity_permutation, seed=tf.constant((1, 42), tf.int64))
      random_shuffle_seed_2 = tff_rnd.stateless_random_shuffle(
          identity_permutation, seed=tf.constant((2, 42), tf.int64))
      # Check that the shuffles are of the correct dtype
      for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
        np.testing.assert_equal(shuffle.dtype, dtype.as_numpy_dtype)
      random_shuffle_seed_1 = self.evaluate(random_shuffle_seed_1)
      random_shuffle_seed_2 = self.evaluate(random_shuffle_seed_2)
      identity_permutation = self.evaluate(identity_permutation)
      # Check that the shuffles are different
      self.assertTrue(
          np.abs(random_shuffle_seed_1 - random_shuffle_seed_2).max())
      # Check that the shuffles are indeed permutations
      for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
        self.assertAllEqual(set(shuffle), set(identity_permutation)) 
Example #6
Source File: stateless_test.py    From tf-quant-finance with Apache License 2.0
def testOutputIsIndependentOfInputValues(self):
    """stateless_random_shuffle output is independent of input_tensor values."""
    # Generate a sorted array of random numbers to check that the result
    # is independent of `input_tensor` values
    np.random.seed(25)
    random_input = np.random.normal(size=[10])
    random_input.sort()
    for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
      # Permutation of a sequence [0, 1, .., 9]
      random_permutation = tff_rnd.stateless_random_shuffle(
          tf.range(10, dtype=dtype), seed=(100, 42))
      random_permutation = self.evaluate(random_permutation)
      # Shuffle `random_input` with the same seed
      random_shuffle_control = tff_rnd.stateless_random_shuffle(
          random_input, seed=(100, 42))
      random_shuffle_control = self.evaluate(random_shuffle_control)
      # Checks that the generated permutation does not depend on the underlying
      # values
      np.testing.assert_array_equal(
          np.argsort(random_permutation), np.argsort(random_shuffle_control)) 
Example #7
Source File: gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_backward_unconnected_gradient(self):
    t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
    zeros = tf.zeros([2], dtype=t.dtype)
    expected_result = [0.0, 0.0]
    func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
    with self.subTest("EagerExecution"):
      backward_grad = self.evaluate(tff.math.gradients(
          func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
      self.assertEqual(backward_grad.shape, (2,))
      np.testing.assert_allclose(backward_grad, expected_result)
    with self.subTest("GraphExecution"):
      @tf.function
      def grad_computation():
        y = func(t)
        return tff.math.gradients(
            y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
      backward_grad = self.evaluate(grad_computation())
      self.assertEqual(backward_grad.shape, (2,))
      np.testing.assert_allclose(backward_grad, expected_result) 
Example #8
Source File: lsm_test.py    From tf-quant-finance with Apache License 2.0
def test_expected_continuation(self):
    """Tests that expected continuation works in V=1 case.

    In particular this verifies that the regression done to get the expected
    continuation value is performed on those elements which have a positive
    exercise value.
    """
    for dtype in (np.float32, np.float64):
      a = tf.range(start=-2, limit=3, delta=1, dtype=dtype)
      design = tf.concat([a, a], axis=0)
      design = tf.concat([[tf.ones_like(design), design]], axis=1)

      # These values ensure that the expected continuation value is (1, ..., 1).
      exercise_now = tf.expand_dims(
          tf.concat([tf.ones_like(a), tf.zeros_like(a)], axis=0), -1)
      cashflow = tf.expand_dims(
          tf.concat([tf.ones_like(a), -tf.ones_like(a)], axis=0), -1)

      expected_exercise = lsm.expected_exercise_fn(
          design, cashflow, exercise_now)
      self.assertAllClose(expected_exercise, tf.ones_like(cashflow)) 
Example #9
Source File: gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_forward_unconnected_gradient(self):
    t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
    zeros = tf.zeros([2], dtype=t.dtype)
    func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
    expected_result = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
    with self.subTest("EagerExecution"):
      fwd_grad = self.evaluate(tff.math.fwd_gradient(
          func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
      self.assertEqual(fwd_grad.shape, (3, 2))
      np.testing.assert_allclose(fwd_grad, expected_result)
    with self.subTest("GraphExecution"):
      @tf.function
      def grad_computation():
        y = func(t)
        return tff.math.fwd_gradient(
            y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
      fwd_grad = self.evaluate(grad_computation())
      self.assertEqual(fwd_grad.shape, (3, 2))
      np.testing.assert_allclose(fwd_grad, expected_result) 
Example #10
Source File: util.py    From language with Apache License 2.0
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels with shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    A one-dimensional tensor top_labels, where top_labels[i] = 1.0 iff the
    top-scoring prediction for batch element i has label 1.0.
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather) 
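A brief usage sketch (the values are chosen for illustration):

import tensorflow.compat.v2 as tf

labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])
predictions = tf.constant([[0.2, 0.8], [0.3, 0.7]])
# Row 0: top prediction is class 1, which has label 1.0 -> 1.0
# Row 1: top prediction is class 1, which has label 0.0 -> 0.0
top = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# tf.reduce_mean(top) == 0.5, i.e. precision at 1 for this minibatch.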
Example #11
Source File: array_ops.py    From trax with Apache License 2.0
def geomspace(start, stop, num=50, endpoint=True, dtype=float):  # pylint: disable=missing-docstring
  if dtype:
    dtype = utils.result_type(dtype)
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  if not num:
    return empty([0])
  step = 1.
  if endpoint:
    if num > 1:
      step = tf.pow((stop / start), 1 / (num - 1))
  else:
    step = tf.pow((stop / start), 1 / num)
  result = tf.cast(tf.range(num), step.dtype)
  result = tf.pow(step, result)
  result = tf.multiply(result, start)
  if dtype:
    result = tf.cast(result, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(result)
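The step computation mirrors `np.geomspace`, so the endpoint handling can be checked against NumPy:

import numpy as np

np.geomspace(1, 1000, num=4)                  # [   1.,   10.,  100., 1000.]
np.geomspace(1, 1000, num=3, endpoint=False)  # [  1.,  10., 100.]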
Example #12
Source File: array_ops.py    From trax with Apache License 2.0
def arange(start, stop=None, step=1, dtype=None):
  """Returns `step`-separated values in the range [start, stop).

  Args:
    start: Start of the interval. Included in the range.
    stop: End of the interval. If not specified, the interval starts at 0 and
      the value passed as `start` is used as `stop`. If specified, it is not
      included in the range if `step` is an integer. When `step` is floating
      point, it may or may not be included.
    step: The difference between 2 consecutive values in the output range. It is
      recommended to use `linspace` instead of using non-integer values for
      `step`.
    dtype: Optional. Type of the resulting ndarray. Could be a python type, a
      NumPy type or a TensorFlow `DType`. If not provided, the largest type of
      `start`, `stop`, `step` is used.

  Raises:
    ValueError: If step is zero.
  """
  if not step:
    raise ValueError('step must be non-zero.')
  if dtype:
    dtype = utils.result_type(dtype)
  else:
    if stop is None:
      dtype = utils.result_type(start, step)
    else:
      dtype = utils.result_type(start, step, stop)
  if step > 0 and ((stop is not None and start > stop) or
                   (stop is None and start < 0)):
    return array([], dtype=dtype)
  if step < 0 and ((stop is not None and start < stop) or
                   (stop is None and start > 0)):
    return array([], dtype=dtype)
  # TODO(srbs): There are some bugs when start or stop is float type and dtype
  # is integer type.
  return arrays_lib.tensor_to_ndarray(
      tf.cast(tf.range(start, limit=stop, delta=step), dtype=dtype)) 
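The early-return branches above reproduce NumPy's empty-range behavior:

import numpy as np

np.arange(5)          # [0, 1, 2, 3, 4]
np.arange(2, 10, 3)   # [2, 5, 8]
np.arange(5, 2)       # [] -- start > stop with a positive step yields empty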
Example #13
Source File: utils.py    From models with Apache License 2.0
def create_tf_while_loop_fn(step_fn):
  """Create a multiple steps function driven by tf.while_loop on the host.

  Args:
    step_fn: A function which takes `iterator` as input.

  Returns:
    A callable defined as `loop_fn` below.
  """

  @tf.function
  def loop_fn(iterator, num_steps):
    """A loop function with multiple steps.

    Args:
      iterator: A nested structure of tf.data `Iterator` or
        `DistributedIterator`.
      num_steps: The number of steps in the loop. Must be a tf.Tensor.
    """
    if not isinstance(num_steps, tf.Tensor):
      raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
                       "may cause retracing.")

    for _ in tf.range(num_steps):
      step_fn(iterator)

  return loop_fn 
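A minimal usage sketch, assuming a hypothetical per-step function `train_step` that consumes one batch per call:

import tensorflow.compat.v2 as tf

def train_step(iterator):
  batch = next(iterator)
  tf.print("processing batch:", batch)  # one optimizer update would go here

loop_fn = create_tf_while_loop_fn(train_step)
iterator = iter(tf.data.Dataset.range(10).batch(2))
# num_steps must be a tf.Tensor, per the check above, to avoid retracing.
loop_fn(iterator, tf.constant(3))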
Example #14
Source File: continuous_batched_test.py    From compression with Apache License 2.0
def test_high_entropy_bounds(self):
    # For high entropy distributions, the training bound should be very tight,
    # and the overhead of range coding manageable.
    noisy = uniform_noise.NoisyNormal(loc=0., scale=100.)
    em = ContinuousBatchedEntropyModel(noisy, 1, compression=True)
    x = noisy.base.sample([10000])
    bits_eval = em.bits(x, training=False)
    bits_training = em.bits(x, training=True)
    bits_compressed = 8 * len(em.compress(x).numpy())
    self.assertAllClose(bits_training, bits_eval, atol=0, rtol=5e-5)
    self.assertAllClose(bits_compressed, bits_eval, atol=0, rtol=5e-3) 
Example #15
Source File: utils.py    From valan with Apache License 2.0
def get_first_true_column(x):
  """Transforms `x` into a tensor which has all elements set to False except the first True in the column.

  If x is [[True, False, False],
           [True, False, False],
           [False, True, False],
           [False, True, True]]
  the output should be
          [[True, False, False],
           [False, False, False],
           [False, True, False],
           [False, False, True]
          ]

  Args:
    x: A bool tensor with shape [num_steps, batch_size]

  Returns:
    A bool tensor with the same shape.
  """
  x = tf.transpose(x, perm=[1, 0])
  # Get indices
  y = tf.where(tf.equal(x, True))
  # Find first column in every row which is True
  first_true_cols = tf.cast(
      tf.math.segment_min(data=y[:, 1], segment_ids=y[:, 0]), tf.int32)
  # Convert back to indices
  first_true_indices = tf.stack(
      [tf.range(tf.size(first_true_cols)), first_true_cols], axis=1)
  # Now create the mask
  first_true_mask_sparse = tf.SparseTensor(
      indices=tf.cast(first_true_indices, tf.int64),
      values=tf.ones([tf.size(first_true_cols)], dtype=tf.bool),
      dense_shape=x.shape)
  first_true_mask = tf.sparse.to_dense(
      first_true_mask_sparse, default_value=False)
  return tf.transpose(first_true_mask, perm=[1, 0]) 
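The docstring example can be reproduced directly:

import tensorflow.compat.v2 as tf

x = tf.constant([[True, False, False],
                 [True, False, False],
                 [False, True, False],
                 [False, True, True]])
get_first_true_column(x)
# [[ True, False, False],
#  [False, False, False],
#  [False,  True, False],
#  [False, False,  True]]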
Example #16
Source File: utils.py    From valan with Apache License 2.0
def levenshtein(seq1, seq2):
  """Computes Levenshtein edit distance between two sequences.

  Adapted from online tutorial by Frank Hofmann.

  Args:
    seq1: The first sequence of any datatype that implements equality operator.
    seq2: The second sequence of any datatype that implements equality operator.

  Returns:
    Levenshtein edit distance (integer) between the two sequences.
  """
  size_x = len(seq1) + 1
  size_y = len(seq2) + 1
  mat = np.zeros((size_x, size_y))
  for x in range(size_x):
    mat[x, 0] = x
  for y in range(size_y):
    mat[0, y] = y

  for x in range(1, size_x):
    for y in range(1, size_y):
      if seq1[x-1] == seq2[y-1]:
        mat[x, y] = min(
            mat[x-1, y] + 1,
            mat[x-1, y-1],
            mat[x, y-1] + 1
        )
      else:
        mat[x, y] = min(
            mat[x-1, y] + 1,
            mat[x-1, y-1] + 1,
            mat[x, y-1] + 1
            )

  return mat[size_x - 1, size_y - 1] 
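Because the sequences only need to support the equality operator, both strings and lists work:

levenshtein("kitten", "sitting")   # 3.0 (k->s, e->i, insert g)
levenshtein([1, 2, 3], [1, 3])     # 1.0 (delete the 2)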
Example #17
Source File: utils.py    From valan with Apache License 2.0
def parallel_conv2d(inputs, filters, strides, padding):
  """Applies each filter in the batch of filters to each input.

  tf.nn.conv2d only supports applying the same filter on a batch of inputs.
  This function provides a similar interface, but allowing a batch of filters,
  a different one for each input.

  In the below definitions, B is the batch size, H and W are spatial input or
  output dimensions (overloaded between input and output), C1 is the input
  number of channels, C2 is output number of channels, KHxKW is the
  convolutional kernel spatial size.

  Args:
    inputs: BxHxWxC1 tensor - batch of input "images"
    filters: BxKHxKWxC1xC2 tensor - batch of convolutional kernels
    strides: See tf.nn.conv2d arg: strides
    padding: See tf.nn.conv2d arg: padding

  Returns:
    Tensor of shape BxHxWxC2
  """
  batch_size = inputs.shape[0]

  output_slices = [tf.nn.conv2d(inputs[i:i+1], filters[i], strides, padding)
                   for i in range(batch_size)]
  output = tf.stack(output_slices, axis=0)
  # Each output slice has a batch dimension of size 1. Get rid of it.
  assert output.shape[1] == 1, 'Each slice should have batch size of 1'
  output = output[:, 0, :, :, :]
  # Output should have same batch size and spatial dimensions as input, but
  # the number of channels is determined by the convolution filter
  assert_shape((batch_size, inputs.shape[1], inputs.shape[2], filters.shape[4]),
               output.shape)
  return output 
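A usage sketch with 'SAME' padding and unit stride, so the asserted spatial shapes hold (the shapes are illustrative):

import tensorflow.compat.v2 as tf

# Batch of 4 inputs (8x8, 3 channels) and 4 per-example 3x3 kernels,
# each mapping 3 input channels to 16 output channels.
inputs = tf.random.normal([4, 8, 8, 3])
filters = tf.random.normal([4, 3, 3, 3, 16])
out = parallel_conv2d(inputs, filters, strides=1, padding='SAME')
# out.shape == (4, 8, 8, 16)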
Example #18
Source File: continuous_base.py    From compression with Apache License 2.0
def prior(self):
    """Prior distribution, used for range coding."""
    if not hasattr(self, "_prior"):
      raise RuntimeError(
          "This entropy model doesn't hold a reference to its prior "
          "distribution. This can happen when it is unserialized, because "
          "the prior is generally not serializable.")
    return self._prior 
Example #19
Source File: continuous_indexed.py    From compression with Apache License 2.0
def bits(self, bottleneck, indexes, training=True):
    """Estimates the number of bits needed to compress a tensor.

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be compressed.
      indexes: `tf.Tensor` specifying the scalar distribution for each element
        in `bottleneck`. See class docstring for examples.
      training: Boolean. If `False`, computes the Shannon information of
        `bottleneck` under the distribution computed by `self.prior_fn`,
        which is a non-differentiable, tight *lower* bound on the number of bits
        needed to compress `bottleneck` using `compress()`. If `True`, returns a
        somewhat looser, but differentiable *upper* bound on this quantity.

    Returns:
      A `tf.Tensor` having the same shape as `bottleneck` without the
      `self.coding_rank` innermost dimensions, containing the number of bits.
    """
    indexes = self._normalize_indexes(indexes)
    prior = self._make_prior(indexes)
    if training:
      quantized = bottleneck + tf.random.uniform(
          tf.shape(bottleneck), minval=-.5, maxval=.5, dtype=bottleneck.dtype)
    else:
      offset = helpers.quantization_offset(prior)
      quantized = self._quantize(bottleneck, offset)
    probs = prior.prob(quantized)
    probs = math_ops.lower_bound(probs, self.likelihood_bound)
    axes = tuple(range(-self.coding_rank, 0))
    bits = tf.reduce_sum(tf.math.log(probs), axis=axes) / -tf.math.log(2.)
    return bits 
Example #20
Source File: continuous_batched.py    From compression with Apache License 2.0
def bits(self, bottleneck, training=True):
    """Estimates the number of bits needed to compress a tensor.

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
        least `self.coding_rank` dimensions, and the innermost dimensions must
        be broadcastable to `self.prior_shape`.
      training: Boolean. If `False`, computes the Shannon information of
        `bottleneck` under the distribution `self.prior`, which is a
        non-differentiable, tight *lower* bound on the number of bits needed to
        compress `bottleneck` using `compress()`. If `True`, returns a somewhat
        looser, but differentiable *upper* bound on this quantity.

    Returns:
      A `tf.Tensor` having the same shape as `bottleneck` without the
      `self.coding_rank` innermost dimensions, containing the number of bits.
    """
    if training:
      quantized = bottleneck + tf.random.uniform(
          tf.shape(bottleneck), minval=-.5, maxval=.5, dtype=bottleneck.dtype)
    else:
      quantized = self.quantize(bottleneck)
    probs = self.prior.prob(quantized)
    probs = math_ops.lower_bound(probs, self.likelihood_bound)
    axes = tuple(range(-self.coding_rank, 0))
    bits = tf.reduce_sum(tf.math.log(probs), axis=axes) / -tf.math.log(2.)
    return bits 
Example #21
Source File: continuous_batched.py    From compression with Apache License 2.0
def _compute_indexes(self, broadcast_shape):
    # TODO(jonycgn, ssjhv): Investigate broadcasting in range coding op.
    prior_size = functools.reduce(lambda x, y: x * y, self.prior_shape, 1)
    indexes = tf.range(prior_size, dtype=tf.int32)
    indexes = tf.reshape(indexes, self.prior_shape)
    indexes = tf.broadcast_to(
        indexes, tf.concat([broadcast_shape, self.prior_shape], 0))
    return indexes 
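The enumerate-reshape-broadcast pattern can be seen in isolation. A minimal sketch with a hypothetical prior_shape of (2, 3) and a batch broadcast_shape of (4,):

import functools
import tensorflow.compat.v2 as tf

prior_shape = (2, 3)
broadcast_shape = tf.constant([4])
prior_size = functools.reduce(lambda x, y: x * y, prior_shape, 1)  # 6
indexes = tf.reshape(tf.range(prior_size, dtype=tf.int32), prior_shape)
indexes = tf.broadcast_to(
    indexes, tf.concat([broadcast_shape, prior_shape], 0))
# indexes.shape == (4, 2, 3); every [2, 3] slice is [[0, 1, 2], [3, 4, 5]]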
Example #22
Source File: array_ops.py    From trax with Apache License 2.0
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  a = asarray(a)

  a_rank = tf.rank(a)
  if axis1 < 0:
    axis1 += a_rank
  if axis2 < 0:
    axis2 += a_rank

  perm = tf.range(a_rank)
  perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]], [axis2, axis1])
  a = tf.transpose(a, perm)

  return utils.tensor_to_ndarray(a) 
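The behavior matches `np.swapaxes`, including the negative-axis normalization above:

import numpy as np

a = np.zeros((1, 2, 3))
np.swapaxes(a, 0, 2).shape    # (3, 2, 1)
np.swapaxes(a, -1, -2).shape  # (1, 3, 2)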
Example #23
Source File: loop_with_variable_type_illegal_cases_test.py    From autograph with Apache License 2.0
def _tf_range(l):
  return tf.range(len(l)) 
Example #24
Source File: loop_with_variable_type_illegal_cases_test.py    From autograph with Apache License 2.0
def for_with_shape_invariant_violation(l):
  t = tf.constant([1])
  for _ in l:
    tf.autograph.experimental.set_loop_options(
        shape_invariants=((t, tf.TensorShape([1])),))
    t = tf.range(tf.random.uniform((), 2, 3, dtype=tf.int32))
  return t 
Example #25
Source File: loop_with_variable_type_illegal_cases_test.py    From autograph with Apache License 2.0
def for_with_shape_erasure(l):
  t = tf.constant([1])
  for _ in l:
    t = tf.range(tf.random.uniform((), 2, 3, dtype=tf.int32))
  return t 
Example #26
Source File: loop_with_variable_type_illegal_cases_test.py    From autograph with Apache License 2.0
def while_with_shape_erasure():
  t = tf.constant([1])
  while tf.constant(True):
    t = tf.range(tf.random.uniform((), 2, 3, dtype=tf.int32))
  return t 
Example #27
Source File: loop_with_variable_type_test.py    From autograph with Apache License 2.0
def _int_dataset_range(n):
  return tf.data.Dataset.range(n).map(lambda x: tf.cast(x, tf.int32)) 
Example #28
Source File: loop_with_variable_type_test.py    From autograph with Apache License 2.0
def for_with_variable_shape_and_break(n):
  v = tf.constant([0, 0])
  if n > 1:
    for i in range(n):
      tf.autograph.experimental.set_loop_options(
          shape_invariants=[(v, tf.TensorShape([None]))])
      v = tf.concat((v, [i]), 0)
      i += 1
      if i > 3:
        break
  else:
    v = tf.constant([1, 2, 3])
  return v 
Example #29
Source File: loop_with_variable_type_test.py    From autograph with Apache License 2.0
def for_with_variable_shape_inside_if(n):
  v = tf.constant([0, 0])
  if n > 1:
    for i in range(n):
      tf.autograph.experimental.set_loop_options(
          shape_invariants=[(v, tf.TensorShape([None]))])
      v = tf.concat((v, [i]), 0)
      i += 1
  else:
    v = tf.constant([1, 2, 3])
  return v 
Example #30
Source File: continuous_base.py    From compression with Apache License 2.0
def _check_compression(self):
    if not self.compression:
      raise RuntimeError(
          "For range coding, the entropy model must be instantiated with "
          "`compression=True`.")