Python tensorflow.compat.v2.function() Examples

The following are 30 code examples of tensorflow.compat.v2.function(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
Example #1
Source File: date_tensor.py    From tf-quant-finance with Apache License 2.0
def to_tensor(self):
    """Packs the dates into a single Tensor.

    The Tensor has shape `date_tensor.shape() + (3,)`, where the last dimension
    represents years, months and days, in this order.

    This can be convenient when the dates are the final result of a computation
    in graph mode: a `tf.function` can return `date_tensor.to_tensor()`, or,
    if one uses `tf.compat.v1.Session`, one can call
    `session.run(date_tensor.to_tensor())`.

    Returns:
      A Tensor of shape `date_tensor.shape() + (3,)`.

    #### Example

    ```python
    dates = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)])
    dates.to_tensor()  # tf.Tensor with contents [[2019, 1, 25], [2020, 3, 2]].
    ```
    """
    return tf.stack((self.year(), self.month(), self.day()), axis=-1) 
Example #2
Source File: continuous_batched_test.py    From compression with Apache License 2.0
def test_compression_works_in_tf_function(self):
    noisy = uniform_noise.NoisyNormal(loc=0, scale=5.)
    sample = noisy.base.sample([100])

    # Since tf.function traces each function twice, and only allows variable
    # creation in the first call, we need a stateful object in which we create
    # the entropy model only the first time the function is called, and reuse
    # it on subsequent calls.

    class Compressor(object):

      def compress(self, values):
        if not hasattr(self, "em"):
          self.em = ContinuousBatchedEntropyModel(noisy, 1, compression=True)
        compressed = self.em.compress(values)
        decompressed = self.em.decompress(compressed, [])
        return decompressed

    values_eager = Compressor().compress(sample)
    values_function = tf.function(Compressor().compress)(sample)
    self.assertAllEqual(values_eager, values_function) 
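The create-on-first-call guard above is a general workaround for the two-trace behavior, not something specific to entropy models. A minimal sketch of the same pattern with a plain `tf.Variable`; the `AddOne` class here is illustrative, not part of the original test:

```python
import tensorflow.compat.v2 as tf

class AddOne(object):

  def __call__(self, x):
    # Create the variable only on the first trace; later traces reuse it.
    if not hasattr(self, "v"):
      self.v = tf.Variable(1.0)
    return x + self.v

add_one = tf.function(AddOne())
add_one(tf.constant(2.0))  # tf.Tensor(3.0)
```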
Example #3
Source File: extensions.py    From trax with Apache License 2.0
def logsumexp(x, axis=None, keepdims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `x` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.
  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.
  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(x), rank(x))`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    The reduced tensor.
  """
  return tf_np.asarray(
      tf.math.reduce_logsumexp(
          input_tensor=x.data, axis=axis, keepdims=keepdims)) 
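The numerical-stability claim in the docstring is easy to verify directly with the underlying TensorFlow op; a quick sketch (the inputs are chosen so that `exp` overflows float32):

```python
import tensorflow.compat.v2 as tf

x = tf.constant([1000.0, 1000.0])
tf.math.log(tf.reduce_sum(tf.exp(x)))  # inf: exp(1000.) overflows float32
tf.math.reduce_logsumexp(x)            # ~1000.6931 == 1000. + log(2.)
```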
Example #4
Source File: eval_actor_test.py    From valan with Apache License 2.0
def test_run_eval_actor_once(self):
    hparams = {}
    hparams['max_iter'] = 1
    hparams['num_episodes_per_iter'] = 5
    hparams['logdir'] = os.path.join(FLAGS.test_tmpdir, 'model')

    mock_problem = testing_utils.MockProblem(unroll_length=FLAGS.unroll_length)
    agent = mock_problem.get_agent()
    ckpt_manager = _get_ckpt_manager(hparams['logdir'], agent=agent)
    ckpt_manager.save(checkpoint_number=0)

    # Create a no-op gRPC server that responds to Aggregator RPCs.
    server_address = 'unix:/tmp/eval_actor_test_grpc'
    server = grpc.Server([server_address])

    @tf.function(input_signature=[tf.TensorSpec(shape=(), dtype=tf.string)])
    def eval_enqueue(_):
      return []

    server.bind(eval_enqueue, batched=False)

    server.start()

    eval_actor.run_with_aggregator(mock_problem, server_address, hparams) 
Example #5
Source File: continuous_indexed.py    From compression with Apache License 2.0
def quantize(self, bottleneck, indexes):
    """Quantizes a floating-point tensor.

    To use this entropy model as an information bottleneck during training, pass
    a tensor through this function. The tensor is rounded to integer values
    modulo a quantization offset, which depends on `indexes`. For instance, for
    Gaussian distributions, the returned values are rounded to the location of
    the mode of the distributions plus or minus an integer.

    The gradient of this rounding operation is overridden with the identity
    (straight-through gradient estimator).

    Arguments:
      bottleneck: `tf.Tensor` containing the data to be quantized.
      indexes: `tf.Tensor` specifying the scalar distribution for each element
        in `bottleneck`. See class docstring for examples.

    Returns:
      A `tf.Tensor` containing the quantized values.
    """
    indexes = self._normalize_indexes(indexes)
    offset = helpers.quantization_offset(self._make_prior(indexes))
    return self._quantize(bottleneck, offset) 
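The straight-through gradient trick the docstring describes can be written with `tf.stop_gradient`. A minimal sketch, with a zero quantization offset for simplicity (this is not the library's actual `_quantize`, just the estimator it refers to):

```python
import tensorflow.compat.v2 as tf

def st_round(x):
  # Forward pass: round(x). Backward pass: identity, because the
  # non-differentiable rounding is wrapped in tf.stop_gradient.
  return x + tf.stop_gradient(tf.round(x) - x)

x = tf.constant([0.4, 1.6])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = st_round(x)
tape.gradient(y, x)  # [1., 1.], even though y == [0., 2.]
```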
Example #6
Source File: test_util.py    From spectral-density with Apache License 2.0
def hessian(function: Callable[[Parameters], tf.Tensor],
            parameters: Parameters) -> Parameters:
  """Computes the Hessian of a given function.

  Useful for testing, although scales very poorly.

  Args:
    function: A function for which we want to compute the Hessian.
    parameters: Parameters with respect to which the Hessian should be
      computed.

  Returns:
    A tensor or list of tensors with the same nested structure as
      `parameters`, representing the Hessian.
  """
  with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
      value = function(parameters)
    grads = inner_tape.gradient(value, parameters)
    grads = tensor_list_util.tensor_list_to_vector(grads)
  return outer_tape.jacobian(grads, parameters) 
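A hypothetical usage sketch, assuming `Parameters` is a list of variables (which is what the `tensor_list_util` helper and the default `GradientTape` watching behavior suggest); the quadratic below has a known Hessian of `2 * I`:

```python
w = [tf.Variable(tf.ones([3]))]
h = hessian(lambda p: tf.reduce_sum(p[0] ** 2), w)
# h is a list with a single [3, 3] tensor equal to 2 * tf.eye(3).
```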
Example #7
Source File: runnable.py    From models with Apache License 2.0
def train(self,
            num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
    """Implements model training with multiple steps.

    In training, it is common to break the total training steps into several
    training loops, so users can do checkpointing, write summaries, and run
    Python callbacks. This is necessary for getting good performance in TPU
    training, as the overhead of launching a multi-worker tf.function may be
    large in Eager mode. It is usually encouraged to create a host training
    loop (e.g. using a `tf.range` wrapping `strategy.run` inside a
    `tf.function`) in the TPU case. For cases that don't require a host
    training loop to achieve peak performance, users can implement a simple
    Python loop to drive each step.

    Args:
      num_steps: A guideline for how many training steps to run. Note that it is
        up to the model what constitutes a "step" (this may involve more than
        one update to model parameters, e.g. if training a GAN).

    Returns:
      The function may return a dictionary of `Tensors`, which will be
      written to logs and as TensorBoard summaries.
    """
    pass 
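The host training loop the docstring alludes to is a common TF2 TPU pattern. A minimal sketch, assuming a `strategy`, a per-replica `train_step`, and a distributed `iterator` already exist (all three names are illustrative, not from the original file):

```python
@tf.function
def train_loop(iterator, num_steps):
  # Drive num_steps device steps from inside a single tf.function, so the
  # whole loop is launched once rather than once per step.
  for _ in tf.range(num_steps):
    strategy.run(train_step, args=(next(iterator),))
```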
Example #8
Source File: test_util.py    From spectral-density with Apache License 2.0
def hessian_as_matrix(function: Callable[[Parameters], tf.Tensor],
                      parameters: Parameters) -> tf.Tensor:
  """Computes the Hessian of a given function.

  Same as `hessian`, but returns a matrix of size [w_dim, w_dim], where
  `w_dim` is the number of parameters, which makes it easier to work with.

  Args:
    function: A function for which we want to compute the Hessian.
    parameters: Parameters with respect to which the Hessian should be
      computed.

  Returns:
    A tensor of size [w_dim, w_dim] representing the Hessian.
  """
  hessian_as_tensor_list = hessian(function, parameters)
  hessian_as_tensor_list = [
      tf.reshape(e, [e.shape[0], -1]) for e in hessian_as_tensor_list]
  return tf.concat(hessian_as_tensor_list, axis=1) 
Example #9
Source File: census_example_v2.py    From transform with Apache License 2.0
def input_fn(tf_transform_output, transformed_examples_pattern, batch_size):
  """An input function reading from transformed data, converting to model input.

  Args:
    tf_transform_output: Wrapper around output of tf.Transform.
    transformed_examples_pattern: Base filename of examples.
    batch_size: Batch size.

  Returns:
    The input data for training or eval, in the form of a `tf.data.Dataset`.
  """
  return tf.data.experimental.make_batched_features_dataset(
      file_pattern=transformed_examples_pattern,
      batch_size=batch_size,
      features=tf_transform_output.transformed_feature_spec(),
      reader=tf.data.TFRecordDataset,
      label_key=LABEL_KEY,
      shuffle=True) 
Example #10
Source File: saved_model_v2_predictor_test.py    From tensor2robot with Apache License 2.0
def _save_model(self, model, sample_features):
    if self._saved_model_path:
      return self._saved_model_path

    # Save inference_network_fn as the predict method for the saved_model.
    @tf.function(autograph=False)
    def predict(features):
      return model.inference_network_fn(features, None,
                                        tf.compat.v1.estimator.ModeKeys.PREDICT)

    # Call the model for the tf.function tracing side effects.
    predict(sample_features)
    model.predict = predict

    self._saved_model_path = self.create_tempdir().full_path
    tf.saved_model.save(model, self._saved_model_path)
    _generate_assets(model, self._saved_model_path)
    return self._saved_model_path 
Example #11
Source File: gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_forward_unconnected_gradient(self):
    t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
    zeros = tf.zeros([2], dtype=t.dtype)
    func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
    expected_result = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
    with self.subTest("EagerExecution"):
      fwd_grad = self.evaluate(tff.math.fwd_gradient(
          func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
      self.assertEqual(fwd_grad.shape, (3, 2))
      np.testing.assert_allclose(fwd_grad, expected_result)
    with self.subTest("GraphExecution"):
      @tf.function
      def grad_computation():
        y = func(t)
        return tff.math.fwd_gradient(
            y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
      fwd_grad = self.evaluate(grad_computation())
      self.assertEqual(fwd_grad.shape, (3, 2))
      np.testing.assert_allclose(fwd_grad, expected_result) 
Example #12
Source File: gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_backward_unconnected_gradient(self):
    t = tf.range(1, 3, dtype=tf.float32)  # Shape [2]
    zeros = tf.zeros([2], dtype=t.dtype)
    expected_result = [0.0, 0.0]
    func = lambda t: tf.stack([zeros, zeros, zeros], axis=0)  # Shape [3, 2]
    with self.subTest("EagerExecution"):
      backward_grad = self.evaluate(tff.math.gradients(
          func, t, unconnected_gradients=tf.UnconnectedGradients.ZERO))
      self.assertEqual(backward_grad.shape, (2,))
      np.testing.assert_allclose(backward_grad, expected_result)
    with self.subTest("GraphExecution"):
      @tf.function
      def grad_computation():
        y = func(t)
        return tff.math.gradients(
            y, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
      backward_grad = self.evaluate(grad_computation())
      self.assertEqual(backward_grad.shape, (2,))
      np.testing.assert_allclose(backward_grad, expected_result) 
Example #13
Source File: custom_loops_test.py    From tf-quant-finance with Apache License 2.0
def test_with_xla(self):
    @tf.function
    def fn():
      x = tf.constant([[3.0, 4.0], [30.0, 40.0]])
      y = tf.constant([[7.0, 8.0], [70.0, 80.0]])
      alpha = tf.constant(2.0)
      beta = tf.constant(1.0)
      with tf.GradientTape(persistent=True) as tape:
        tape.watch([alpha, beta])
        def body(i, state):
          del i
          x, y = state
          return [x * alpha - beta, y * beta + x]
        out = for_loop(body, [x, y], [alpha, beta], 3)
      return tape.gradient(out[1], beta)

    grad = self.evaluate(tf.xla.experimental.compile(fn))[0]
    self.assertAllEqual(783, grad) 
Example #14
Source File: lsm_v2_test.py    From tf-quant-finance with Apache License 2.0
def test_american_option_put_xla(self):
    """Tests that LSM price of American put option with xla compilation."""
    # This is the same example as above. Here we compile the graph with the XLA
    # compiler.
    basis_fn = lsm_algorithm.make_polynomial_basis_v2(2)
    dtype = np.float64
    payoff_fn = lsm_algorithm.make_basket_put_payoff([1.1], dtype=dtype)
    # Option price function
    def american_put_price_fn(samples):
      return lsm_algorithm.least_square_mc_v2(
          samples, [1, 2, 3], payoff_fn, basis_fn,
          discount_factors=self.discount_factors, dtype=dtype)
    # Compile the graph with XLA
    @tf.function
    def xla_compiled_op(samples):
      return tf.xla.experimental.compile(american_put_price_fn, [samples])[0]
    # Option price
    american_put_price = xla_compiled_op(self.samples)
    self.assertAllClose(american_put_price, [0.1144],
                        rtol=1e-4, atol=1e-4) 
Example #15
Source File: utils_test.py    From tf-quant-finance with Apache License 2.0
def test_maybe_update_along_axis(self, dtype):
    """Tests that the values are updated correctly."""
    tensor = tf.ones([5, 4, 3, 2], dtype=dtype)
    new_tensor = tf.zeros([5, 4, 1, 2], dtype=dtype)
    @tf.function
    def maybe_update_along_axis(do_update):
      return utils.maybe_update_along_axis(
          tensor=tensor, new_tensor=new_tensor, axis=1, ind=2,
          do_update=do_update)
    updated_tensor = maybe_update_along_axis(True)
    with self.subTest(name='Shape'):
      self.assertEqual(updated_tensor.shape, tensor.shape)
    with self.subTest(name='UpdatedVals'):
      self.assertAllEqual(updated_tensor[:, 2, :, :],
                          tf.zeros_like(updated_tensor[:, 2, :, :]))
    with self.subTest(name='NotUpdatedVals'):
      self.assertAllEqual(updated_tensor[:, 1, :, :],
                          tf.ones_like(updated_tensor[:, 2, :, :]))
    with self.subTest(name='DoNotUpdateVals'):
      not_updated_tensor = maybe_update_along_axis(False)
      self.assertAllEqual(not_updated_tensor, tensor) 
Example #16
Source File: geometric_brownian_motion_test.py    From tf-quant-finance with Apache License 2.0
def test_multivariate_xla_compatible(self):
    """Tests that multivariate GBM sampling is XLA-compatible."""
    corr_matrix = [[1, 0.1], [0.1, 1]]
    process = tff.models.MultivariateGeometricBrownianMotion(
        dim=2, means=0.05, volatilities=[0.1, 0.2], corr_matrix=corr_matrix,
        dtype=tf.float64)
    times = [0.1, 0.5, 1.0]
    initial_state = [1.0, 2.0]
    @tf.function
    def sample_fn():
      return process.sample_paths(
          times=times, initial_state=initial_state, num_samples=10000)
    samples = tf.xla.experimental.compile(sample_fn)[0]
    log_s = tf.math.log(samples)
    mean = tf.reduce_mean(log_s, axis=0)
    expected_mean = ((process._means - process._vols**2 / 2)
                     * np.array(np.expand_dims(times, -1))
                     + np.log(initial_state))
    self.assertAllClose(mean, expected_mean, atol=1e-2, rtol=1e-2) 
Example #17
Source File: utils.py    From models with Apache License 2.0
def create_loop_fn(step_fn):
  """Creates a multiple steps function driven by the python while loop.

  Args:
    step_fn: A function which takes `iterator` as input.

  Returns:
    A callable matching the `loop_fn` definition below.
  """

  def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
    """A loop function with multiple steps.

    Args:
      iterator: A nested structure of tf.data `Iterator` or
        `DistributedIterator`.
      num_steps: The number of steps in the loop. If `num_steps == -1`, will
        iterate until exhausting the iterator.
      state: An optional initial state before running the loop.
      reduce_fn: A callable defined as `def reduce_fn(state, value)`, where
        `value` is the output of `step_fn`.

    Returns:
      The updated state.
    """
    try:
      step = 0
      # To make sure the OutOfRangeError exception can be handled well with
      # async remote eager, we need to wrap the loop body in an `async_scope`.
      with tf.experimental.async_scope():
        while (num_steps == -1 or step < num_steps):
          outputs = step_fn(iterator)
          if reduce_fn is not None:
            state = reduce_fn(state, outputs)
          step += 1
        return state
    except (StopIteration, tf.errors.OutOfRangeError):
      tf.experimental.async_clear_error()
      return state

  return loop_fn 
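A hypothetical usage sketch of the returned `loop_fn`, accumulating the values a step function pulls from a dataset (the dataset and step function here are illustrative, not from the original file):

```python
dataset = tf.data.Dataset.range(4).map(lambda x: tf.cast(x, tf.float32))
loop_fn = create_loop_fn(lambda it: next(it))
total = loop_fn(iter(dataset), num_steps=-1, state=0.0,
                reduce_fn=lambda state, value: state + value)
# total == 0. + 1. + 2. + 3. == 6.0; the StopIteration raised at the end of
# the dataset is caught and the accumulated state is returned.
```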
Example #18
Source File: distributed_dataset_test.py    From autograph with Apache License 2.0
def test_iterator_next_with_catching_stop_iteration(self):
    with self.assertRaises(tf.errors.OutOfRangeError):
      tf.function(iterator_next_with_catching_stop_iteration)(self.ds, self.dds,
                                                              tf.constant(True)) 
Example #19
Source File: distributed_dataset_test.py    From autograph with Apache License 2.0
def test_iterator_two_vars_loop(self):
    with self.assertRaises(RuntimeError):
      tf.function(iterator_two_vars_loop)(self.ds, self.dds) 
Example #20
Source File: distributed_dataset_test.py    From autograph with Apache License 2.0
def iterator_next_with_catching_stop_iteration(ds, dds, cond):
  # This is the one instance when the use of TF iterators does not work as
  # intended. In graph mode, the `except` below will never catch, and the
  # tf.function will raise the error instead.
  # TODO(b/132311724): The error should be friendlier here.
  # Note: b/132298783 covers actually supporting this pattern.
  itr = iter(dds)
  try:
    while cond:
      ds.reduce('SUM', next(itr), axis=None)
  except StopIteration:
    pass 
Example #21
Source File: reference_test_base.py    From autograph with Apache License 2.0
def assertFunctionMatchesEager(self, f, *args):
    compiled_data = self.runNative(tf.function(f), *args)
    native_data = self.runNative(f, *args)
    self.assertResultsMatch(f, args, native_data, compiled_data) 
Example #22
Source File: distributed_dataset_test.py    From autograph with Apache License 2.0
def test_iterator_single_var_loop(self):
    with self.assertRaises(RuntimeError):
      tf.function(iterator_single_var_loop)(self.ds, self.dds) 
Example #23
Source File: reference_test_base.py    From autograph with Apache License 2.0
def tf_function_custom(options=None):
  def fn(func):
    return tf.function(
        func,
        experimental_autograph_options=options)
  return fn 
Example #24
Source File: distributed_dataset_test.py    From autograph with Apache License 2.0
def test_iterator_no_vars_loop(self):
    with self.assertRaises(RuntimeError):
      tf.function(iterator_no_vars_loop)(self.ds, self.dds) 
Example #25
Source File: datasets_test.py    From autograph with Apache License 2.0
def test_dataset_loop_with_return_raises(self):
    # This is for the same reason that returns in loops aren't allowed.
    # TODO(mdan): This might be resolved by unrolling the loop once.
    with self.assertRaisesRegex(
        ValueError,
        'return statements are not supported within a TensorFlow loop.'):
      tf.function(dataset_loop_with_return)(self.ds) 
Example #26
Source File: cond_basic_test.py    From autograph with Apache License 2.0
def test_creates_var_imbalanced_illegal(self, target, c, type_, exc_type,
                                          exc_regex):
    c = type_(c)
    with self.assertRaisesRegex(exc_type, exc_regex):
      tf.function(target)(c) 
Example #27
Source File: datasets_test.py    From autograph with Apache License 2.0
def test_iterator_loop_with_return_raises(self):
    # This is for the same reason that returns in loops aren't allowed.
    # TODO(mdan): This might be resolved by unrolling the loop once.
    with self.assertRaisesRegex(
        ValueError,
        'return statements are not supported within a TensorFlow loop.'):
      tf.function(iterator_loop_with_return)(self.ds) 
Example #28
Source File: datasets_test.py    From autograph with Apache License 2.0
def test_iterator_next_stopping(self):
    # Graph ops raise OutOfRangeError, but eager ops raise StopIteration.
    with self.assertRaises(tf.errors.OutOfRangeError):
      tf.function(iterator_next_stopping)(self.ds, tf.constant(True)) 
Example #29
Source File: loop_with_variable_type_illegal_cases_test.py    From autograph with Apache License 2.0
def test_while_with_variable_dtype(self):
    with self.assertRaisesRegex(
        TypeError,
        '"n" has dtype int32 before the loop, but dtype float32 after'):
      tf.function(while_with_variable_dtype)() 
Example #30
Source File: datasets_test.py    From autograph with Apache License 2.0
def iterator_next_with_catching_stop_iteration(ds, cond):
  # This is the only instance when the use of TF iterators does not work as
  # intended. In graph mode, the `except` below will never catch, and the
  # tf.function will raise the error instead.
  # TODO(b/132311724): The error should be friendlier here.
  # Note: b/132298783 covers actually supporting this pattern.
  itr = iter(ds)
  try:
    while cond:
      next(itr)
  except StopIteration:
    pass