Python tensorflow.compat.v2.zeros_like() Examples

The following are 30 code examples of tensorflow.compat.v2.zeros_like(), drawn from open-source projects. Each example lists its source file and project so you can trace it back to the original code. You may also want to check out the other available functions and classes of the module tensorflow.compat.v2.
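Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what tf.compat.v2.zeros_like() does: it returns a tensor of zeros with the same shape as its input, with an optional dtype override.

import tensorflow.compat.v2 as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])
# Same shape and dtype as `x`: a 2x3 tensor of int32 zeros.
z = tf.zeros_like(x)
# Same shape, but with the dtype overridden to float64.
z64 = tf.zeros_like(x, dtype=tf.float64)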
Example #1
Source File: multidim_parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _discretize_boundary_conditions(dx0, dx1, alpha, beta, gamma):
  """Discretizes boundary conditions."""
  # Converts a boundary condition given as alpha V + beta V_n = gamma,
  # where V_n is the derivative w.r.t. the normal to the boundary, into
  # v0 = xi1 v1 + xi2 v2 + eta,
  # where v0 is the value at the boundary point of the grid, and v1, v2 are the
  # values at the next two grid points.
  # The expressions are exactly the same for both boundaries.

  if beta is None:
    # Dirichlet condition.
    if alpha is None:
      raise ValueError(
          "Invalid boundary conditions: alpha and beta can't both be None.")
    zeros = tf.zeros_like(gamma)
    return zeros, zeros, gamma / alpha

  denom = beta * dx1 * (2 * dx0 + dx1)
  if alpha is not None:
    denom += alpha * dx0 * dx1 * (dx0 + dx1)
  xi1 = beta * (dx0 + dx1) * (dx0 + dx1) / denom
  xi2 = -beta * dx0 * dx0 / denom
  eta = gamma * dx0 * dx1 * (dx0 + dx1) / denom
  return xi1, xi2, eta 
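A short usage sketch for the function above (illustrative values, not part of the original file): with beta=None the Dirichlet branch is taken, so the weights xi1 and xi2 come back as tf.zeros_like(gamma) and eta reduces to gamma / alpha.

import tensorflow.compat.v2 as tf

# Illustrative inputs (not from the original project).
gamma = tf.constant([2.0, 4.0])
xi1, xi2, eta = _discretize_boundary_conditions(
    dx0=0.1, dx1=0.1, alpha=2.0, beta=None, gamma=gamma)
# xi1 and xi2 are [0., 0.]; eta is gamma / alpha, i.e. [1., 2.].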
Example #2
Source File: lsm_test.py    From tf-quant-finance with Apache License 2.0
def test_expected_continuation(self):
    """Tests that expected continuation works in V=1 case.

    In particular this verifies that the regression done to get the expected
    continuation value is performed on those elements which have a positive
    exercise value.
    """
    for dtype in (np.float32, np.float64):
      a = tf.range(start=-2, limit=3, delta=1, dtype=dtype)
      design = tf.concat([a, a], axis=0)
      design = tf.concat([[tf.ones_like(design), design]], axis=1)

      # These values ensure that the expected continuation value is `(1, ..., 1)`.
      exercise_now = tf.expand_dims(
          tf.concat([tf.ones_like(a), tf.zeros_like(a)], axis=0), -1)
      cashflow = tf.expand_dims(
          tf.concat([tf.ones_like(a), -tf.ones_like(a)], axis=0), -1)

      expected_exercise = lsm.expected_exercise_fn(
          design, cashflow, exercise_now)
      self.assertAllClose(expected_exercise, tf.ones_like(cashflow)) 
Example #3
Source File: lsm_v2.py    From tf-quant-finance with Apache License 2.0
def _updated_cashflow(num_times, exercise_index, exercise_value,
                      expected_continuation, cashflow):
  """Revises the cashflow tensor where options will be exercised earlier."""
  do_exercise_bool = exercise_value > expected_continuation
  do_exercise = tf.cast(do_exercise_bool, exercise_value.dtype)
  # Shape [num_samples, payoff_dim]
  scaled_do_exercise = tf.where(do_exercise_bool, exercise_value,
                                tf.zeros_like(exercise_value))
  # This picks out the samples where we now wish to exercise.
  # Shape [num_samples, payoff_dim, 1]
  new_samp_masked = tf.expand_dims(scaled_do_exercise, axis=2)
  # This should be one on the current time step and zero otherwise.
  # This is an array with nonzero entries showing newly exercised payoffs.
  zeros = tf.zeros_like(cashflow)
  mask = tf.equal(tf.range(0, num_times), exercise_index - 1)
  new_cash = tf.where(mask, new_samp_masked, zeros)
  # Has shape [num_samples, payoff_dim, 1]
  old_mask = tf.expand_dims(1 - do_exercise, axis=2)
  mask = tf.range(0, num_times) >= exercise_index
  old_mask = tf.where(mask, old_mask, zeros)
  # Shape [num_samples, payoff_dim, num_times]
  old_cash = old_mask * cashflow
  return new_cash + old_cash 
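A tiny worked call of the function above under assumed shapes and values (one sample, one payoff dimension, three time steps): the exercise value 5 exceeds the expected continuation 3, so the payoff is written at step exercise_index - 1 and the later cashflow is zeroed out.

import tensorflow.compat.v2 as tf

# Illustrative inputs (not from the original project).
exercise_value = tf.constant([[5.0]])          # [num_samples, payoff_dim]
expected_continuation = tf.constant([[3.0]])   # [num_samples, payoff_dim]
cashflow = tf.constant([[[0.0, 0.0, 4.0]]])    # [num_samples, payoff_dim, num_times]
updated = _updated_cashflow(
    num_times=3, exercise_index=2, exercise_value=exercise_value,
    expected_continuation=expected_continuation, cashflow=cashflow)
# updated == [[[0., 5., 0.]]]: the exercise is recorded at time index 1 and the
# cashflow at the later time index is zeroed.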
Example #4
Source File: utils_test.py    From tf-quant-finance with Apache License 2.0
def test_maybe_update_along_axis(self, dtype):
    """Tests that the values are updated correctly."""
    tensor = tf.ones([5, 4, 3, 2], dtype=dtype)
    new_tensor = tf.zeros([5, 4, 1, 2], dtype=dtype)
    @tf.function
    def maybe_update_along_axis(do_update):
      return utils.maybe_update_along_axis(
          tensor=tensor, new_tensor=new_tensor, axis=1, ind=2,
          do_update=do_update)
    updated_tensor = maybe_update_along_axis(True)
    with self.subTest(name='Shape'):
      self.assertEqual(updated_tensor.shape, tensor.shape)
    with self.subTest(name='UpdatedVals'):
      self.assertAllEqual(updated_tensor[:, 2, :, :],
                          tf.zeros_like(updated_tensor[:, 2, :, :]))
    with self.subTest(name='NotUpdatedVals'):
      self.assertAllEqual(updated_tensor[:, 1, :, :],
                          tf.ones_like(updated_tensor[:, 2, :, :]))
    with self.subTest(name='DoNotUpdateVals'):
      not_updated_tensor = maybe_update_along_axis(False)
      self.assertAllEqual(not_updated_tensor, tensor) 
Example #5
Source File: brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _exact_sampling(self, end_times, start_times, num_samples, initial_state,
                      random_type, seed):
    """Returns a sample of paths from the process."""
    non_decreasing = tf.debugging.assert_greater_equal(
        end_times, start_times, message='Sampling times must be non-decreasing')
    starts_non_negative = tf.debugging.assert_greater_equal(
        start_times,
        tf.zeros_like(start_times),
        message='Sampling times must not be < 0.')
    with tf.compat.v1.control_dependencies(
        [starts_non_negative, non_decreasing]):
      drifts = self._total_drift_fn(start_times, end_times)
      covars = self._total_covariance_fn(start_times, end_times)
      # path_deltas are of shape [num_samples, size(times), dim].
      path_deltas = mvn.multivariate_normal((num_samples,),
                                            mean=drifts,
                                            covariance_matrix=covars,
                                            random_type=random_type,
                                            seed=seed)
      paths = tf.cumsum(path_deltas, axis=1)
    return paths

Example #6
Source File: parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _discretize_boundary_conditions(dx0, dx1, alpha, beta, gamma):
  """Discretizes boundary conditions."""
  # Converts a boundary condition given as alpha V + beta V_n = gamma,
  # where V_n is the derivative w.r.t. the normal to the boundary, into
  # v0 = xi1 v1 + xi2 v2 + eta,
  # where v0 is the value at the boundary point of the grid, and v1, v2 are the
  # values at the next two grid points.
  # The expressions are exactly the same for both boundaries.

  if beta is None:
    # Dirichlet condition.
    if alpha is None:
      raise ValueError(
          "Invalid boundary conditions: alpha and beta can't both be None.")
    zeros = tf.zeros_like(gamma)
    return zeros, zeros, gamma / alpha

  denom = beta * dx1 * (2 * dx0 + dx1)
  if alpha is not None:
    denom += alpha * dx0 * dx1 * (dx0 + dx1)
  xi1 = beta * (dx0 + dx1) * (dx0 + dx1) / denom
  xi2 = -beta * dx0 * dx0 / denom
  eta = gamma * dx0 * dx1 * (dx0 + dx1) / denom
  return xi1, xi2, eta 
Example #7
Source File: util.py    From language with Apache License 2.0
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    one-dimensional tensor top_labels, where top_labels[i]=1.0 iff the
    top-scoring prediction for batch element i has label 1.0
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather) 
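A small, hand-checked usage sketch for the function above (assumed inputs, not from the original repository): for each batch element it reports whether the top-scoring prediction has label 1.0.

import tensorflow.compat.v2 as tf

# Illustrative inputs (not from the original project).
labels = tf.constant([[0., 1., 0.],
                      [1., 0., 0.]])
predictions = tf.constant([[0.1, 0.8, 0.1],   # argmax is 1, label is 1.0
                           [0.2, 0.7, 0.1]])  # argmax is 1, label is 0.0
top_labels = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# top_labels == [1.0, 0.0]; its mean is precision at 1 for the batch.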
Example #8
Source File: array_ops.py    From trax with Apache License 2.0
def zeros_like(a, dtype=None):
  """Returns an array of zeros with the shape and type of the input array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the input array. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.
  """
  if isinstance(a, arrays_lib.ndarray):
    a = a.data
  if dtype is None:
    # We need to let utils.result_type decide the dtype, not tf.zeros_like
    dtype = utils.result_type(a)
  else:
    # TF and NumPy have different interpretations of Python types such as
    # `float`, so we let `utils.result_type` decide.
    dtype = utils.result_type(dtype)
  dtype = tf.as_dtype(dtype)  # Work around b/149877262
  return arrays_lib.tensor_to_ndarray(tf.zeros_like(a, dtype)) 
Example #9
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(times, *params):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    *params: Parameters of the Hull-White model. Either scalar `Tensor`s of the
      same `dtype` or instances of `PiecewiseConstantFunc`.

  Returns:
    Tuple `(all_times, mask)`.
    `all_times` is a 1-D real `Tensor` containing the point 0, all points from
    `times`, and the jump locations of piecewise constant parameters. The
    `Tensor` is sorted in ascending order and may contain duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that all_times[0]=0 and mask[0]=False.
  """
  additional_times = []
  for param in params:
    if hasattr(param, 'is_piecewise_constant'):
      if param.is_piecewise_constant:
        # Flatten all jump locations
        additional_times.append(tf.reshape(param.jump_locations(), [-1]))
  zeros = tf.constant([0], dtype=times.dtype)
  all_times = tf.concat([zeros] + [times] + additional_times, axis=0)
  additional_times_mask = [
      tf.zeros_like(times, dtype=tf.bool) for times in additional_times]
  mask = tf.concat([
      tf.cast(zeros, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ] + additional_times_mask, axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  mask = tf.gather(mask, perm)
  return all_times, mask 
Example #10
Source File: generic_ito_process_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_wiener(self, use_xla):
    """Tests paths properties for Wiener process (dX = dW)."""

    def drift_fn(_, x):
      return tf.zeros_like(x)

    def vol_fn(_, x):
      return tf.expand_dims(tf.ones_like(x), -1)

    process = GenericItoProcess(dim=1, drift_fn=drift_fn, volatility_fn=vol_fn)
    times = np.array([0.1, 0.2, 0.3])
    num_samples = 10000

    @tf.function
    def fn():
      return process.sample_paths(
          times=times, num_samples=num_samples, seed=42, time_step=0.01)

    if use_xla:
      paths = self.evaluate(tf.xla.experimental.compile(fn))[0]
    else:
      paths = self.evaluate(fn())

    means = np.mean(paths, axis=0).reshape([-1])
    covars = np.cov(paths.reshape([num_samples, -1]), rowvar=False)
    expected_means = np.zeros((3,))
    expected_covars = np.minimum(times.reshape([-1, 1]), times.reshape([1, -1]))
    with self.subTest(name="Means"):
      self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2)
    with self.subTest(name="Covar"):
      self.assertAllClose(covars, expected_covars, rtol=1e-2, atol=1e-2) 
Example #11
Source File: heston_model.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(times, time_step, dtype, *params):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    time_step: Rank 0 real `Tensor`. Maximal distance between points in
      resulting grid.
    dtype: `tf.Dtype` of the input and output `Tensor`s.
    *params: Parameters of the Heston model. Either scalar `Tensor`s of the
      same `dtype` or instances of `PiecewiseConstantFunc`.

  Returns:
    Tuple `(all_times, mask)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times`, the
    uniform grid of points between `[0, times[-1]]` with grid size equal to
    `time_step`, and the jump locations of piecewise constant parameters. The
    `Tensor` is sorted in ascending order and may contain duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that all_times[0]=0 and mask[0]=False.
  """
  grid = tf.range(0.0, times[-1], time_step, dtype=dtype)
  additional_times = []
  for param in params:
    if isinstance(param, piecewise.PiecewiseConstantFunc):
      additional_times.append(param.jump_locations())
  all_times = tf.concat([grid, times] + additional_times, axis=0)
  additional_times_mask = [
      tf.zeros_like(times, dtype=tf.bool) for times in additional_times]
  mask = tf.concat([
      tf.zeros_like(grid, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ] + additional_times_mask, axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  mask = tf.gather(mask, perm)
  return all_times, mask 
Example #12
Source File: lsm.py    From tf-quant-finance with Apache License 2.0
def _updated_cashflow(num_times, exercise_index, exercise_value,
                      expected_continuation, cashflow):
  """Revises the cashflow tensor where options will be exercised earlier."""
  do_exercise_bool = exercise_value > expected_continuation
  do_exercise = tf.cast(do_exercise_bool, exercise_value.dtype)
  # Shape [num_samples, payoff_dim]
  scaled_do_exercise = tf.where(do_exercise_bool, exercise_value,
                                tf.zeros_like(exercise_value))
  # This picks out the samples where we now wish to exercise.
  # Shape [num_samples, payoff_dim, 1]
  new_samp_masked = tf.expand_dims(scaled_do_exercise, 2)
  # This should be one on the current time step and zero otherwise.
  # This is an array with nonzero entries showing newly exercised payoffs.
  pad_shape = scaled_do_exercise.shape.as_list()
  zeros_before = tf.zeros(pad_shape + [exercise_index - 1],
                          dtype=scaled_do_exercise.dtype)
  zeros_after = tf.zeros(pad_shape + [num_times - exercise_index],
                         dtype=scaled_do_exercise.dtype)
  new_cash = tf.concat([zeros_before, new_samp_masked, zeros_after], -1)

  # Has shape [num_samples, payoff_dim, 1]
  old_samp_masker = tf.expand_dims(1 - do_exercise, 2)
  # Broadcast to shape [num_samples, payoff_dim, num_times - exercise_index]
  old_samp_masker_after = tf.broadcast_to(
      old_samp_masker, pad_shape + [num_times - exercise_index])
  # Has shape `[num_samples, payoff_dim, exercise_index]`
  zeros_before = tf.zeros(pad_shape + [exercise_index],
                          dtype=scaled_do_exercise.dtype)
  # Shape [num_samples, payoff_dim, num_times]
  old_mask = tf.concat([zeros_before,
                        old_samp_masker_after], -1)
  # Shape [num_samples, payoff_dim, num_times]
  old_cash = old_mask * cashflow
  return new_cash + old_cash 
Example #13
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(*, times, time_step, dtype):
  """Prepares grid of times for path generation.

  Args:
    times:  Rank 1 `Tensor` of increasing positive real values. The times at
      which the path points are to be evaluated.
    time_step: Rank 0 real `Tensor`. Maximal distance between points in
      resulting grid.
    dtype: `tf.Dtype` of the input and output `Tensor`s.

  Returns:
    Tuple `(all_times, mask, time_indices)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times` and
    the uniform grid of points between `[0, times[-1]]` with grid size equal to
    `time_step`. The `Tensor` is sorted in ascending order and may contain
    duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that all_times[0]=0 and mask[0]=False.
    `time_indices` is an integer `Tensor` of the same shape as `times`,
    indicating the indices of `times` in `all_times`.
  """
  grid = tf.range(0.0, times[-1], time_step, dtype=dtype)
  all_times = tf.concat([grid, times], axis=0)
  mask = tf.concat([
      tf.zeros_like(grid, dtype=tf.bool),
      tf.ones_like(times, dtype=tf.bool)
  ],
                   axis=0)
  perm = tf.argsort(all_times, stable=True)
  all_times = tf.gather(all_times, perm)
  # Remove duplicate points
  all_times = tf.unique(all_times).y
  time_indices = tf.searchsorted(all_times, times)
  mask = tf.gather(mask, perm)
  return all_times, mask, time_indices 
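A hedged, hand-worked usage sketch for the function above (illustrative values only): with a uniform step of 0.25 and requested times [0.3, 0.5], the grid contains the uniform points plus the requested times, the mask flags the requested times, and time_indices locates them in all_times.

import tensorflow.compat.v2 as tf

# Illustrative inputs (not from the original project).
times = tf.constant([0.3, 0.5], dtype=tf.float64)
all_times, mask, time_indices = _prepare_grid(
    times=times, time_step=0.25, dtype=tf.float64)
# all_times    == [0.0, 0.25, 0.3, 0.5]
# mask         == [False, False, True, True]
# time_indices == [2, 3]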
Example #14
Source File: monotone_convex.py    From tf-quant-finance with Apache License 2.0
def _region_3(g1plus2g0, g0plus2g1, g0, g1, x):
  """Computes conditional and value for points in region 3."""
  del g1plus2g0
  # Reference: Eq. 30, 31 in Ref [2]
  is_region_3 = (((g1 <= 0) & (g0plus2g1 > 0)) | ((g1 >= 0) & (g0plus2g1 < 0)))
  eta = 3 * g1 / (g1 - g0)
  x_cap = tf.math.minimum(x, eta)
  ratio = (eta - x_cap) / eta
  # Replace NaN values (corresponding to g1 == 0) with zeros.
  ratio = tf.where(tf.math.is_nan(ratio), tf.zeros_like(ratio), ratio)
  region_3_value = g1 + (g0 - g1) * tf.math.square(ratio)
  integrated_value = g1 * x + eta * (g0 - g1) / 3 * (1 - ratio**3)
  return is_region_3, region_3_value, integrated_value 
Example #15
Source File: brownian_motion_utils.py    From tf-quant-finance with Apache License 2.0
def _volatility_fn_from_total_covar_fn(total_covariance_fn):
  """Volatility function from total covariance function."""

  def vol_fn(time):
    # We should consider changing the start time to be some small dt behind
    # the time. In case the total covariance is being computed by a numerical
    # integration, this will mean that we spend less time iterating.
    start_time = tf.zeros_like(time)
    total_covar_fn = lambda t: total_covariance_fn(start_time, t)
    vol_sq = gradient.fwd_gradient(total_covar_fn, time)
    return tf.linalg.cholesky(vol_sq, name='volatility')

  return vol_fn 
Example #16
Source File: ito_process.py    From tf-quant-finance with Apache License 2.0
def _prepare_grid(self, times, grid_step):
    """Prepares grid of times for path generation.

    Args:
      times:  Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      grid_step: Rank 0 real `Tensor`. Maximal distance between points in
        resulting grid.

    Returns:
      Tuple `(all_times, mask)`.
      `all_times` is a 1-D real `Tensor` containing all points from `times`,
      with consecutive points at most `grid_step` apart.
      `mask` is a boolean 1-D tensor of the same shape as `all_times`, showing
      which elements of `all_times` correspond to values from `times`.
      Guarantees that all_times[0]=0 and mask[0]=False.
      `all_times` is sorted in ascending order and may contain duplicates.
    """
    grid = tf.range(0.0, times[-1], grid_step, dtype=self._dtype)
    all_times = tf.concat([grid, times], axis=0)
    mask = tf.concat([
        tf.zeros_like(grid, dtype=tf.bool),
        tf.ones_like(times, dtype=tf.bool)
    ],
                     axis=0)
    perm = tf.argsort(all_times, stable=True)
    all_times = tf.gather(all_times, perm)
    mask = tf.gather(mask, perm)
    return all_times, mask 
Example #17
Source File: bond_curve_test.py    From tf-quant-finance with Apache License 2.0
def _compute_pv(cashflows, cashflow_times, reference_rates, reference_times):
  times = tf.concat(cashflow_times, axis=0)
  groups = tf.concat([
      tf.zeros_like(cashflow, dtype=tf.int32) + i
      for i, cashflow in enumerate(cashflows)
  ],
                     axis=0)
  rates = monotone_convex.interpolate_yields(
      times, reference_times, yields=reference_rates)
  discounts = tf.math.exp(-times * rates)
  cashflows = tf.concat(cashflows, axis=0)
  return tf.math.segment_sum(discounts * cashflows, groups) 
Example #18
Source File: euler_sampling_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_wiener(self, watch_params):
    """Tests paths properties for Wiener process (dX = dW)."""

    def drift_fn(_, x):
      return tf.zeros_like(x)

    def vol_fn(_, x):
      return tf.expand_dims(tf.ones_like(x), -1)

    times = np.array([0.1, 0.2, 0.3])
    num_samples = 10000
    if watch_params:
      watch_params = []
    else:
      watch_params = None
    paths = euler_sampling.sample(
        dim=1, drift_fn=drift_fn, volatility_fn=vol_fn,
        times=times, num_samples=num_samples, seed=42, time_step=0.005,
        watch_params=watch_params)
    self.assertAllEqual(paths.shape.as_list(), [num_samples, 3, 1])
    paths = self.evaluate(paths)
    means = np.mean(paths, axis=0).reshape([-1])
    covars = np.cov(paths.reshape([num_samples, -1]), rowvar=False)
    expected_means = np.zeros((3,))
    expected_covars = np.minimum(times.reshape([-1, 1]), times.reshape([1, -1]))
    self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2)
    self.assertAllClose(covars, expected_covars, rtol=1e-2, atol=1e-2) 
Example #19
Source File: utils.py    From valan with Apache License 2.0
def gather_from_dict(tensor_dict, choice):
  """Chooses tensor values along first dimension using given choice.

  If `tensor_dict` = {
    0: zeros(shape=(6)),
    1: ones(shape=(6)),
    2: twos(shape=(6)),
    3: threes(shape=(6))
  }
  and choice = [0, 0, 2, 2, 1, 0]
  then returned tensor is [0., 0., 2., 2., 1., 0.]

  Args:
    tensor_dict: A dict with int keys and tensor values. All tensor values must
      be of same type and shape.
    choice: A 1-d int tensor with number of elements equal to first dimension of
      tensors in `tensor_dict`. The values in the tensor must be valid keys in
      `tensor_dict`.

  Returns:
    A tensor of same type and shape as tensors in `tensor_dict`.
  """
  one_tensor = next(iter(tensor_dict.values()))

  # Check number of elements in `choice`.
  tf.debugging.assert_rank(choice, rank=1)
  tf.debugging.assert_equal(tf.size(choice), tf.shape(one_tensor)[0])

  zeros_tensor = tf.zeros_like(one_tensor)
  final_tensor = zeros_tensor
  for c, t in tensor_dict.items():
    # Check shapes and type
    tf.debugging.assert_equal(tf.shape(t), tf.shape(one_tensor))
    tf.debugging.assert_type(t, tf_type=one_tensor.dtype)
    final_tensor += tf.compat.v1.where(tf.equal(choice, c), t, zeros_tensor)
  return final_tensor 
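A runnable sketch of gather_from_dict using values that mirror the docstring's own example (the setup below is assumed, not from the original repository):

import tensorflow.compat.v2 as tf

# Illustrative inputs (not from the original project).
tensor_dict = {i: tf.fill([6], float(i)) for i in range(4)}
choice = tf.constant([0, 0, 2, 2, 1, 0])
result = gather_from_dict(tensor_dict, choice)
# result == [0., 0., 2., 2., 1., 0.]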
Example #20
Source File: array_ops.py    From trax with Apache License 2.0
def empty_like(a, dtype=None):
  """Returns an empty array with the shape and possibly type of the input array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the input array. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.
  """
  return zeros_like(a, dtype) 
Example #21
Source File: cubic_interpolation.py    From tf-quant-finance with Apache License 2.0
def _validate_arguments(x_data):
  """Checks that input arguments are in the non-decreasing order."""
  diffs = x_data[..., 1:] - x_data[..., :-1]
  return tf.compat.v1.debugging.assert_greater_equal(
      diffs,
      tf.zeros_like(diffs),
      message="x_data is not sorted in non-decreasing order.") 
Example #22
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_float32(self):
    minimum = np.array([1.0, 1.0], dtype=np.float32)
    scales = np.array([2.0, 3.0], dtype=np.float32)
    start = np.zeros_like(minimum)

    @tff.math.make_val_and_grad_fn
    def quadratic(x):
      return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

    result = tff.math.optimizer.conjugate_gradient_minimize(
        quadratic, initial_position=start)
    self.assertEqual(result.position.dtype, tf.float32)
    self.assertArrayNear(self.evaluate(result.position), minimum, 1e-5) 
Example #23
Source File: conjugate_gradient_test.py    From tf-quant-finance with Apache License 2.0
def test_multiple_functions(self):
    # Define 3 independent quadratic functions, each with its own minimum.
    minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
    self._check_algorithm(
        func=func, start_point=np.zeros_like(minima), expected_argmin=minima) 
Example #24
Source File: custom_loops.py    From tf-quant-finance with Apache License 2.0
def _jacobian_wrt_parameter(y, param, tape):
  """Computes a Jacobian w.r.t. a parameter."""
  # For input shapes (b, dy), yields shape (b, dy, 1) (1 is added for
  # convenience elsewhere).
  # To avoid having to broadcast param to y's shape, we need to take a forward
  # gradient.
  with tf.GradientTape() as w_tape:
    w = tf.zeros_like(y)
    w_tape.watch(w)
    vjp = tape.gradient(y, param, output_gradients=w)
  if vjp is None:  # Unconnected.
    return tf.expand_dims(tf.zeros_like(y), axis=-1)
  return tf.expand_dims(w_tape.gradient(vjp, w), axis=-1) 
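A minimal sketch of how the double-tape trick above can be exercised (assumed setup, not from the original module): the outer tape records y as a function of the parameter, and the helper returns dy/dparam with a trailing singleton axis.

import tensorflow.compat.v2 as tf

# Illustrative setup (not from the original project).
param = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as tape:
  y = param * tf.constant([1.0, 2.0])   # shape (2,), depends on `param`
jac = _jacobian_wrt_parameter(y, param, tape)
# jac has shape (2, 1) and equals [[1.], [2.]], i.e. dy_i / dparam.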
Example #25
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        # Normalize kernel first so that calling the layer calculates
        # `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
        self._compute_weights()

        activation = self.layer.activation
        self.layer.activation = None

        use_bias = self.layer.bias is not None
        if use_bias:
            bias = self.layer.bias
            self.layer.bias = tf.zeros_like(bias)

        # Since the bias is initialized as zero, setting the activation to zero and
        # calling the initialized layer (with normalized kernel) yields the correct
        # computation ((5) in Salimans and Kingma (2016))
        x_init = self.layer(inputs)
        norm_axes_out = list(range(x_init.shape.rank - 1))
        m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
        scale_init = 1. / tf.sqrt(v_init + 1e-10)

        self.g.assign(self.g * scale_init)
        if use_bias:
            self.layer.bias = bias
            self.layer.bias.assign(-m_init * scale_init)
        self.layer.activation = activation 
Example #26
Source File: multidim_parabolic_equation_stepper_test.py    From tf-quant-finance with Apache License 2.0
def _reference_pde_solution(xs, t, num_terms=5):
  """Solution for the reference diffusion equation."""
  u = tf.zeros_like(xs)
  for k in range(num_terms):
    n = 2 * k + 1
    term = tf.math.sin(np.pi * n * xs) * tf.math.exp(-n**2 * np.pi**2 * t)
    term *= 4 / (np.pi**2 * n**2)
    if k % 2 == 1:
      term *= -1
    u += term
  return u 
Example #27
Source File: parabolic_equation_stepper_test.py    From tf-quant-finance with Apache License 2.0
def _reference_pde_solution(xs, t, num_terms=5):
  """Solution for the reference diffusion equation."""
  u = tf.zeros_like(xs)
  for k in range(num_terms):
    n = 2 * k + 1
    term = tf.math.sin(np.pi * n * xs) * tf.math.exp(-n**2 * np.pi**2 * t)
    term *= 4 / (np.pi**2 * n**2)
    if k % 2 == 1:
      term *= -1
    u += term
  return u 
Example #28
Source File: american_option_test.py    From tf-quant-finance with Apache License 2.0
def test_option_prices_no_cost_of_carries(self,
                                            dtype,
                                            discount_rates,
                                            volatilities,
                                            expiries,
                                            expected_prices):
    """Tests the prices when no cost_of_carries is supplied."""
    spots = np.array([80.0, 90.0, 100.0, 110.0, 120.0])
    strikes = np.array([100.0, 100.0, 100.0, 100.0, 100.0])
    is_call_options = False
    computed_prices, converged, failed = adesi_whaley(
        volatilities=volatilities,
        strikes=strikes,
        expiries=expiries,
        discount_rates=discount_rates,
        spots=spots,
        is_call_options=is_call_options,
        tolerance=1e-5,  # float32 does not converge to tolerance 1e-8
        dtype=dtype)
    with self.subTest(name='ExpectedPrices'):
      self.assertAllClose(expected_prices, computed_prices,
                          rtol=5e-3, atol=5e-3)
    with self.subTest(name='AllConverged'):
      self.assertAllEqual(converged, tf.ones_like(computed_prices))
    with self.subTest(name='NonFailed'):
      self.assertAllEqual(failed, tf.zeros_like(computed_prices)) 
Example #29
Source File: american_option_test.py    From tf-quant-finance with Apache License 2.0
def test_option_prices_zero_cost_of_carries(self,
                                              discount_rates,
                                              volatilities,
                                              expiries,
                                              expected_prices):
    """Tests the prices when cost_of_carries is zero."""
    forwards = np.array([80.0, 90.0, 100.0, 110.0, 120.0] * 2)
    strikes = np.array([100.0] * 10)
    is_call_options = [True] * 5 + [False] * 5
    cost_of_carries = 0.
    computed_prices, converged, failed = adesi_whaley(
        volatilities=volatilities,
        strikes=strikes,
        expiries=expiries,
        discount_rates=discount_rates,
        cost_of_carries=cost_of_carries,
        forwards=forwards,
        is_call_options=is_call_options,
        dtype=tf.float64)
    with self.subTest(name='ExpectedPrices'):
      self.assertAllClose(expected_prices, computed_prices,
                          rtol=5e-3, atol=5e-3)
    with self.subTest(name='AllConverged'):
      self.assertAllEqual(converged, tf.ones_like(computed_prices))
    with self.subTest(name='NonFailed'):
      self.assertAllEqual(failed, tf.zeros_like(computed_prices)) 
Example #30
Source File: american_option_test.py    From tf-quant-finance with Apache License 2.0
def test_option_prices_pos_carries(self,
                                     discount_rates,
                                     volatilities,
                                     expiries,
                                     expected_prices):
    """Tests the prices for positive cost_of_carries."""
    spots = np.array([80.0, 90.0, 100.0, 110.0, 120.0] * 2)
    strikes = np.array([100.0] * 10)
    is_call_options = [True] * 5 + [False] * 5
    cost_of_carries = 0.04
    computed_prices, converged, failed = adesi_whaley(
        volatilities=volatilities,
        strikes=strikes,
        expiries=expiries,
        discount_rates=discount_rates,
        cost_of_carries=cost_of_carries,
        spots=spots,
        is_call_options=is_call_options,
        dtype=tf.float64)
    with self.subTest(name='ExpectedPrices'):
      self.assertAllClose(expected_prices, computed_prices,
                          rtol=5e-3, atol=5e-3)
    with self.subTest(name='AllConverged'):
      self.assertAllEqual(converged, tf.ones_like(computed_prices))
    with self.subTest(name='NonFailed'):
      self.assertAllEqual(failed, tf.zeros_like(computed_prices))