Python tensorflow.compat.v2.sqrt() Examples

The following are 26 code examples of tensorflow.compat.v2.sqrt(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2, or try the search function.
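
Before diving into the project code, here is a minimal, self-contained sketch (not drawn from any project below) of basic element-wise usage. Note that tf.sqrt requires a floating-point or complex dtype and returns NaN for negative real inputs:

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, 4.0, 9.0])
print(tf.sqrt(x))                    # tf.Tensor([1. 2. 3.], shape=(3,), dtype=float32)
print(tf.sqrt(tf.constant([-1.0])))  # tf.Tensor([nan], shape=(1,), dtype=float32)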
Example #1
Source File: generic_ito_process_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_dtypes(self):
    """Sampled paths have the expected dtypes."""
    for dtype in [np.float32, np.float64]:
      drift_fn = lambda t, x: tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
      vol_fn = lambda t, x: t * tf.ones([1, 1], dtype=t.dtype)
      process = GenericItoProcess(
          dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=dtype)

      paths = self.evaluate(
          process.sample_paths(
              times=[0.1, 0.2],
              num_samples=10,
              initial_state=[0.1],
              time_step=0.01,
              seed=123))
      self.assertEqual(paths.dtype, dtype)

  # Several tests below are unit tests for GenericItoProcess.fd_solver_backward:
  # they mock out the PDE solver and check only the conversion of SDE to PDE,
  # but not PDE solving. There are also integration tests further below. 
Example #2
Source File: quantizers.py    From qkeras with Apache License 2.0
def __init__(self,
               bits=8,
               max_value=None,
               use_stochastic_rounding=False,
               quadratic_approximation=False):
    self.bits = bits
    self.max_value = max_value
    self.use_stochastic_rounding = use_stochastic_rounding
    # If True, round exponents to even values so that the exponent of sqrt(x)
    # is an integer, i.e. the exponent can be halved without remainder.
    self.quadratic_approximation = quadratic_approximation
    need_exponent_sign_bit = _need_exponent_sign_bit_check(self.max_value)
    self._min_exp = -2**(self.bits - need_exponent_sign_bit)
    self._max_exp = 2**(self.bits - need_exponent_sign_bit) - 1
    if self.quadratic_approximation:
      self._max_exp = 2 * (self._max_exp // 2) 
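
To make the quadratic-approximation bookkeeping above concrete, here is a tiny standalone sketch of the same arithmetic (plain Python; the helper name is hypothetical and not part of qkeras):

def max_exponent(bits, need_exponent_sign_bit, quadratic_approximation):
  # Mirrors the computation of self._max_exp in the __init__ above.
  max_exp = 2**(bits - need_exponent_sign_bit) - 1
  if quadratic_approximation:
    # Round down to an even exponent, so sqrt halves it without remainder.
    max_exp = 2 * (max_exp // 2)
  return max_exp

print(max_exponent(8, 1, False))  # 127
print(max_exponent(8, 1, True))   # 126, the largest even exponent <= 127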
Example #3
Source File: heston_model.py    From tf-quant-finance with Apache License 2.0
def _update_log_spot(
    kappa, theta, epsilon, rho,
    current_vol, next_vol, current_log_spot, time_step, normals,
    gamma_1=0.5, gamma_2=0.5):
  """Updates log-spot value."""
  k_0 = - rho * kappa * theta / epsilon * time_step
  k_1 = (gamma_1 * time_step
         * (kappa * rho / epsilon - 0.5)
         - rho / epsilon)
  k_2 = (gamma_2 * time_step
         * (kappa * rho / epsilon - 0.5)
         + rho / epsilon)
  k_3 = gamma_1 * time_step * (1 - rho**2)
  k_4 = gamma_2 * time_step * (1 - rho**2)

  next_log_spot = (
      current_log_spot + k_0 + k_1 * current_vol + k_2 * next_vol
      + tf.sqrt(k_3 * current_vol + k_4 * next_vol) * normals)
  return next_log_spot 
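
A minimal sketch of invoking the update with scalar toy inputs (assuming tf is tensorflow.compat.v2; all parameter values below are made up for illustration). Note that with the default gamma_1 = gamma_2 = 0.5, the variance term under the square root is the average of the current and next variance over the step:

import tensorflow.compat.v2 as tf

kappa, theta, epsilon, rho = 1.5, 0.04, 0.3, -0.7
next_log_spot = _update_log_spot(
    kappa, theta, epsilon, rho,
    current_vol=tf.constant(0.04), next_vol=tf.constant(0.041),
    current_log_spot=tf.constant(0.0), time_step=0.01,
    normals=tf.constant(0.5))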
Example #4
Source File: euler_sampling_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_dtypes(self):
    """Sampled paths have the expected dtypes."""
    for dtype in [np.float32, np.float64]:
      drift_fn = lambda t, x: tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
      vol_fn = lambda t, x: t * tf.ones([1, 1], dtype=t.dtype)

      paths = self.evaluate(
          euler_sampling.sample(
              dim=1,
              drift_fn=drift_fn, volatility_fn=vol_fn,
              times=[0.1, 0.2],
              num_samples=10,
              initial_state=[0.1],
              time_step=0.01,
              seed=123,
              dtype=dtype))

      self.assertEqual(paths.dtype, dtype) 
Example #5
Source File: gdn_test.py    From compression with Apache License 2.0
def test_rgdn_has_correct_output(self):
    x = tf.random.uniform((1, 2, 3, 4), -.5, .5, dtype=tf.float32)
    y = gdn.GDN(inverse=False, rectify=True)(x)
    self.assertEqual(x.shape, y.shape)
    x = tf.maximum(x, 0)
    self.assertAllClose(y, x / tf.sqrt(1 + .1 * (x ** 2)), rtol=0, atol=1e-6) 
Example #6
Source File: generic_ito_process_test.py    From tf-quant-finance with Apache License 2.0
def _gaussian(xs, variance):
  return np.exp(-np.square(xs) / (2 * variance)) / np.sqrt(2 * np.pi * variance) 
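
As an illustrative sanity check, the density above integrates to one (using numpy only; the variance value is arbitrary):

import numpy as np

xs = np.linspace(-10.0, 10.0, 10001)
print(np.trapz(_gaussian(xs, variance=2.0), xs))  # ~1.0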
Example #7
Source File: generic_ito_process_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_2d(self):
    """Tests path properties for 2-dimentional Ito process.

    We construct the following Ito processes.

    dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
    dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2

    mu_1, mu_2 are constants.
    s_ij = a_ij t + b_ij

    For this process the expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.
    """
    mu = np.array([0.2, 0.7])
    a = np.array([[0.4, 0.1], [0.3, 0.2]])
    b = np.array([[0.33, -0.03], [0.21, 0.5]])

    def drift_fn(t, x):
      return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)

    def vol_fn(t, x):
      del x
      return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)

    num_samples = 10000
    process = GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn)
    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    x0 = np.array([0.1, -1.1])
    paths = self.evaluate(
        process.sample_paths(
            times,
            num_samples=num_samples,
            initial_state=x0,
            time_step=0.01,
            seed=12134))

    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
    means = np.mean(paths, axis=0)
    times = np.reshape(times, [-1, 1])
    expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)
    self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2) 
Example #8
Source File: heston_model.py    From tf-quant-finance with Apache License 2.0
def _update_variance(
    kappa, theta, epsilon, rho,
    current_vol, time_step, normals, psi_c=1.5):
  """Updates variance value."""
  del rho
  psi_c = tf.convert_to_tensor(psi_c, dtype=kappa.dtype)
  scaled_time = tf.exp(-kappa * time_step)
  epsilon_squared = epsilon**2
  m = theta + (current_vol - theta) * scaled_time
  s_squared = (
      current_vol * epsilon_squared * scaled_time / kappa
      * (1 - scaled_time) + theta * epsilon_squared / 2 / kappa
      * (1 - scaled_time)**2)
  psi = s_squared / m**2
  uniforms = 0.5 * (1 + tf.math.erf(normals[..., 0] / _SQRT_2))
  cond = psi < psi_c
  # Result where `cond` is true
  psi_inv = 2 / psi
  b_squared = psi_inv - 1 + tf.sqrt(psi_inv * (psi_inv - 1))

  a = m / (1 + b_squared)
  next_var_true = a * (tf.sqrt(b_squared) + tf.squeeze(normals[..., 1]))**2
  # Result where `cond` is false
  p = (psi - 1) / (psi + 1)
  beta = (1 - p) / m
  next_var_false = tf.where(uniforms > p,
                            tf.math.log(1 - p) - tf.math.log(1 - uniforms),
                            tf.zeros_like(uniforms)) / beta
  next_var = tf.where(cond, next_var_true, next_var_false)
  return next_var 
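
The uniforms line above uses the identity Phi(z) = (1 + erf(z / sqrt(2))) / 2 to map standard normal draws to uniform variates via the normal CDF. A standalone check (assuming tf is tensorflow.compat.v2):

import tensorflow.compat.v2 as tf

SQRT_2 = 2**0.5
z = tf.constant([-1.96, 0.0, 1.96], dtype=tf.float64)
print(0.5 * (1 + tf.math.erf(z / SQRT_2)))  # ~[0.025 0.5 0.975]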
Example #9
Source File: univariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self,
                    times,
                    num_requested_times,
                    initial_state,
                    num_samples,
                    random_type,
                    seed,
                    skip):
    """Returns a sample of paths from the process."""
    # Normal draws needed for sampling
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=1, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed,
        dtype=self._dtype, skip=skip)
    times = tf.concat([[0], times], -1)
    dt = times[1:] - times[:-1]
    # The logarithm of all the increments between the times.
    log_increments = ((self._mu - self._sigma**2 / 2) * dt
                      + tf.sqrt(dt) * self._sigma
                      * tf.transpose(tf.squeeze(normal_draws, -1)))
    # Since the implementation of tf.math.cumsum is single-threaded, we
    # use lower-triangular matrix multiplication instead.
    once = tf.ones([num_requested_times, num_requested_times],
                   dtype=self._dtype)
    lower_triangular = tf.linalg.band_part(once, -1, 0)
    cumsum = tf.linalg.matvec(lower_triangular,
                              log_increments)
    samples = initial_state * tf.math.exp(cumsum)
    return tf.expand_dims(samples, -1)

  # TODO(b/152967694): Remove the duplicate methods. 
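
The lower-triangular trick above computes the same result as a cumulative sum along the time axis; a minimal equivalence check (assuming tf is tensorflow.compat.v2):

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
lower_triangular = tf.linalg.band_part(tf.ones([4, 4]), -1, 0)
print(tf.linalg.matvec(lower_triangular, x))  # [ 1.  3.  6. 10.]
print(tf.math.cumsum(x))                      # [ 1.  3.  6. 10.]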
Example #10
Source File: gdn_test.py    From compression with Apache License 2.0
def test_channels_last_has_correct_output(self):
    # This tests that the layer produces the correct output for a number of
    # different input dimensionalities with 'channels_last' data format.
    for ndim in [2, 3, 4, 5, 6]:
      x = tf.random.uniform((1, 2, 3, 4, 5, 6)[:ndim], dtype=tf.float32)
      y = gdn.GDN(inverse=False, rectify=False, data_format="channels_last")(x)
      self.assertEqual(x.shape, y.shape)
      self.assertAllClose(y, x / tf.sqrt(1 + .1 * (x ** 2)), rtol=0, atol=1e-6) 
Example #11
Source File: gdn_test.py    From compression with Apache License 2.0
def test_channels_first_has_correct_output(self):
    # This tests that the layer produces the correct output for a number of
    # different input dimensionalities with 'channels_first' data format.
    for ndim in [2, 3, 4, 5, 6]:
      x = tf.random.uniform((6, 5, 4, 3, 2, 1)[:ndim], dtype=tf.float32)
      y = gdn.GDN(inverse=False, rectify=False, data_format="channels_first")(x)
      self.assertEqual(x.shape, y.shape)
      self.assertAllClose(y, x / tf.sqrt(1 + .1 * (x ** 2)), rtol=0, atol=1e-6) 
Example #12
Source File: gdn_test.py    From compression with Apache License 2.0
def test_igdn_has_correct_output(self):
    x = tf.random.uniform((1, 2, 3, 4), dtype=tf.float32)
    y = gdn.GDN(inverse=True, rectify=False)(x)
    self.assertEqual(x.shape, y.shape)
    self.assertAllClose(y, x * tf.sqrt(1 + .1 * (x ** 2)), rtol=0, atol=1e-6) 
Example #13
Source File: euler_sampling_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_1d(self):
    """Tests path properties for 1-dimentional Ito process.

    We construct the following Ito process.

    ```
    dX = mu * sqrt(t) * dt + (a * t + b) dW
    ```

    For this process the expected value at time t is x_0 + 2/3 * mu * t^1.5.
    """
    mu = 0.2
    a = 0.4
    b = 0.33

    def drift_fn(t, x):
      return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)

    def vol_fn(t, x):
      del x
      return (a * t + b) * tf.ones([1, 1], dtype=t.dtype)

    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    num_samples = 10000
    x0 = np.array([0.1])
    paths = self.evaluate(
        euler_sampling.sample(
            dim=1,
            drift_fn=drift_fn, volatility_fn=vol_fn,
            times=times, num_samples=num_samples, initial_state=x0,
            time_step=0.01, seed=12134))

    self.assertAllClose(paths.shape, (num_samples, 5, 1), atol=0)
    means = np.mean(paths, axis=0).reshape(-1)
    expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)
    self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2) 
Example #14
Source File: math_ops.py    From trax with Apache License 2.0
def hypot(x1, x2):
  return sqrt(square(x1) + square(x2)) 
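
Usage mirrors np.hypot, e.g. the 3-4-5 triangle. A standalone equivalent for illustration (assuming tf is tensorflow.compat.v2 and bypassing trax's _scalar wrapper):

import tensorflow.compat.v2 as tf

def hypot(x1, x2):
  return tf.sqrt(tf.square(x1) + tf.square(x2))

print(hypot(tf.constant(3.0), tf.constant(4.0)))  # tf.Tensor(5.0, ...)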
Example #15
Source File: sobol_test.py    From tf-quant-finance with Apache License 2.0
def test_normal_integral_mean_and_var_correctly_estimated(self):
    n = int(1000)
    # This test is almost identical to the similarly named test in
    # monte_carlo_test.py. The only difference is that we use the Sobol
    # samples instead of the random samples to evaluate the expectations.
    # MC with pseudo-random numbers converges at a rate of 1/sqrt(N)
    # (N = number of samples). For QMC in low dimensions, the expected
    # convergence rate is ~1/N. Hence we should only need 1e3 samples, as
    # compared to the 1e6 samples used in the pseudo-random Monte Carlo.
    dtype = tf.float64
    mu_p = tf.constant([-1., 1.], dtype=dtype)
    mu_q = tf.constant([0., 0.], dtype=dtype)
    sigma_p = tf.constant([0.5, 0.5], dtype=dtype)
    sigma_q = tf.constant([1., 1.], dtype=dtype)
    p = tfp.distributions.Normal(loc=mu_p, scale=sigma_p)
    q = tfp.distributions.Normal(loc=mu_q, scale=sigma_q)

    cdf_sample = random.sobol.sample(2, n, dtype=dtype)
    q_sample = q.quantile(cdf_sample)

    # Compute E_p[X].
    e_x = tf.reduce_mean(q_sample * p.prob(q_sample) / q.prob(q_sample), 0)

    # Compute E_p[X^2 - E_p[X]^2].
    e_x2 = tf.reduce_mean(q_sample**2 * p.prob(q_sample) / q.prob(q_sample)
                          - e_x**2, 0)
    stddev = tf.sqrt(e_x2)

    # Keep the tolerance levels the same as in monte_carlo_test.py.
    self.assertEqual(p.batch_shape, e_x.shape)
    self.assertAllClose(self.evaluate(p.mean()), self.evaluate(e_x), rtol=0.01)
    self.assertAllClose(
        self.evaluate(p.stddev()), self.evaluate(stddev), rtol=0.02) 
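
This test and the Halton test below both rely on the importance-sampling identity E_p[f(X)] = E_q[f(X) p(X) / q(X)]. A minimal numpy-only illustration of the reweighting, with made-up parameters:

import numpy as np

def normal_pdf(x, mu, sigma):
  return np.exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))

rng = np.random.default_rng(0)
q_sample = rng.normal(0.0, 1.0, size=100_000)  # draws from q = N(0, 1)
weights = normal_pdf(q_sample, -1.0, 0.5) / normal_pdf(q_sample, 0.0, 1.0)
print(np.mean(q_sample * weights))  # ~ -1.0, i.e. E_p[X] for p = N(-1, 0.5)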
Example #16
Source File: halton_test.py    From tf-quant-finance with Apache License 2.0
def test_normal_integral_mean_and_var_correctly_estimated(self):
    n = int(1000)
    # This test is almost identical to the similarly named test in
    # monte_carlo_test.py. The only difference is that we use the Halton
    # samples instead of the random samples to evaluate the expectations.
    # MC with pseudo-random numbers converges at a rate of 1/sqrt(N)
    # (N = number of samples). For QMC in low dimensions, the expected
    # convergence rate is ~1/N. Hence we should only need 1e3 samples, as
    # compared to the 1e6 samples used in the pseudo-random Monte Carlo.
    mu_p = tf.constant([-1., 1.], dtype=tf.float64)
    mu_q = tf.constant([0., 0.], dtype=tf.float64)
    sigma_p = tf.constant([0.5, 0.5], dtype=tf.float64)
    sigma_q = tf.constant([1., 1.], dtype=tf.float64)
    p = tfd.Normal(loc=mu_p, scale=sigma_p)
    q = tfd.Normal(loc=mu_q, scale=sigma_q)

    cdf_sample, _ = random.halton.sample(2, num_results=n, dtype=tf.float64,
                                         seed=1729)
    q_sample = q.quantile(cdf_sample)

    # Compute E_p[X].
    e_x = tf.reduce_mean(q_sample * p.prob(q_sample) / q.prob(q_sample), 0)

    # Compute E_p[X^2 - E_p[X]^2].
    e_x2 = tf.reduce_mean(q_sample**2 * p.prob(q_sample) / q.prob(q_sample)
                          - e_x**2, 0)
    stddev = tf.sqrt(e_x2)

    # Keep the tolerance levels the same as in monte_carlo_test.py.
    self.assertEqual(p.batch_shape, e_x.shape)
    self.assertAllClose(self.evaluate(p.mean()), self.evaluate(e_x), rtol=0.01)
    self.assertAllClose(
        self.evaluate(p.stddev()), self.evaluate(stddev), rtol=0.02) 
Example #17
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        # Normalize kernel first so that calling the layer calculates
        # `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
        self._compute_weights()

        activation = self.layer.activation
        self.layer.activation = None

        use_bias = self.layer.bias is not None
        if use_bias:
            bias = self.layer.bias
            self.layer.bias = tf.zeros_like(bias)

        # Since the bias is initialized as zero, setting the activation to zero and
        # calling the initialized layer (with normalized kernel) yields the correct
        # computation ((5) in Salimans and Kingma (2016))
        x_init = self.layer(inputs)
        norm_axes_out = list(range(x_init.shape.rank - 1))
        m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
        scale_init = 1. / tf.sqrt(v_init + 1e-10)

        self.g.assign(self.g * scale_init)
        if use_bias:
            self.layer.bias = bias
            self.layer.bias.assign(-m_init * scale_init)
        self.layer.activation = activation 
Example #18
Source File: pixelcnn.py    From alibi-detect with Apache License 2.0
def _init_norm(self):
        """Set the norm of the weight vector."""
        kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
        self.g.assign(kernel_norm) 
Example #19
Source File: quantizers.py    From qkeras with Apache License 2.0
def __init__(self,
               bits=8,
               max_value=None,
               use_stochastic_rounding=False,
               quadratic_approximation=False):
    self.bits = bits
    self.max_value = max_value
    self.use_stochastic_rounding = use_stochastic_rounding
    # If True, round exponents to even values so that the exponent of sqrt(x)
    # is an integer, i.e. the exponent can be halved without remainder.
    self.quadratic_approximation = quadratic_approximation
    need_exponent_sign_bit = _need_exponent_sign_bit_check(self.max_value)
    non_sign_bits = self.bits - 1
    self._min_exp, self._max_exp = _get_min_max_exponents(
        non_sign_bits, need_exponent_sign_bit, self.quadratic_approximation) 
Example #20
Source File: math_ops.py    From trax with Apache License 2.0
def sqrt(x):
  return _scalar(tf.sqrt, x, True) 
Example #21
Source File: helpers.py    From compression with Apache License 2.0
def estimate_tails(func, target, shape, dtype):
  """Estimates approximate tail quantiles.

  This runs a simple Adam iteration to determine tail quantiles. The
  objective is to find an `x` such that:
  ```
  func(x) == target
  ```
  For instance, if `func` is a CDF and the target is a quantile value, this
  would find the approximate location of that quantile. Note that `func` is
  assumed to be monotonic. When each tail estimate has passed the optimal value
  of `x`, the algorithm does 10 additional iterations and then stops.

  This operation is vectorized. The tensor shape of `x` is given by `shape`, and
  `target` must have a shape that is broadcastable to the output of `func(x)`.

  Arguments:
    func: A callable that computes a cumulative distribution function,
      survival function, or similar.
    target: The desired target value.
    shape: The shape of the `tf.Tensor` representing `x`.
    dtype: The `tf.dtypes.Dtype` of the computation (and the return value).

  Returns:
    A `tf.Tensor` representing the solution (`x`).
  """
  with tf.name_scope("estimate_tails"):
    dtype = tf.as_dtype(dtype)
    shape = tf.convert_to_tensor(shape, tf.int32)
    target = tf.convert_to_tensor(target, dtype)

    def loop_cond(tails, m, v, count):
      del tails, m, v  # unused
      return tf.reduce_min(count) < 10

    def loop_body(tails, m, v, count):
      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(tails)
        loss = abs(func(tails) - target)
      grad = tape.gradient(loss, tails)
      m = .5 * m + .5 * grad  # Adam mean estimate.
      v = .9 * v + .1 * tf.square(grad)  # Adam variance estimate.
      tails -= .5 * m / (tf.sqrt(v) + 1e-7)
      # Start counting when the gradient flips sign (note that this assumes
      # `tails` is initialized to zero).
      count = tf.where(
          tf.math.logical_or(count > 0, tails * grad > 0),
          count + 1, count)
      return tails, m, v, count

    init_tails = tf.zeros(shape, dtype=dtype)
    init_m = tf.zeros(shape, dtype=dtype)
    init_v = tf.ones(shape, dtype=dtype)
    init_count = tf.zeros(shape, dtype=tf.int32)
    return tf.while_loop(
        loop_cond, loop_body, (init_tails, init_m, init_v, init_count),
        back_prop=False)[0] 
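
A minimal sketch of calling estimate_tails to locate an extreme quantile of the standard normal CDF (assuming tf is tensorflow.compat.v2; the target value is arbitrary):

import tensorflow.compat.v2 as tf

def normal_cdf(x):
  return 0.5 * (1.0 + tf.math.erf(x / 2**0.5))

x = estimate_tails(normal_cdf, target=1e-6, shape=[1], dtype=tf.float32)
print(x)  # roughly -4.75: the point where the standard normal CDF is ~1e-6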
Example #22
Source File: multivariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self,
                    times,
                    num_requested_times,
                    initial_state,
                    num_samples,
                    random_type,
                    seed,
                    skip):
    """Returns a sample of paths from the process."""
    # Normal draws needed for sampling.
    # Shape [num_requested_times, num_samples, dim]
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed,
        dtype=self._dtype, skip=skip)
    times = tf.concat([[0], times], -1)
    # Time increments
    # Shape [num_requested_times, 1, 1]
    dt = tf.expand_dims(tf.expand_dims(times[1:] - times[:-1], axis=-1),
                        axis=-1)
    if self._corr_matrix is None:
      stochastic_increment = normal_draws
    else:
      cholesky = tf.linalg.cholesky(self._corr_matrix)
      stochastic_increment = tf.linalg.matvec(cholesky, normal_draws)

    # The logarithm of all the increments between the times.
    # Shape [num_requested_times, num_samples, dim]
    log_increments = ((self._means - self._vols**2 / 2) * dt
                      + tf.sqrt(dt) * self._vols
                      * stochastic_increment)

    # Since the implementation of tf.math.cumsum is single-threaded, we
    # use lower-triangular matrix multiplication instead.
    once = tf.ones([num_requested_times, num_requested_times],
                   dtype=self._dtype)
    lower_triangular = tf.linalg.band_part(once, -1, 0)
    cumsum = tf.linalg.matvec(lower_triangular,
                              tf.transpose(log_increments))
    cumsum = tf.transpose(cumsum, [1, 2, 0])
    samples = initial_state * tf.math.exp(cumsum)
    return samples 
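
The Cholesky step above is the standard construction for imposing a correlation structure on independent normal draws: if z has identity covariance and L L^T = C, then L z has covariance C. A small standalone check (assuming tf is tensorflow.compat.v2; the correlation value is made up):

import numpy as np
import tensorflow.compat.v2 as tf

corr = tf.constant([[1.0, 0.8], [0.8, 1.0]], dtype=tf.float64)
cholesky = tf.linalg.cholesky(corr)
draws = tf.random.stateless_normal([100000, 2], seed=[1, 2], dtype=tf.float64)
correlated = tf.linalg.matvec(cholesky, draws)
print(np.corrcoef(correlated.numpy(), rowvar=False))  # off-diagonal ~0.8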
Example #23
Source File: ito_process.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self, times, grid_step, keep_mask, num_requested_times,
                    num_samples, initial_state, random_type, seed, swap_memory):
    """Returns a sample of paths from the process."""
    dt = times[1:] - times[:-1]
    sqrt_dt = tf.sqrt(dt)
    current_state = initial_state + tf.zeros(
        [num_samples, self.dim()], dtype=initial_state.dtype)
    steps_num = tf.shape(dt)[-1]
    wiener_mean = tf.zeros((self.dim(), 1), dtype=self._dtype)

    cond_fn = lambda i, *args: i < steps_num

    def step_fn(i, written_count, current_state, result):
      """Performs one step of Euler scheme."""
      current_time = times[i + 1]
      dw = random_ops.mv_normal_sample((num_samples,),
                                       mean=wiener_mean,
                                       random_type=random_type,
                                       seed=seed)
      dw = dw * sqrt_dt[i]
      dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
      dw_inc = tf.squeeze(
          tf.matmul(self.volatility_fn()(current_time, current_state), dw), -1)  # pylint: disable=not-callable
      next_state = current_state + dt_inc + dw_inc

      def write_next_state_to_result():
        # Replace result[:, written_count, :] with next_state.
        one_hot = tf.one_hot(written_count, depth=num_requested_times)
        mask = tf.expand_dims(one_hot > 0, axis=-1)
        return tf.where(mask, tf.expand_dims(next_state, axis=1), result)

      # Keep only states for times requested by user.
      result = tf.cond(keep_mask[i + 1],
                       write_next_state_to_result,
                       lambda: result)
      written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
      return i + 1, written_count, next_state, result

    # The maximum number of iterations is passed to the while loop below. It
    # improves performance of the while loop on a GPU and is needed for
    # XLA-compilation compatibility.
    maximum_iterations = (
        tf.cast(1. / grid_step, dtype=tf.int32) + tf.size(times))
    result = tf.zeros((num_samples, num_requested_times, self.dim()),
                      dtype=self._dtype)
    _, _, _, result = tf.compat.v1.while_loop(
        cond_fn,
        step_fn, (0, 0, current_state, result),
        maximum_iterations=maximum_iterations,
        swap_memory=swap_memory)

    return result 
Example #24
Source File: euler_sampling_test.py    From tf-quant-finance with Apache License 2.0
def test_antithetic_sample_paths_mean_2d(self, random_type, seed):
    """Tests path properties for 2-dimentional anthithetic variates method.

    The same test as above but with `PSEUDO_ANTITHETIC` random type.
    We construct the following Ito processes.

    dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
    dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2

    mu_1, mu_2 are constants.
    s_ij = a_ij t + b_ij

    For this process the expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.

    Args:
      random_type: Random number type defined by tff.math.random.RandomType
        enum.
      seed: Random seed.
    """
    mu = np.array([0.2, 0.7])
    a = np.array([[0.4, 0.1], [0.3, 0.2]])
    b = np.array([[0.33, -0.03], [0.21, 0.5]])

    def drift_fn(t, x):
      del x
      return mu * tf.sqrt(t)

    def vol_fn(t, x):
      del x
      return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)

    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    num_samples = 5000
    x0 = np.array([0.1, -1.1])
    paths = self.evaluate(
        euler_sampling.sample(
            dim=2,
            drift_fn=drift_fn, volatility_fn=vol_fn,
            times=times,
            num_samples=num_samples,
            initial_state=x0,
            random_type=random_type,
            time_step=0.01,
            seed=seed))

    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
    means = np.mean(paths, axis=0)
    times = np.reshape(times, [-1, 1])
    expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)
    # The antithetic variates method produces a better estimate than
    # the one obtained with the `PSEUDO` random type.
    self.assertAllClose(means, expected_means, rtol=5e-3, atol=5e-3) 
Example #25
Source File: euler_sampling_test.py    From tf-quant-finance with Apache License 2.0
def test_sample_paths_2d(self, random_type, seed):
    """Tests path properties for 2-dimentional Ito process.

    We construct the following Ito processes.

    dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
    dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2

    mu_1, mu_2 are constants.
    s_ij = a_ij t + b_ij

    For this process the expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.

    Args:
      random_type: Random number type defined by tff.math.random.RandomType
        enum.
      seed: Random seed.
    """
    mu = np.array([0.2, 0.7])
    a = np.array([[0.4, 0.1], [0.3, 0.2]])
    b = np.array([[0.33, -0.03], [0.21, 0.5]])

    def drift_fn(t, x):
      return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)

    def vol_fn(t, x):
      del x
      return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)

    num_samples = 10000
    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    x0 = np.array([0.1, -1.1])
    paths = self.evaluate(
        euler_sampling.sample(
            dim=2,
            drift_fn=drift_fn, volatility_fn=vol_fn,
            times=times,
            num_samples=num_samples,
            initial_state=x0,
            time_step=0.01,
            random_type=random_type,
            seed=seed))

    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
    means = np.mean(paths, axis=0)
    times = np.reshape(times, [-1, 1])
    expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)
    self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2) 
Example #26
Source File: parabolic_equation_stepper_test.py    From tf-quant-finance with Apache License 2.0
def testCrankNicolsonOscillationDamping(self):
    """Tests the Crank-Nicolson oscillation damping.

    Oscillations arise in the Crank-Nicolson scheme when the initial (or final)
    conditions have discontinuities. We use the Heaviside step function as the
    initial condition. The exact solution of the heat equation with unbounded x is
    ```None
    u(x, t) = (1 + erf(x / (2 sqrt(t)))) / 2
    ```
    We take large enough x_min, x_max to be able to use this as a reference
    solution.

    CrankNicolsonWithOscillationDamping produces a much smaller error than
    the usual crank_nicolson_scheme.
    """

    final_t = 1
    x_min = -10
    x_max = 10
    dtype = np.float32

    def final_cond_fn(x):
      return 0.0 if x < 0 else 1.0

    def expected_result_fn(x):
      return 1 / 2 + tf.math.erf(x / (2 * tf.sqrt(dtype(final_t)))) / 2

    @dirichlet
    def lower_boundary_fn(t, x):
      del t, x
      return 0

    @dirichlet
    def upper_boundary_fn(t, x):
      del t, x
      return 1

    grid = grids.uniform_grid(
        minimums=[x_min], maximums=[x_max], sizes=[1000], dtype=dtype)

    self._testHeatEquation(
        grid=grid,
        final_t=final_t,
        time_step=0.01,
        final_cond_fn=final_cond_fn,
        expected_result_fn=expected_result_fn,
        one_step_fn=crank_nicolson_with_oscillation_damping_step(),
        lower_boundary_fn=lower_boundary_fn,
        upper_boundary_fn=upper_boundary_fn,
        error_tolerance=1e-3)