Python tensorflow.compat.v2.expand_dims() Examples

The following are 30 code examples of tensorflow.compat.v2.expand_dims(), collected from open-source projects. The source file, project, and license for each example are listed above its code. You may also want to check out all available functions/classes of the module tensorflow.compat.v2.
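Before the examples, a minimal sketch of the op itself (assuming any TensorFlow 2.x installation; the alias matches how the examples below import it): tf.expand_dims inserts a new axis of length 1 at the given position, which is how the snippets below build column vectors for matmul or add batch and time dimensions.

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, 2.0, 3.0])  # shape [3]
col = tf.expand_dims(x, axis=-1)  # shape [3, 1]
row = tf.expand_dims(x, axis=0)   # shape [1, 3]
print(col.shape, row.shape)       # (3, 1) (1, 3)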
Example #1
Source File: tensor_wrapper.py    From tf-quant-finance with Apache License 2.0
def _apply_op(self, op_fn):
    """Applies given tensor-to-tensor op.

    This method is used for implementing ops that take a tensor and return a new
    tensor, such as tf.expand_dims or tf.transpose. Implementing wrappers
    should apply `op_fn` to the backing tensor(s) and return a new wrapper
    instance with the updated backing tensor.

    Args:
      op_fn: Callable that applies a tensor-to-tensor op to the given Tensor.
        E.g. applies tf.expand_dims.

    Returns:
      A TensorWrapper instance with updated backing tensor(s).
    """
    raise NotImplementedError() 
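As a hedged illustration only (not from tf-quant-finance), a wrapper backed by a single tensor might implement _apply_op along these lines; SimpleWrapper and its _backing attribute are hypothetical names:

class SimpleWrapper(TensorWrapper):
  """Hypothetical wrapper holding a single backing tensor."""

  def __init__(self, backing_tensor):
    self._backing = backing_tensor  # illustrative attribute, not from the source

  def _apply_op(self, op_fn):
    # Apply the tensor-to-tensor op and re-wrap the result.
    return SimpleWrapper(op_fn(self._backing))

# Usage, e.g.: wrapper._apply_op(lambda t: tf.expand_dims(t, axis=0))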
Example #2
Source File: time_marching_schemes_test.py    From tf-quant-finance with Apache License 2.0
def testHomogeneous(self, scheme, accuracy_order):
    # Tests solving du/dt = Au for a time step.
    # Compares with the exact solution u(t) = exp(At) u(0).

    # Time step should be small enough to "resolve" different orders of accuracy
    time_step = 0.0001
    u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
    matrix = tf.constant(
        [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
        dtype=tf.float64)

    tridiag_form = self._convert_to_tridiagonal_format(matrix)
    actual = self.evaluate(
        scheme(u, 0, time_step, lambda t: (tridiag_form, None)))
    expected = self.evaluate(
        tf.squeeze(
            tf.matmul(tf.linalg.expm(matrix * time_step), tf.expand_dims(u,
                                                                         1))))

    error_tolerance = 30 * time_step**(accuracy_order + 1)
    self.assertLess(np.max(np.abs(actual - expected)), error_tolerance) 
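The tf.expand_dims(u, 1) / tf.squeeze pair above is the usual way to compute a matrix-vector product with tf.matmul, which requires both operands to have rank at least 2; tf.linalg.matvec gives the same result directly. A minimal sketch of the equivalence:

import tensorflow.compat.v2 as tf

m = tf.constant([[1.0, 2.0], [3.0, 4.0]])
v = tf.constant([1.0, -1.0])
via_matmul = tf.squeeze(tf.matmul(m, tf.expand_dims(v, 1)))  # shape [2]
via_matvec = tf.linalg.matvec(m, v)                          # same values
# Both evaluate to [-1., -1.]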
Example #3
Source File: time_marching_schemes_test.py    From tf-quant-finance with Apache License 2.0
def testHomogeneousBackwards(self, scheme, accuracy_order):
    # Tests solving du/dt = Au for a backward time step.
    # Compares with the exact solution u(0) = exp(-At) u(t).
    time_step = 0.0001
    u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
    matrix = tf.constant(
        [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
        dtype=tf.float64)

    tridiag_form = self._convert_to_tridiagonal_format(matrix)
    actual = self.evaluate(
        scheme(u, time_step, 0, lambda t: (tridiag_form, None)))

    expected = self.evaluate(
        tf.squeeze(
            tf.matmul(
                tf.linalg.expm(-matrix * time_step), tf.expand_dims(u, 1))))

    error_tolerance = 30 * time_step**(accuracy_order + 1)
    self.assertLess(np.max(np.abs(actual - expected)), error_tolerance) 
Example #4
Source File: mt_agent.py    From valan with Apache License 2.0
def _neck(self, torso_outputs, state):
    current_lstm_state, text_enc_outputs, ins_classifier_logits = state
    image_features = tf.cast(torso_outputs[constants.PANO_ENC], tf.float32)
    lstm_output, next_lstm_state = self._image_encoder(image_features,
                                                       current_lstm_state)

    lstm_output = tf.expand_dims(lstm_output, axis=1)

    # c_text has shape [batch_size, 1, self._text_attention_size]
    c_text = self._text_attention([
        self._text_attention_project_hidden(lstm_output),
        self._text_attention_project_text(text_enc_outputs)
    ])
    # The elements of next_lstm_state are ListWrappers. To keep this consistent
    # with get_initial_state, we convert them to tuples.
    result_state = []
    for one_state in next_lstm_state:
      result_state.append((one_state[0], one_state[1]))
    torso_outputs['hidden_state'] = lstm_output
    torso_outputs['c_text'] = c_text
    torso_outputs['ins_classifier_logits'] = ins_classifier_logits
    return (torso_outputs, (result_state, text_enc_outputs,
                            ins_classifier_logits)) 
Example #5
Source File: baseline_agent.py    From valan with Apache License 2.0
def _torso(self, observation):
    conv_out = observation[streetview_constants.IMAGE_FEATURES]
    heading = observation[streetview_constants.HEADING]
    last_action = observation[streetview_constants.PREV_ACTION_IDX]

    conv_out = tf.cast(conv_out, tf.float32)

    img_encoding = self._dense_img_extra(self._dense_img(conv_out))
    img_encoding = tf.keras.layers.Flatten()(img_encoding)

    heading = tf.expand_dims(heading, -1)
    last_action_embedded = self._action_embedder(last_action)

    torso_output = tf.concat([heading, last_action_embedded, img_encoding],
                             axis=1)
    timestep_embedded = self._timestep_embedder(
        observation[streetview_constants.TIMESTEP])
    return {
        'neck_input': torso_output,
        streetview_constants.TIMESTEP: timestep_embedded,
    } 
Example #6
Source File: time_marching_schemes_test.py    From tf-quant-finance with Apache License 2.0
def testInhomogeneousBackwards(self, scheme, accuracy_order):
    # Tests solving du/dt = Au + b for a backward time step.
    # Compares with the exact solution u(0) = exp(-At) u(t)
    # + (exp(-At) - 1) A^(-1) b.
    time_step = 0.0001
    u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
    matrix = tf.constant(
        [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
        dtype=tf.float64)
    b = tf.constant([1, -1, -2, 2], dtype=tf.float64)

    tridiag_form = self._convert_to_tridiagonal_format(matrix)
    actual = self.evaluate(scheme(u, time_step, 0, lambda t: (tridiag_form, b)))

    exponent = tf.linalg.expm(-matrix * time_step)
    eye = tf.eye(4, 4, dtype=tf.float64)
    u = tf.expand_dims(u, 1)
    b = tf.expand_dims(b, 1)
    expected = (
        tf.matmul(exponent, u) +
        tf.matmul(exponent - eye, tf.matmul(tf.linalg.inv(matrix), b)))
    expected = self.evaluate(tf.squeeze(expected))

    error_tolerance = 30 * time_step**(accuracy_order + 1)
    self.assertLess(np.max(np.abs(actual - expected)), error_tolerance) 
Example #7
Source File: time_marching_schemes_test.py    From tf-quant-finance with Apache License 2.0
def testInhomogeneous(self, scheme, accuracy_order):
    # Tests solving du/dt = Au + b for a time step.
    # Compares with the exact solution u(t) = exp(At) u(0) + (exp(At) - 1) A^(-1) b.
    time_step = 0.0001
    u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
    matrix = tf.constant(
        [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
        dtype=tf.float64)
    b = tf.constant([1, -1, -2, 2], dtype=tf.float64)

    tridiag_form = self._convert_to_tridiagonal_format(matrix)
    actual = self.evaluate(scheme(u, 0, time_step, lambda t: (tridiag_form, b)))

    exponent = tf.linalg.expm(matrix * time_step)
    eye = tf.eye(4, 4, dtype=tf.float64)
    u = tf.expand_dims(u, 1)
    b = tf.expand_dims(b, 1)
    expected = (
        tf.matmul(exponent, u) +
        tf.matmul(exponent - eye, tf.matmul(tf.linalg.inv(matrix), b)))
    expected = self.evaluate(tf.squeeze(expected))

    error_tolerance = 30 * time_step**(accuracy_order + 1)
    self.assertLess(np.max(np.abs(actual - expected)), error_tolerance) 
Example #8
Source File: douglas_adi.py    From tf-quant-finance with Apache License 2.0
def _apply_tridiag_matrix_explicitly(values, superdiag, diag, subdiag,
                                     dim, n_dims):
  """Applies tridiagonal matrix explicitly."""
  perm = _get_permutation(values, n_dims, dim)

  # Make the given dimension the last one in the tensors, treat all the
  # other spatial dimensions as batch dimensions.
  if perm is not None:
    values = tf.transpose(values, perm)
    superdiag, diag, subdiag = (
        tf.transpose(c, perm) for c in (superdiag, diag, subdiag))

  values = tf.squeeze(
      tf.linalg.tridiagonal_matmul((superdiag, diag, subdiag),
                                   tf.expand_dims(values, -1),
                                   diagonals_format='sequence'), -1)

  # Transpose back to how it was.
  if perm is not None:
    values = tf.transpose(values, perm)
  return values 
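The expand_dims(values, -1) / squeeze(..., -1) pair adapts a batch of vectors to tf.linalg.tridiagonal_matmul, which expects an explicit trailing right-hand-side dimension. A minimal sketch (the tridiagonal system is made up for illustration):

import tensorflow.compat.v2 as tf

superdiag = tf.constant([[1.0, 1.0, 0.0]])  # last entry ignored
diag = tf.constant([[2.0, 2.0, 2.0]])
subdiag = tf.constant([[0.0, 1.0, 1.0]])    # first entry ignored
v = tf.constant([[1.0, 1.0, 1.0]])
out = tf.squeeze(
    tf.linalg.tridiagonal_matmul((superdiag, diag, subdiag),
                                 tf.expand_dims(v, -1),
                                 diagonals_format='sequence'), -1)
# out == [[3., 4., 3.]]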
Example #9
Source File: discriminator_agent_test.py    From valan with Apache License 2.0
def test_call(self):
    env_output = self._env.reset()
    observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 0),
                                        env_output.observation)
    initial_agent_state = self._agent.get_initial_state(
        observation, batch_size=1)
    # Agent always expects time, batch dimensions. First add and then remove.
    env_output = utils.add_time_batch_dim(env_output)
    agent_output, _ = self._agent(env_output, initial_agent_state)
    initial_agent_state = ([
        (tf.random.normal([self.batch_size,
                           512]), tf.random.normal([self.batch_size, 512])),
        (tf.random.normal([self.batch_size,
                           512]), tf.random.normal([self.batch_size, 512]))
    ], tf.random.normal([self.batch_size, 5, 512]))
    agent_output, _ = self._agent(self._test_environment, initial_agent_state)
    self.assertEqual(agent_output.policy_logits.shape, [3, 1, 1]) 
Example #10
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _conditional_variance_x(self, t, mr_t, sigma_t):
    """Computes the variance of x(t), see [1], Eq. 10.41."""
    t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
    var_x_between_vol_knots = self._variance_int(self._padded_knots,
                                                 self._jump_locations,
                                                 self._jump_values_vol,
                                                 self._jump_values_mr)
    varx_at_vol_knots = tf.concat(
        [self._zero_padding,
         _cumsum_using_matvec(var_x_between_vol_knots)],
        axis=1)

    time_index = tf.searchsorted(self._jump_locations, t)
    vn = tf.concat(
        [self._zero_padding,
         self._jump_locations], axis=1)

    var_x_t = self._variance_int(
        tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
    var_x_t = var_x_t + tf.gather(varx_at_vol_knots, time_index, batch_dims=1)

    var_x_t = (var_x_t[:, 1:] - var_x_t[:, :-1]) * tf.math.exp(
        -2 * tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
    return var_x_t 
Example #11
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _compute_yt(self, t, mr_t, sigma_t):
    """Computes y(t) as described in [1], section 10.1.6.1."""
    t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
    time_index = tf.searchsorted(self._jump_locations, t)
    y_between_vol_knots = self._y_integral(
        self._padded_knots, self._jump_locations, self._jump_values_vol,
        self._jump_values_mr)
    y_at_vol_knots = tf.concat(
        [self._zero_padding,
         _cumsum_using_matvec(y_between_vol_knots)], axis=1)

    vn = tf.concat(
        [self._zero_padding, self._jump_locations], axis=1)
    y_t = self._y_integral(
        tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
    y_t = y_t + tf.gather(y_at_vol_knots, time_index, batch_dims=1)
    return tf.math.exp(-2 * mr_t * t) * y_t 
Example #12
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _exact_discretization_setup(self, dim):
    """Initial setup for efficient computations."""
    self._zero_padding = tf.zeros((dim, 1), dtype=self._dtype)
    self._jump_locations = tf.concat(
        [self._volatility.jump_locations(),
         self._mean_reversion.jump_locations()], axis=-1)
    self._jump_values_vol = self._volatility(self._jump_locations)
    self._jump_values_mr = self._mean_reversion(self._jump_locations)
    if dim == 1:
      self._padded_knots = tf.concat([
          self._zero_padding,
          tf.expand_dims(self._jump_locations[:-1], axis=0)
      ], axis=1)
      self._jump_values_vol = tf.expand_dims(self._jump_values_vol, axis=0)
      self._jump_values_mr = tf.expand_dims(self._jump_values_mr, axis=0)
      self._jump_locations = tf.expand_dims(self._jump_locations, axis=0)

    else:
      self._padded_knots = tf.concat(
          [self._zero_padding, self._jump_locations[:, :-1]], axis=1) 
Example #13
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _euler_step(*, i, written_count, current_state, result,
                drift_fn, volatility_fn, wiener_mean,
                num_samples, times, dt, sqrt_dt, keep_mask,
                random_type, seed, normal_draws):
  """Performs one step of Euler scheme."""
  current_time = times[i + 1]
  written_count = tf.cast(written_count, tf.int32)
  if normal_draws is not None:
    dw = normal_draws[i]
  else:
    dw = random.mv_normal_sample(
        (num_samples,), mean=wiener_mean, random_type=random_type,
        seed=seed)
  dw = dw * sqrt_dt[i]
  dt_inc = dt[i] * drift_fn(current_time, current_state)  # pylint: disable=not-callable
  dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw)  # pylint: disable=not-callable
  next_state = current_state + dt_inc + dw_inc
  result = utils.maybe_update_along_axis(
      tensor=result,
      do_update=keep_mask[i + 1],
      ind=written_count,
      axis=1,
      new_tensor=tf.expand_dims(next_state, axis=1))
  written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
  return i + 1, written_count, next_state, result 
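Stripped of the bookkeeping, the update above is the Euler-Maruyama step x(t + dt) = x(t) + mu(t, x) dt + sigma(t, x) dW. A scalar sketch with hypothetical drift and volatility functions:

import tensorflow.compat.v2 as tf

def euler_step(x, t, dt, drift_fn, vol_fn, dw):
  # One Euler-Maruyama update: x + mu * dt + sigma * dw.
  return x + drift_fn(t, x) * dt + vol_fn(t, x) * dw

x = tf.ones([1000])  # 1000 sample paths
dw = tf.random.normal([1000]) * tf.sqrt(0.01)
x = euler_step(x, 0.0, 0.01,
               lambda t, x: 0.05 * x,   # drift
               lambda t, x: 0.2 * x,    # volatility
               dw)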
Example #14
Source File: lsm_test.py    From tf-quant-finance with Apache License 2.0
def setUp(self):
    """Sets `samples` as in the Longstaff-Schwartz paper."""
    super(LsmTest, self).setUp()
    # See Longstaff, F.A. and Schwartz, E.S., 2001. Valuing American options by
    # simulation: a simple least-squares approach.
    samples = [[1.0, 1.09, 1.08, 1.34],
               [1.0, 1.16, 1.26, 1.54],
               [1.0, 1.22, 1.07, 1.03],
               [1.0, 0.93, 0.97, 0.92],
               [1.0, 1.11, 1.56, 1.52],
               [1.0, 0.76, 0.77, 0.90],
               [1.0, 0.92, 0.84, 1.01],
               [1.0, 0.88, 1.22, 1.34]]
    # Expand dims to reflect that `samples` represent sample paths of
    # a 1-dimensional process
    self.samples = np.expand_dims(samples, -1)
    # Interest rates between exercise times
    interest_rates = [0.06, 0.06, 0.06]
    # Corresponding discount factors
    self.discount_factors = np.exp(-np.cumsum(interest_rates)) 
Example #15
Source File: lsm_test.py    From tf-quant-finance with Apache License 2.0
def test_expected_continuation(self):
    """Tests that expected continuation works in V=1 case.

    In particular this verifies that the regression done to get the expected
    continuation value is performed on those elements which have a positive
    exercise value.
    """
    for dtype in (np.float32, np.float64):
      a = tf.range(start=-2, limit=3, delta=1, dtype=dtype)
      design = tf.concat([a, a], axis=0)
      design = tf.concat([[tf.ones_like(design), design]], axis=1)

      # These values ensure that the expected continuation value is `(1,...,1).`
      exercise_now = tf.expand_dims(
          tf.concat([tf.ones_like(a), tf.zeros_like(a)], axis=0), -1)
      cashflow = tf.expand_dims(
          tf.concat([tf.ones_like(a), -tf.ones_like(a)], axis=0), -1)

      expected_exercise = lsm.expected_exercise_fn(
          design, cashflow, exercise_now)
      self.assertAllClose(expected_exercise, tf.ones_like(cashflow)) 
Example #16
Source File: brownian_motion_test.py    From tf-quant-finance with Apache License 2.0
def test_time_dependent_construction(self):
    """Tests with time dependent drift and variance."""

    def vol_fn(t):
      return tf.expand_dims(0.2 - 0.1 * tf.exp(-t), axis=-1)

    def variance_fn(t0, t1):
      # The instantaneous volatility is 0.2 - 0.1 e^(-t).
      tot_var = (t1 - t0) * 0.04 - (tf.exp(-2 * t1) - tf.exp(-2 * t0)) * 0.005
      tot_var += 0.04 * (tf.exp(-t1) - tf.exp(-t0))
      return tf.reshape(tot_var, [-1, 1, 1])

    process = BrownianMotion(
        dim=1, drift=0.1, volatility=vol_fn, total_covariance_fn=variance_fn)
    t0 = np.array([0.2, 0.7, 0.9])
    delta_t = np.array([0.1, 0.8, 0.3])
    t1 = t0 + delta_t
    drifts = self.evaluate(process.total_drift_fn()(t0, t1))
    self.assertArrayNear(drifts, 0.1 * delta_t, 1e-10)
    variances = self.evaluate(process.total_covariance_fn()(t0, t1))
    self.assertArrayNear(
        variances.reshape([-1]), [0.00149104, 0.02204584, 0.00815789], 1e-8) 
Example #17
Source File: lsm_v2.py    From tf-quant-finance with Apache License 2.0
def _updated_cashflow(num_times, exercise_index, exercise_value,
                      expected_continuation, cashflow):
  """Revises the cashflow tensor where options will be exercised earlier."""
  do_exercise_bool = exercise_value > expected_continuation
  do_exercise = tf.cast(do_exercise_bool, exercise_value.dtype)
  # Shape [num_samples, payoff_dim]
  scaled_do_exercise = tf.where(do_exercise_bool, exercise_value,
                                tf.zeros_like(exercise_value))
  # This picks out the samples where we now wish to exercise.
  # Shape [num_samples, payoff_dim, 1]
  new_samp_masked = tf.expand_dims(scaled_do_exercise, axis=2)
  # This should be one on the current time step and zero otherwise.
  # This is an array with nonzero entries showing newly exercised payoffs.
  zeros = tf.zeros_like(cashflow)
  mask = tf.equal(tf.range(0, num_times), exercise_index - 1)
  new_cash = tf.where(mask, new_samp_masked, zeros)
  # Has shape [num_samples, payoff_dim, 1]
  old_mask = tf.expand_dims(1 - do_exercise, axis=2)
  mask = tf.range(0, num_times) >= exercise_index
  old_mask = tf.where(mask, old_mask, zeros)
  # Shape [num_samples, payoff_dim, num_times]
  old_cash = old_mask * cashflow
  return new_cash + old_cash 
Example #18
Source File: joined_ito_process.py    From tf-quant-finance with Apache License 2.0
def _get_parameters(times, *params):
  """Gets parameter values at at specified `times`."""
  res = []
  for param in params:
    if callable(param):
      # Used only in drift and volatility computation.
      # Here `times` is of shape [1]
      t = tf.squeeze(times)
      # The result has to have shape [1] + param.shape
      param_value = tf.convert_to_tensor(param(t), dtype=times.dtype,
                                         name="param_value")
      res.append(tf.expand_dims(param_value, 0))
    else:
      res.append(param + tf.zeros(times.shape + param.shape, dtype=times.dtype))
  return res 
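In the non-callable branch, adding tf.zeros of the combined shape broadcasts a constant parameter over the whole time grid. A quick shape check with hypothetical values:

import tensorflow.compat.v2 as tf

times = tf.constant([0.1, 0.5, 1.0])
param = tf.constant([[0.2]])  # shape [1, 1]
res = param + tf.zeros(times.shape + param.shape, dtype=times.dtype)
print(res.shape)  # (3, 1, 1)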
Example #19
Source File: rnn_wrapper.py    From agents with Apache License 2.0
def get_initial_state(self, inputs=None):
    inputs_flat = [
        tf.convert_to_tensor(x, name='input', dtype_hint=self.dtype)
        for x in tf.nest.flatten(inputs)
    ]
    has_time_axis = all(
        [x.shape.ndims is None or x.shape.ndims > 2 for x in inputs_flat])
    if not has_time_axis:
      inputs_flat = [tf.expand_dims(t, axis=1) for t in inputs_flat]
    inputs = tf.nest.pack_sequence_as(inputs, inputs_flat)
    return self._layer.get_initial_state(inputs) 
Example #20
Source File: bounded_holiday_calendar.py    From tf-quant-finance with Apache License 2.0
def _compute_is_bus_day_table(self):
    """Computes and caches "is business day" table."""
    if self._table_cache.is_bus_day is not None:
      return self._table_cache.is_bus_day

    with tf.init_scope():
      ordinals = tf.range(self._ordinal_offset,
                          self._ordinal_offset + self._calendar_size)
      # Apply weekend mask
      week_days = (ordinals - 1) % 7
      is_holiday = tf.gather(self._weekend_mask, week_days)

      # Apply holidays
      if self._holidays is not None:
        indices = self._holidays.ordinal() - self._ordinal_offset
        ones_at_indices = tf.scatter_nd(
            tf.expand_dims(indices, axis=-1), tf.ones_like(indices),
            is_holiday.shape)
        is_holiday = tf.bitwise.bitwise_or(is_holiday, ones_at_indices)

      # Add a business day at the beginning and at the end, i.e. at 31 Dec of
      # start_year-1 and at 1 Jan of end_year+1. This trick is to avoid dealing
      # with special cases on boundaries.
      # For example, for Following and Preceding conventions we'd need a special
      # value that means "unknown" in the tables. More complicated conventions
      # then combine the Following and Preceding tables, and would need special
      # treatment of the "unknown" values.
      # With these "fake" business days, all computations are automatically
      # correct, unless we land on those extra days - for this reason we add
      # assertions in all API calls before returning.
      is_bus_day_table = tf.concat([[1], 1 - is_holiday, [1]], axis=0)
      self._table_cache.is_bus_day = is_bus_day_table
    return is_bus_day_table 
Example #21
Source File: rnn_wrapper_test.py    From agents with Apache License 2.0
def testWrapperCall(self):
    wrapper = rnn_wrapper.RNNWrapper(
        tf.keras.layers.LSTM(3, return_state=True, return_sequences=True))

    batch_size = 2
    input_depth = 5
    inputs = np.random.rand(batch_size, input_depth).astype(np.float32)

    # Make sure wrapper call works when no time dimension is passed in.
    outputs, next_state = wrapper(inputs)

    inputs_time_dim = tf.expand_dims(inputs, axis=1)
    outputs_time_dim, next_state_time_dim = wrapper(inputs_time_dim)
    outputs_time_dim = tf.squeeze(outputs_time_dim, axis=1)

    outputs_manual_state, next_state_manual_state = wrapper(
        inputs, wrapper.get_initial_state(inputs))

    self.evaluate(tf.compat.v1.global_variables_initializer())
    for out_variant in (outputs, outputs_time_dim, outputs_manual_state):
      self.assertEqual(out_variant.shape, (batch_size, 3))
    for state_variant in (next_state, next_state_time_dim,
                          next_state_manual_state):
      self.assertLen(state_variant, 2)
      self.assertEqual(state_variant[0].shape, (batch_size, 3))
      self.assertEqual(state_variant[1].shape, (batch_size, 3))

    self.assertAllClose(outputs, outputs_time_dim)
    self.assertAllClose(outputs, outputs_manual_state)
    self.assertAllClose(next_state, next_state_time_dim)
    self.assertAllClose(next_state, next_state_manual_state) 
Example #22
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _for_loop(*, steps_num, current_state,
              drift_fn, volatility_fn, wiener_mean, watch_params,
              num_samples, times, dt, sqrt_dt, time_indices,
              keep_mask, random_type, seed, normal_draws):
  """Smaple paths using custom for_loop."""
  num_time_points = time_indices.shape.as_list()[-1]
  if num_time_points == 1:
    iter_nums = steps_num
  else:
    iter_nums = time_indices
  def step_fn(i, current_state):
    # Unpack current_state
    current_state = current_state[0]
    _, _, next_state, _ = _euler_step(
        i=i,
        written_count=0,
        current_state=current_state,
        result=tf.expand_dims(current_state, axis=1),
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        keep_mask=keep_mask,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
    return [next_state]
  result = custom_loops.for_loop(
      body_fn=step_fn,
      initial_state=[current_state],
      params=watch_params,
      num_iterations=iter_nums)[0]
  if num_time_points == 1:
    return tf.expand_dims(result, axis=1)
  return tf.transpose(result, (1, 0, 2)) 
Example #23
Source File: generic_ito_process.py    From tf-quant-finance with Apache License 2.0
def _coord_grid_to_mesh_grid(coord_grid):
  if len(coord_grid) == 1:
    return tf.expand_dims(coord_grid[0], -1)
  return tf.stack(values=tf.meshgrid(*coord_grid, indexing='ij'), axis=-1) 
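A quick shape check of the helper above with a hypothetical two-dimensional grid; each grid point ends up with its coordinate vector in the trailing axis:

import tensorflow.compat.v2 as tf

grid = [tf.linspace(0.0, 1.0, 3), tf.linspace(0.0, 1.0, 4)]
mesh = tf.stack(values=tf.meshgrid(*grid, indexing='ij'), axis=-1)
print(mesh.shape)  # (3, 4, 2)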
Example #24
Source File: univariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self,
                    times,
                    num_requested_times,
                    initial_state,
                    num_samples,
                    random_type,
                    seed,
                    skip):
    """Returns a sample of paths from the process."""
    # Normal draws needed for sampling
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=1, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed,
        dtype=self._dtype, skip=skip)
    times = tf.concat([[0], times], -1)
    dt = times[1:] - times[:-1]
    # The logarithm of all the increments between the times.
    log_increments = ((self._mu - self._sigma**2 / 2) * dt
                      + tf.sqrt(dt) * self._sigma
                      * tf.transpose(tf.squeeze(normal_draws, -1)))
    # Since the implementation of tf.math.cumsum is single-threaded, we
    # use lower-triangular matrix multiplication instead.
    once = tf.ones([num_requested_times, num_requested_times],
                   dtype=self._dtype)
    lower_triangular = tf.linalg.band_part(once, -1, 0)
    cumsum = tf.linalg.matvec(lower_triangular,
                              log_increments)
    samples = initial_state * tf.math.exp(cumsum)
    return tf.expand_dims(samples, -1)

  # TODO(b/152967694): Remove the duplicate methods. 
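The lower-triangular matmul above computes an inclusive cumulative sum in a single (parallelizable) matvec; a minimal check of the equivalence:

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, 2.0, 3.0])
tri = tf.linalg.band_part(tf.ones([3, 3]), -1, 0)  # lower-triangular ones
print(tf.linalg.matvec(tri, x))  # [1. 3. 6.], same as tf.math.cumsum(x)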
Example #25
Source File: univariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def volatility_fn(self):
    """Python callable calculating the instantaneous volatility."""
    def _vol_fn(t, x):
      """Volatility function of the GBM."""
      del t
      vol = self._sigma * tf.expand_dims(x, -1)
      return vol
    return _vol_fn 
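Here tf.expand_dims turns each scalar state into a 1x1 volatility matrix, i.e. the per-sample [dim, dim] shape that sampling routines expect. A quick shape check (sigma = 0.2 is hypothetical):

import tensorflow.compat.v2 as tf

x = tf.zeros([8, 1])               # 8 samples of a 1-d state
vol = 0.2 * tf.expand_dims(x, -1)  # shape [8, 1, 1]
print(vol.shape)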
Example #26
Source File: multivariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _coord_grid_to_mesh_grid(coord_grid):
  if len(coord_grid) == 1:
    return tf.expand_dims(coord_grid[0], -1)
  return tf.stack(values=tf.meshgrid(*coord_grid, indexing="ij"), axis=-1) 
Example #27
Source File: brownian_motion_utils_test.py    From tf-quant-finance with Apache License 2.0
def test_construct_drift_callable(self):
    dtype = tf.float64
    a, b = 0.1, -0.8

    def test_drift_fn(t):
      return tf.expand_dims(t * a + b, axis=-1)

    def test_total_drift_fn(t1, t2):
      res = (t2**2 - t1**2) * a / 2 + (t2 - t1) * b
      return tf.expand_dims(res, axis=-1)

    drift_fn, total_drift_fn = bm_utils.construct_drift_data(
        test_drift_fn, test_total_drift_fn, 1, dtype)
    times = tf.constant([0.0, 1.0, 2.0], dtype=dtype)
    drift_vals = self.evaluate(drift_fn(times))
    np.testing.assert_array_equal(drift_vals.shape, [3, 1])
    np.testing.assert_allclose(drift_vals, [[-0.8], [-0.7], [-0.6]])

    t1 = tf.constant([1.0, 2.0, 3.0], dtype=dtype)
    t2 = tf.constant([1.5, 3.0, 5.0], dtype=dtype)

    total_vals = self.evaluate(total_drift_fn(t1, t2))
    np.testing.assert_array_equal(total_vals.shape, [3, 1])
    np.testing.assert_allclose(
        total_vals, [[-0.3375], [-0.55], [-0.8]], atol=1e-7)
    # Tests that total drift is None if drift is a callable and no total_drift
    # is supplied

    _, total_drift = bm_utils.construct_drift_data(test_drift_fn, None, 1,
                                                   dtype)
    self.assertIsNone(total_drift)

  # Tests for volatility. There are 10 cases. 
Example #28
Source File: image_encoder.py    From valan with Apache License 2.0
def call(self, image_features, current_lstm_state):
    """Function call.

    Args:
      image_features: A tensor with shape [batch_size, num_views,
        feature_vector_length]
      current_lstm_state: A list of (state_c, state_h) tuple

    Returns:
      next_hidden_state: Hidden state vector [batch_size, lstm_space_size],
        current step's LSTM output.
      next_lstm_state: Same shape as current_lstm_state.
    """
    # Attention-based visual-feature pooling. Pool the visual features of
    # shape [batch_size, num_views, feature_vector_length] to
    # [batch_size, attention_space_size].

    # LSTM state is a tuple (h, c) and `current_lstm_state` is a list of such
    # tuples. We use last LSTM layer's `h` to attention-pool current step's
    # image features.
    previous_step_lstm_output = current_lstm_state[-1][0]
    # [batch_size, 1, lstm_space_size]
    hidden_state = tf.expand_dims(previous_step_lstm_output, axis=1)
    # [batch_size, 1, attention_space_size]
    x = self._projection_hidden_layer(hidden_state)
    # [batch_size, num_view, attention_space_size]
    y = self._projection_image_feature(image_features)

    # v_t has shape [batch_size, 1, attention_space_size], representing the
    # current visual context.
    v_t = self.attention([x, y])

    v_t = tf.squeeze(v_t, axis=1)
    next_lstm_output, next_state = self.history_context_encoder(
        v_t, current_lstm_state)

    return (next_lstm_output, next_state) 
Example #29
Source File: brownian_motion_test.py    From tf-quant-finance with Apache License 2.0
def test_paths_time_dependent(self):
    """Tests path properties with time dependent drift and variance."""

    def vol_fn(t):
      return tf.expand_dims(0.2 - 0.1 * tf.exp(-t), axis=-1)

    def variance_fn(t0, t1):
      # The instantaneous volatility is 0.2 - 0.1 e^(-t).
      tot_var = (t1 - t0) * 0.04 - (tf.exp(-2 * t1) - tf.exp(-2 * t0)) * 0.005
      tot_var += 0.04 * (tf.exp(-t1) - tf.exp(-t0))
      return tf.reshape(tot_var, [-1, 1, 1])

    process = BrownianMotion(
        dim=1, drift=0.1, volatility=vol_fn, total_covariance_fn=variance_fn)
    times = np.array([0.2, 0.33, 0.7, 0.9, 1.88])
    num_samples = 10000
    paths = self.evaluate(
        process.sample_paths(
            times,
            num_samples=num_samples,
            initial_state=np.array(0.1),
            seed=12134))

    self.assertArrayEqual(paths.shape, (num_samples, 5, 1))
    self.assertArrayNear(
        np.mean(paths, axis=0).reshape([-1]), 0.1 + times * 0.1, 0.05)

    covars = np.cov(paths.reshape([num_samples, 5]), rowvar=False)
    # Expected covariances are: cov_{ij} = variance_fn(0, min(t_i, t_j))
    min_times = np.minimum(times.reshape([-1, 1]),
                           times.reshape([1, -1])).reshape([-1])
    expected_covars = self.evaluate(
        variance_fn(tf.zeros_like(min_times), min_times))
    self.assertArrayNear(covars.reshape([-1]), expected_covars, 0.005) 
Example #30
Source File: utils.py    From valan with Apache License 2.0
def add_time_batch_dim(*nested_tensors):
  if len(nested_tensors) == 1:
    return tf.nest.map_structure(
        lambda t: tf.expand_dims(tf.expand_dims(t, 0), 0), nested_tensors[0])
  return [
      tf.nest.map_structure(lambda t: tf.expand_dims(tf.expand_dims(t, 0), 0),
                            tensor) for tensor in nested_tensors
  ]
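A short usage sketch: the helper prepends size-1 time and batch axes to every tensor in a (possibly nested) structure, producing the [time, batch, ...] layout the valan agents expect.

import tensorflow.compat.v2 as tf

obs = {'image': tf.zeros([64, 64, 3]), 'heading': tf.constant(0.5)}
expanded = add_time_batch_dim(obs)
print(expanded['image'].shape)    # (1, 1, 64, 64, 3)
print(expanded['heading'].shape)  # (1, 1)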