Python tensorflow.compat.v2.broadcast_to() Examples

The following are 28 code examples of tensorflow.compat.v2.broadcast_to(), collected from open-source projects. The source file and originating project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v2.
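Before diving in, a minimal sketch of the op itself (illustrative values, not from any of the projects below): tf.broadcast_to tiles a tensor out to a compatible larger shape, following NumPy broadcasting rules.

import tensorflow.compat.v2 as tf

x = tf.constant([1, 2, 3])          # shape [3]
y = tf.broadcast_to(x, [2, 3])      # shape [2, 3]
print(y.numpy())                    # [[1 2 3]
                                    #  [1 2 3]]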
Example #1
Source File: array_ops.py    From trax with Apache License 2.0
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')

  a = asarray(a).data
  dtype = dtype or utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(
      tf.broadcast_to(fill_value.data, tf.shape(a)))


# TODO(wangpeng): investigate whether we can make `copy` default to False.
# TODO(wangpeng): utils.np_doc can't handle np.array because np.array is a
#   builtin function. Make utils.np_doc support builtin functions. 
Example #2
Source File: array_ops.py    From trax with Apache License 2.0
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()

  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')

  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')

  z = tf.constant(0, m.dtype)

  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), m, z)) 
Example #3
Source File: array_ops.py    From trax with Apache License 2.0
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()

  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')

  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')

  z = tf.constant(0, m.dtype)

  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), z, m)) 
Example #4
Source File: deep_factorized.py    From compression with Apache License 2.0
def _prob(self, y):
    """Called by the base class to compute likelihoods."""
    # Convert to (channels, 1, batch) format by collapsing dimensions and then
    # commuting channels to front.
    y = tf.broadcast_to(
        y, tf.broadcast_dynamic_shape(tf.shape(y), self.batch_shape_tensor()))
    shape = tf.shape(y)
    y = tf.reshape(y, (-1, 1, self.batch_shape.num_elements()))
    y = tf.transpose(y, (2, 1, 0))

    # Evaluate densities.
    # We can use the special rule below to only compute differences in the left
    # tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
    # for large x, 0 for small x. Subtracting two numbers close to 0 can be done
    # with much higher precision than subtracting two numbers close to 1.
    lower = self._logits_cumulative(y - .5)
    upper = self._logits_cumulative(y + .5)
    # Flip signs if we can move more towards the left tail of the sigmoid.
    sign = tf.stop_gradient(-tf.math.sign(lower + upper))
    p = abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))

    # Convert back to (broadcasted) input tensor shape.
    p = tf.transpose(p, (2, 1, 0))
    p = tf.reshape(p, shape)
    return p 
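The sign-flipping trick above can be checked in isolation. A small sketch with illustrative values (not part of the original source): when both logits sit deep in the right tail, the naive sigmoid difference underflows to zero in float32, while the flipped version keeps full precision.

import tensorflow.compat.v2 as tf

lower = tf.constant([20.0])   # sigmoid(20.) and sigmoid(21.) both round to 1.0
upper = lower + 1.0
naive = tf.sigmoid(upper) - tf.sigmoid(lower)            # 0.0 in float32
sign = tf.stop_gradient(-tf.math.sign(lower + upper))    # -1: flip to left tail
stable = abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))
# stable is ~1.3e-9, the actual sigmoid mass between the two logits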
Example #5
Source File: math_ops.py    From trax with Apache License 2.0
def _tf_gcd(x1, x2):
  def _gcd_cond_fn(x1, x2):
    return tf.reduce_any(x2 != 0)
  def _gcd_body_fn(x1, x2):
    # tf.math.mod will raise an error when any element of x2 is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    x2_safe = tf.where(x2 != 0, x2, tf.constant(1, x2.dtype))
    x1, x2 = (tf.where(x2 != 0, x2, x1),
              tf.where(x2 != 0, tf.math.mod(x1, x2_safe),
                       tf.constant(0, x2.dtype)))
    return (tf.where(x1 < x2, x2, x1), tf.where(x1 < x2, x1, x2))
  if (not np.issubdtype(x1.dtype.as_numpy_dtype, np.integer) or
      not np.issubdtype(x2.dtype.as_numpy_dtype, np.integer)):
    raise ValueError("Arguments to gcd must be integers.")
  shape = tf.broadcast_static_shape(x1.shape, x2.shape)
  x1 = tf.broadcast_to(x1, shape)
  x2 = tf.broadcast_to(x2, shape)
  gcd, _ = tf.while_loop(_gcd_cond_fn, _gcd_body_fn,
                         (tf.math.abs(x1), tf.math.abs(x2)))
  return gcd 
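A usage sketch for the function above (assumed to be in scope together with its module's imports): the while loop runs the elementwise Euclidean algorithm until every remainder is zero.

import tensorflow.compat.v2 as tf

x1 = tf.constant([12, 30, 7], dtype=tf.int32)
x2 = tf.constant([18, 12, 0], dtype=tf.int32)
print(_tf_gcd(x1, x2).numpy())  # [6 6 7] -- note gcd(a, 0) == a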
Example #6
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _conditional_variance_x(self, t, mr_t, sigma_t):
    """Computes the variance of x(t), see [1], Eq. 10.41."""
    t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
    var_x_between_vol_knots = self._variance_int(self._padded_knots,
                                                 self._jump_locations,
                                                 self._jump_values_vol,
                                                 self._jump_values_mr)
    varx_at_vol_knots = tf.concat(
        [self._zero_padding,
         _cumsum_using_matvec(var_x_between_vol_knots)],
        axis=1)

    time_index = tf.searchsorted(self._jump_locations, t)
    vn = tf.concat(
        [self._zero_padding,
         self._jump_locations], axis=1)

    var_x_t = self._variance_int(
        tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
    var_x_t = var_x_t + tf.gather(varx_at_vol_knots, time_index, batch_dims=1)

    var_x_t = (var_x_t[:, 1:] - var_x_t[:, :-1]) * tf.math.exp(
        -2 * tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
    return var_x_t 
Example #7
Source File: custom_loops.py    From tf-quant-finance with Apache License 2.0
def _batch_jacobian(y, x, tape):
  """Computes a Jacobian w.r.t. last dimensions of y and x."""
  # y and x must have the same batch dimensions.
  # For input shapes (b, dy), (b, dx) yields shape (b, dy, dx).
  d = y.shape.as_list()[-1]
  if d is None:
    raise ValueError("Last dimension of state Tensors must be known.")
  grads = []
  for i in range(d):
    w = tf.broadcast_to(tf.one_hot(i, d, dtype=y.dtype), y.shape)
    # We must use tf.UnconnectedGradients.ZERO here and below, because some
    # state components may legitimately not depend on each other or some of the
    # params.
    grad = tape.gradient(y, x, output_gradients=w,
                         unconnected_gradients=tf.UnconnectedGradients.ZERO)
    grads.append(grad)
  return tf.stack(grads, axis=-2) 
Example #8
Source File: continuous_batched.py    From compression with Apache License 2.0
def _compute_indexes(self, broadcast_shape):
    # TODO(jonycgn, ssjhv): Investigate broadcasting in range coding op.
    prior_size = functools.reduce(lambda x, y: x * y, self.prior_shape, 1)
    indexes = tf.range(prior_size, dtype=tf.int32)
    indexes = tf.reshape(indexes, self.prior_shape)
    indexes = tf.broadcast_to(
        indexes, tf.concat([broadcast_shape, self.prior_shape], 0))
    return indexes 
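The same index computation as a standalone sketch (prior_shape and broadcast_shape are assumed, illustrative values): every element of the prior gets a fixed integer index, which is then tiled across the batch dimensions.

import functools
import tensorflow.compat.v2 as tf

prior_shape = (2, 3)
broadcast_shape = tf.constant([4], dtype=tf.int32)
prior_size = functools.reduce(lambda x, y: x * y, prior_shape, 1)  # 6
indexes = tf.range(prior_size, dtype=tf.int32)
indexes = tf.reshape(indexes, prior_shape)
indexes = tf.broadcast_to(
    indexes, tf.concat([broadcast_shape, prior_shape], 0))
# indexes.shape == [4, 2, 3]; every batch slice equals [[0 1 2] [3 4 5]]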
Example #9
Source File: array_ops.py    From trax with Apache License 2.0
def full(shape, fill_value, dtype=None):  # pylint: disable=redefined-outer-name
  """Returns an array with given shape and dtype filled with `fill_value`.

  Args:
    shape: A valid shape object. Could be a native python object or an object
       of type ndarray, numpy.ndarray or tf.TensorShape.
    fill_value: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `fill_value`. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `fill_value` can not be broadcast to shape `shape`.
  """
  fill_value = asarray(fill_value, dtype=dtype)
  if utils.isscalar(shape):
    shape = tf.reshape(shape, [1])
  return arrays_lib.tensor_to_ndarray(tf.broadcast_to(fill_value.data, shape))


# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online). 
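A usage sketch for `full` as defined above (assuming it is in scope, e.g. via trax's NumPy-on-TensorFlow namespace): the fill value is broadcast against the requested shape.

a = full((2, 3), 5)              # [[5 5 5] [5 5 5]]
b = full((2, 3), [1., 2., 3.])   # row broadcasts to [[1. 2. 3.] [1. 2. 3.]]
# full((2,), [1., 2., 3.])       # raises: shape [3] cannot broadcast to [2]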
Example #10
Source File: deep_factorized.py    From compression with Apache License 2.0
def _make_variables(self):
    """Creates the variables representing the parameters of the distribution."""
    channels = self.batch_shape.num_elements()
    filters = (1,) + self.num_filters + (1,)
    scale = self.init_scale ** (1 / (len(self.num_filters) + 1))
    self._matrices = []
    self._biases = []
    self._factors = []

    for i in range(len(self.num_filters) + 1):
      init = tf.math.log(tf.math.expm1(1 / scale / filters[i + 1]))
      init = tf.cast(init, dtype=self.dtype)
      init = tf.broadcast_to(init, (channels, filters[i + 1], filters[i]))
      matrix = tf.Variable(init, name="matrix_{}".format(i))
      self._matrices.append(matrix)

      bias = tf.Variable(
          tf.random.uniform(
              (channels, filters[i + 1], 1), -.5, .5, dtype=self.dtype),
          name="bias_{}".format(i))
      self._biases.append(bias)

      if i < len(self.num_filters):
        factor = tf.Variable(
            tf.zeros((channels, filters[i + 1], 1), dtype=self.dtype),
            name="factor_{}".format(i))
        self._factors.append(factor) 
Example #11
Source File: multi_objective_scalarizer.py    From agents with Apache License 2.0
def call(self, multi_objectives: tf.Tensor) -> tf.Tensor:
    transformed_objectives = tf.maximum(
        multi_objectives * self._slopes + self._offsets, 0)
    nonzero_mask = tf.broadcast_to(
        tf.cast(tf.abs(self._direction) >= self.ALMOST_ZERO, dtype=tf.bool),
        multi_objectives.shape)
    return tf.reduce_min(
        tf.where(nonzero_mask, transformed_objectives / self._direction,
                 multi_objectives.dtype.max),
        axis=1) 
Example #12
Source File: bounded_holiday_calendar.py    From tf-quant-finance with Apache License 2.0
def business_days_between(self, from_dates, to_dates):
    """Calculates number of business between pairs of dates.

    For each pair, the initial date is included in the difference, and the final
    date is excluded. If the final date is the same or earlier than the initial
    date, zero is returned.

    Args:
      from_dates: DateTensor of initial dates.
      to_dates: DateTensor of final dates, should be broadcastable to
        `from_dates`.

    Returns:
       An int32 Tensor with the number of business days between the
       corresponding pairs of dates.
    """
    cumul_bus_days_table = self._compute_cumul_bus_days_table()
    ordinals_1, ordinals_2 = from_dates.ordinal(), to_dates.ordinal()
    with tf.control_dependencies(
        self._assert_ordinals_in_bounds(ordinals_1) +
        self._assert_ordinals_in_bounds(ordinals_2)):
      ordinals_2 = tf.broadcast_to(ordinals_2, ordinals_1.shape)
      cumul_bus_days_1 = self._gather(cumul_bus_days_table,
                                      ordinals_1 - self._ordinal_offset + 1)
      cumul_bus_days_2 = self._gather(cumul_bus_days_table,
                                      ordinals_2 - self._ordinal_offset + 1)
      return tf.math.maximum(cumul_bus_days_2 - cumul_bus_days_1, 0) 
Example #13
Source File: tensor_wrapper.py    From tf-quant-finance with Apache License 2.0
def broadcast_to(self, shape):
    """See tf.broadcast_to."""
    return self._apply_op(lambda t: tf.broadcast_to(t, shape)) 
Example #14
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _conditional_mean_x(self, t, mr_t, sigma_t):
    """Computes the drift term in [1], Eq. 10.39."""
    t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
    time_index = tf.searchsorted(self._jump_locations, t)
    vn = tf.concat([self._zero_padding, self._jump_locations], axis=1)
    y_between_vol_knots = self._y_integral(self._padded_knots,
                                           self._jump_locations,
                                           self._jump_values_vol,
                                           self._jump_values_mr)

    y_at_vol_knots = tf.concat(
        [self._zero_padding,
         _cumsum_using_matvec(y_between_vol_knots)], axis=1)

    ex_between_vol_knots = self._ex_integral(self._padded_knots,
                                             self._jump_locations,
                                             self._jump_values_vol,
                                             self._jump_values_mr,
                                             y_at_vol_knots[:, :-1])

    ex_at_vol_knots = tf.concat(
        [self._zero_padding,
         _cumsum_using_matvec(ex_between_vol_knots)], axis=1)

    c = tf.gather(y_at_vol_knots, time_index, batch_dims=1)
    exp_x_t = self._ex_integral(
        tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t, c)
    exp_x_t = exp_x_t + tf.gather(ex_at_vol_knots, time_index, batch_dims=1)
    exp_x_t = (exp_x_t[:, 1:] - exp_x_t[:, :-1]) * tf.math.exp(
        -tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
    return exp_x_t 
Example #15
Source File: zero_coupon_bond_option.py    From tf-quant-finance with Apache License 2.0
def _analytic_valuation(discount_rate_fn, model, strikes, expiries, maturities,
                        dim, is_call_options):
  """Performs analytic valuation."""

  discount_factor_expiry = tf.math.exp(
      -discount_rate_fn(expiries) * expiries)
  input_shape = expiries.shape
  variance = _bond_option_variance(
      model, tf.reshape(expiries, shape=[-1]), tf.reshape(maturities, [-1]),
      dim)
  variance = tf.reshape(variance, [dim] + input_shape)
  discount_factor_maturity = tf.math.exp(-discount_rate_fn(maturities) *
                                         maturities)
  forward_bond_price = discount_factor_maturity / discount_factor_expiry
  sqrt_variance = tf.math.sqrt(variance)
  d1 = (tf.expand_dims(tf.math.log(forward_bond_price / strikes), axis=0) +
        0.5 * variance) / sqrt_variance
  d2 = d1 - tf.math.sqrt(variance)
  option_value_call = (
      tf.expand_dims(discount_factor_maturity, axis=0) * _ncdf(d1) -
      tf.expand_dims(strikes * discount_factor_expiry, axis=0) * _ncdf(d2))
  option_value_put = (
      tf.expand_dims(strikes * discount_factor_expiry, axis=0) * _ncdf(-d2)
      - tf.expand_dims(discount_factor_maturity, axis=0) * _ncdf(-d1))

  is_call_options = tf.broadcast_to(is_call_options, [dim] + strikes.shape)
  option_value = tf.where(is_call_options, option_value_call,
                          option_value_put)

  # Make `dim` as the last dimension and return.
  return tf.transpose(
      option_value,
      perm=[i for i in range(1, len(option_value.shape.as_list()))] + [0])


# TODO(b/158501671): Clean-up this implementation. 
Example #16
Source File: lsm.py    From tf-quant-finance with Apache License 2.0
def _updated_cashflow(num_times, exercise_index, exercise_value,
                      expected_continuation, cashflow):
  """Revises the cashflow tensor where options will be exercised earlier."""
  do_exercise_bool = exercise_value > expected_continuation
  do_exercise = tf.cast(do_exercise_bool, exercise_value.dtype)
  # Shape [num_samples, payoff_dim]
  scaled_do_exercise = tf.where(do_exercise_bool, exercise_value,
                                tf.zeros_like(exercise_value))
  # This picks out the samples where we now wish to exercise.
  # Shape [num_samples, payoff_dim, 1]
  new_samp_masked = tf.expand_dims(scaled_do_exercise, 2)
  # This should be one on the current time step and zero otherwise.
  # This is an array with nonzero entries showing newly exercised payoffs.
  pad_shape = scaled_do_exercise.shape.as_list()
  zeros_before = tf.zeros(pad_shape + [exercise_index - 1],
                          dtype=scaled_do_exercise.dtype)
  zeros_after = tf.zeros(pad_shape + [num_times - exercise_index],
                         dtype=scaled_do_exercise.dtype)
  new_cash = tf.concat([zeros_before, new_samp_masked, zeros_after], -1)

  # Has shape [num_samples, payoff_dim, 1]
  old_samp_masker = tf.expand_dims(1 - do_exercise, 2)
  # Broadcast to shape [num_samples, payoff_dim, num_times - exercise_index]
  old_samp_masker_after = tf.broadcast_to(
      old_samp_masker, pad_shape + [num_times - exercise_index])
  # Has shape `[num_samples, payoff_dim, exercise_index]`
  zeros_before = tf.zeros(pad_shape + [exercise_index],
                          dtype=scaled_do_exercise.dtype)
  # Shape [num_samples, payoff_dim, num_times]
  old_mask = tf.concat([zeros_before,
                        old_samp_masker_after], -1)
  # Shape [num_samples, payoff_dim, num_times]
  old_cash = old_mask * cashflow
  return new_cash + old_cash 
Example #17
Source File: utils.py    From tf-quant-finance with Apache License 2.0
def broadcast_batch_shape(x, batch_shape):
  """Broadcasts batch shape of `x`."""
  return tf.broadcast_to(x, tf.TensorShape(batch_shape) + x.shape[-1]) 
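A usage sketch with illustrative shapes (assuming the helper above is in scope): the tensor is tiled across the new leading batch dimensions while its last dimension is preserved.

import tensorflow.compat.v2 as tf

x = tf.constant([1., 2., 3.])          # shape [3]
y = broadcast_batch_shape(x, [4, 2])   # shape [4, 2, 3]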
Example #18
Source File: piecewise.py    From tf-quant-finance with Apache License 2.0
def _try_broadcast_to(x, batch_shape, name):
  """Broadcasts batch shape of `x` to a `batch_shape` if possible."""
  batch_shape_x = x.shape.as_list()[:-1]
  if batch_shape_x != batch_shape:
    try:
      np.broadcast_to(np.zeros(batch_shape_x), batch_shape)
    except ValueError:
      raise ValueError('Batch shapes of `{2}` should be broadcastable with {0} '
                       'but it is {1} instead'.format(
                           batch_shape, batch_shape_x, name))
    return tf.broadcast_to(x, batch_shape + x.shape[-1:])
  return x 
Example #19
Source File: parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _prepare_boundary_conditions(boundary_tensor, value_grid):
  """Prepares values received from boundary_condition callables."""
  if boundary_tensor is None:
    return None
  boundary_tensor = tf.convert_to_tensor(boundary_tensor, value_grid.dtype)
  # Broadcast to batch dimensions.
  broadcast_shape = tf.shape(value_grid)[:-1]
  return tf.broadcast_to(boundary_tensor, broadcast_shape) 
Example #20
Source File: parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _prepare_pde_coeffs(raw_coeffs, value_grid):
  """Prepares values received from second_order_coeff_fn and similar."""
  if raw_coeffs is None:
    return None
  dtype = value_grid.dtype
  coeffs = tf.convert_to_tensor(raw_coeffs, dtype=dtype)

  broadcast_shape = tf.shape(value_grid)
  coeffs = tf.broadcast_to(coeffs, broadcast_shape)
  return coeffs 
Example #21
Source File: multidim_parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _prepare_boundary_conditions(boundary_tensor, value_grid, batch_rank, dim):
  """Prepares values received from boundary_condition callables."""
  if boundary_tensor is None:
    return None
  boundary_tensor = tf.convert_to_tensor(boundary_tensor, value_grid.dtype)
  # Broadcast to the shape of the boundary: it is the shape of value grid with
  # one dimension removed.
  dim_to_remove = batch_rank + dim
  broadcast_shape = []
  # Shape slicing+concatenation seems error-prone, so let's do it simply.
  for i, size in enumerate(value_grid.shape):
    if i != dim_to_remove:
      broadcast_shape.append(size)
  return tf.broadcast_to(boundary_tensor, broadcast_shape) 
Example #22
Source File: multidim_parabolic_equation_stepper.py    From tf-quant-finance with Apache License 2.0
def _prepare_pde_coeff(raw_coeff, value_grid):
  # Converts values received from second_order_coeff_fn and similar Callables
  # into a format usable further down in the pipeline.
  if raw_coeff is None:
    return None
  dtype = value_grid.dtype
  coeff = tf.convert_to_tensor(raw_coeff, dtype=dtype)
  coeff = tf.broadcast_to(coeff, tf.shape(value_grid))
  return coeff 
Example #23
Source File: math_ops.py    From trax with Apache License 2.0
def polyval(p, x):
  def f(p, x):
    if p.shape.rank == 0:
      p = tf.reshape(p, [1])
    p = tf.unstack(p)
    # TODO(wangpeng): Make tf version take a tensor for p instead of a list.
    y = tf.math.polyval(p, x)
    # If the polynomial is 0-order, numpy requires the result to be broadcast to
    # `x`'s shape.
    if len(p) == 1:
      y = tf.broadcast_to(y, x.shape)
    return y
  return _bin_op(f, p, x) 
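The 0-order corner case is easy to see directly (illustrative values): tf.math.polyval with a single coefficient simply returns that coefficient, so the result must be broadcast to match np.polyval's shape semantics.

import tensorflow.compat.v2 as tf

p0 = tf.constant(2.)
x = tf.constant([1., 2., 3.])
y = tf.math.polyval([p0], x)      # scalar: just the coefficient itself
y = tf.broadcast_to(y, x.shape)   # [2. 2. 2.], matching np.polyval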
Example #24
Source File: array_ops.py    From trax with Apache License 2.0
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
  return full(shape, array) 
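A usage sketch (assuming `full` from Example #9 is in scope): reusing `full` gives np.broadcast_to semantics for free, since `full` already broadcasts its fill value to the target shape.

import tensorflow.compat.v2 as tf

broadcast_to(tf.constant([1, 2, 3]), (2, 3))  # ndarray [[1 2 3] [1 2 3]]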
Example #25
Source File: lsm.py    From tf-quant-finance with Apache License 2.0
def expected_exercise_fn(design, continuation_value, exercise_value):
  """Returns the expected continuation value for each path.

  Args:
    design: A real `Tensor` of shape `[basis_size, num_samples]`.
    continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of
      the same dtype as `design`. The optimal value of the option conditional on
      not exercising now or earlier, taking future information into account.
    exercise_value: A `Tensor` of the same shape and dtype as
      `continuation_value`. Value of the option if exercised immediately at
      the current time.

  Returns:
    A `Tensor` of the same shape and dtype as `continuation_value` whose
    `(n, v)`-th entry represents the expected continuation value of sample path
    `n` under the `v`-th payoff scheme.
  """
  # We wish to value each option under different payoffs, expressed through a
  # multidimensional payoff function. While the basis calculated from the sample
  # paths is the same for each payoff, the LSM algorithm requires us to fit a
  # regression model only on the in-the-money paths, which are payoff dependent,
  # hence we create multiple copies of the regression design (basis) matrix and
  # zero out rows for out of the money paths under each payoff.
  batch_design = tf.broadcast_to(
      tf.expand_dims(design, -1), design.shape + [continuation_value.shape[-1]])
  mask = tf.cast(exercise_value > 0, design.dtype)
  # Zero out contributions from samples we'd never exercise at this point (i.e.,
  # these extra observations do not change the regression coefficients).
  masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))
  # For design matrix X and response y, the coefficients beta of the best linear
  # unbiased estimate are contained in the equation X'X beta = X'y. Here `lhs`
  # is X'X and `rhs` is X'y, or rather a tensor of such left hand and right hand
  # sides, one for each payoff dimension.
  lhs = tf.matmul(masked, masked, transpose_a=True)
  # Use pseudo inverse for the regression matrix to ensure stability of the
  # algorithm.
  lhs_pinv = tf.linalg.pinv(lhs)
  rhs = tf.matmul(
      masked,
      tf.expand_dims(tf.transpose(continuation_value), -1),
      transpose_a=True)
  beta = tf.linalg.matmul(lhs_pinv, rhs)
  continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)
  return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0) 
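The regression step in isolation, as a toy sketch with assumed shapes (not from the original source): the coefficients solve the normal equations X'X beta = X'y, and the pseudo-inverse keeps the solve stable when X'X is singular or ill-conditioned.

import tensorflow.compat.v2 as tf

X = tf.constant([[1., 1.], [1., 2.], [1., 3.]])  # [num_samples, basis_size]
y = tf.constant([[2.], [3.], [4.]])              # [num_samples, 1]
lhs = tf.matmul(X, X, transpose_a=True)          # X'X
rhs = tf.matmul(X, y, transpose_a=True)          # X'y
beta = tf.matmul(tf.linalg.pinv(lhs), rhs)       # [[1.], [1.]]: fit y = 1 + x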
Example #26
Source File: cashflow_stream.py    From tf-quant-finance with Apache License 2.0
def _setup(self, coupon_spec):
    """Setup tensors for efficient computations."""

    if isinstance(coupon_spec, list):
      cpn_frequency = dates.PeriodTensor.stack(
          [x.coupon_frequency for x in coupon_spec], axis=0)
      businessday_rule = coupon_spec[-1].businessday_rule
      notional = tf.convert_to_tensor([x.notional for x in coupon_spec],
                                      dtype=self._dtype)
      fixed_rate = tf.convert_to_tensor([x.coupon_rate for x in coupon_spec],
                                        dtype=self._dtype)
      daycount_convention = coupon_spec[-1].daycount_convention
    else:
      cpn_frequency = coupon_spec.coupon_frequency
      businessday_rule = coupon_spec.businessday_rule
      notional = tf.broadcast_to(
          tf.convert_to_tensor(coupon_spec.notional, dtype=self._dtype),
          self._start_date.shape)
      fixed_rate = tf.broadcast_to(
          tf.convert_to_tensor(coupon_spec.coupon_rate, dtype=self._dtype),
          self._start_date.shape)
      daycount_convention = coupon_spec.daycount_convention

    cpn_dates, _ = self._generate_schedule(cpn_frequency, businessday_rule)
    payment_dates = cpn_dates[:, 1:]

    notional = tf.repeat(notional, payment_dates.shape.as_list()[-1])
    daycount_fractions = rc.get_daycount_fraction(
        cpn_dates[:, :-1],
        cpn_dates[:, 1:],
        daycount_convention,
        dtype=self._dtype)

    coupon_rate = tf.expand_dims(fixed_rate, axis=-1)
    coupon_rate = tf.repeat(coupon_rate, payment_dates.shape.as_list()[-1])
    contract_index = tf.repeat(tf.range(0, self._batch_size),
                               payment_dates.shape.as_list()[-1])

    self._num_cashflows = payment_dates.shape.as_list()[-1]
    self._payment_dates = payment_dates.reshape([-1])
    self._notional = notional
    self._daycount_fractions = tf.reshape(daycount_fractions, [-1])
    self._coupon_rate = coupon_rate
    self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)
    self._contract_index = contract_index 
Example #27
Source File: math_ops.py    From trax with Apache License 2.0
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
  if axis is not None and not isinstance(axis, six.integer_types):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError('`axis` must be an integer. Tuple of ints is not '
                     'supported yet. Got type: %s' % type(axis))
  a = array_ops.array(a)
  if weights is None:  # Treat all weights as 1
    if not np.issubdtype(a.dtype, np.inexact):
      a = a.astype(utils.result_type(a.dtype, dtypes.default_float_type()))
    avg = tf.reduce_mean(a.data, axis=axis)
    if returned:
      if axis is None:
        weights_sum = tf.size(a.data)
      else:
        weights_sum = tf.shape(a.data)[axis]
      weights_sum = tf.cast(weights_sum, a.data.dtype)
  else:
    if np.issubdtype(a.dtype, np.inexact):
      out_dtype = utils.result_type(a.dtype, weights)
    else:
      out_dtype = utils.result_type(a.dtype, weights,
                                    dtypes.default_float_type())
    a = array_ops.array(a, out_dtype).data
    weights = array_ops.array(weights, out_dtype).data

    def rank_equal_case():
      tf.debugging.Assert(tf.reduce_all(tf.shape(a) == tf.shape(weights)),
                          [tf.shape(a), tf.shape(weights)])
      weights_sum = tf.reduce_sum(weights, axis=axis)
      avg = tf.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum
    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:
      def rank_not_equal_case():
        tf.debugging.Assert(tf.rank(weights) == 1, [tf.rank(weights)])
        weights_sum = tf.reduce_sum(weights)
        axes = tf.convert_to_tensor([[axis], [0]])
        avg = tf.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum
      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = utils.cond(tf.rank(a) == tf.rank(weights),
                                    rank_equal_case, rank_not_equal_case)

  avg = array_ops.array(avg)
  if returned:
    weights_sum = array_ops.broadcast_to(weights_sum, tf.shape(avg.data))
    return avg, weights_sum
  return avg 
Example #28
Source File: continuous_batched.py    From compression with Apache License 2.0
def __init__(self, prior, coding_rank, compression=False,
               likelihood_bound=1e-9, tail_mass=2**-8,
               range_coder_precision=12):
    """Initializer.

    Arguments:
      prior: A `tfp.distributions.Distribution` object. A density model fitting
        the marginal distribution of the bottleneck data with additive uniform
        noise, which is shared a priori between the sender and the receiver. For
        best results, the distribution should be flexible enough to have a
        unit-width uniform distribution as a special case, since this is the
        marginal distribution for bottleneck dimensions that are constant. The
        distribution parameters may not depend on data (they must be either
        variables or constants).
      coding_rank: Integer. Number of innermost dimensions considered a coding
        unit. Each coding unit is compressed to its own bit string, and the
        `bits()` method sums over each coding unit.
      compression: Boolean. If set to `True`, the range coding tables used by
        `compress()` and `decompress()` will be built on instantiation. If set
        to `False`, these two methods will not be accessible.
      likelihood_bound: Float. Lower bound for likelihood values, to prevent
        training instabilities.
      tail_mass: Float. Approximate probability mass which is range encoded with
        less precision, by using a Golomb-like code.
      range_coder_precision: Integer. Precision passed to the range coding op.

    Raises:
      RuntimeError: when attempting to instantiate an entropy model with
        `compression=True` and not in eager execution mode.
    """
    if coding_rank < prior.batch_shape.rank:
      raise ValueError(
          "`coding_rank` can't be smaller than batch rank of prior.")
    super().__init__(
        prior, coding_rank, compression=compression,
        likelihood_bound=likelihood_bound, tail_mass=tail_mass,
        range_coder_precision=range_coder_precision)

    quantization_offset = helpers.quantization_offset(prior)
    if self.compression:
      # Optimization: if the quantization offset is zero, we don't need to
      # subtract/add it when quantizing, and we don't need to serialize its
      # value. Note that this code will only work in eager mode.
      # TODO(jonycgn): Reconsider if this optimization is worth keeping once
      # the implementation is stable.
      if tf.executing_eagerly() and tf.reduce_all(
          tf.equal(quantization_offset, 0.)):
        quantization_offset = None
      else:
        quantization_offset = tf.broadcast_to(
            quantization_offset, self.prior_shape)
        quantization_offset = tf.Variable(
            quantization_offset, trainable=False, name="quantization_offset")
    self._quantization_offset = quantization_offset