Python tensorflow.compat.v2.transpose() Examples

The following are 30 code examples of tensorflow.compat.v2.transpose(), collected from open-source projects. You can go to the original project or source file by following the links above each example.
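Before the examples, a minimal sketch of the basic call itself (the tensor values here are illustrative assumptions, not taken from any project below):

import tensorflow.compat.v2 as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])         # shape [2, 3]
tf.transpose(x)                      # perm defaults to reversed axes -> shape [3, 2]
tf.transpose(x, perm=[1, 0])         # the same permutation, spelled out explicitly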
Example #1
Source File: array_ops.py    From trax with Apache License 2.0
def transpose(a, axes=None):
  """Permutes dimensions of the array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    axes: array_like. A list of ints with length rank(a), or None, specifying
      the order of permutation. The i-th dimension of the output array
      corresponds to the axes[i]-th dimension of `a`. If None, the axes are
      reversed.

  Returns:
    An ndarray.
  """
  a = asarray(a)
  if axes is not None:
    axes = asarray(axes)
  return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes)) 
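A short usage sketch of this NumPy-style wrapper (assuming the module is imported as `tf_np`; inputs are illustrative):

a = tf_np.asarray([[1, 2], [3, 4], [5, 6]])   # shape (3, 2)
tf_np.transpose(a)                            # axes reversed -> shape (2, 3)
tf_np.transpose(a, axes=[1, 0])               # same permutation, given explicitly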
Example #2
Source File: deep_factorized.py    From compression with Apache License 2.0
def _prob(self, y):
    """Called by the base class to compute likelihoods."""
    # Convert to (channels, 1, batch) format by collapsing dimensions and then
    # commuting channels to front.
    y = tf.broadcast_to(
        y, tf.broadcast_dynamic_shape(tf.shape(y), self.batch_shape_tensor()))
    shape = tf.shape(y)
    y = tf.reshape(y, (-1, 1, self.batch_shape.num_elements()))
    y = tf.transpose(y, (2, 1, 0))

    # Evaluate densities.
    # We can use the special rule below to only compute differences in the left
    # tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
    # for large x, 0 for small x. Subtracting two numbers close to 0 can be done
    # with much higher precision than subtracting two numbers close to 1.
    lower = self._logits_cumulative(y - .5)
    upper = self._logits_cumulative(y + .5)
    # Flip signs if we can move more towards the left tail of the sigmoid.
    sign = tf.stop_gradient(-tf.math.sign(lower + upper))
    p = abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))

    # Convert back to (broadcasted) input tensor shape.
    p = tf.transpose(p, (2, 1, 0))
    p = tf.reshape(p, shape)
    return p 
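The sign-flip trick in the comments can be checked in isolation; a minimal sketch with illustrative values:

lower = tf.constant(18.0)                  # logits deep in the sigmoid's right tail
upper = tf.constant(19.0)
tf.sigmoid(upper) - tf.sigmoid(lower)      # 0.0 in float32: both operands round to 1.0
sign = -tf.math.sign(lower + upper)        # -1.0: mirror into the left tail
abs(tf.sigmoid(sign * upper) - tf.sigmoid(sign * lower))  # ~9.6e-9, computed accurately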
Example #3
Source File: __init__.py    From language with Apache License 2.0
def set_initial_value(self, rel_name, m):
    """Provide value that will be used to initialize a relation matrix.

    Args:
      rel_name: string name of relation
      m: a matrix that can be used as an argument to scipy.sparse.coo_matrix,
        for a sparse relation, or any matrix, for a dense relation

    Raises:
      RelationNameError: If the relation cannot be found.
      ValueError: If the relation and initial_value have different shapes.
    """
    if not self.is_relation(rel_name):
      raise RelationNameError(rel_name, 'Relation is not defined.')
    expected_shape = self.get_shape(rel_name)
    if m.shape[1] != expected_shape[1]:
      raise ValueError(
          'relation and initial_value have different columns: %d vs %d' %
          (expected_shape[1], m.shape[1]))
    if self.is_dense(rel_name):
      self._np_initval[rel_name] = m.transpose()
    else:
      self._np_initval[rel_name] = scipy.sparse.coo_matrix(m.transpose()) 
Example #4
Source File: util.py    From language with Apache License 2.0
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, of shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    a one-dimensional tensor top_labels, where top_labels[i] = 1.0 iff the
    top-scoring prediction for batch element i has label 1.0.
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather) 
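A usage sketch with toy inputs (the values are assumptions for illustration):

labels = tf.constant([[0., 1.], [1., 0.]])
predictions = tf.constant([[0.2, 0.8], [0.3, 0.7]])
top = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# top == [1.0, 0.0]; tf.reduce_mean(top) then gives precision at 1, here 0.5.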
Example #5
Source File: mnist.py    From datasets with Apache License 2.0
def _info(self):
    return tfds.core.DatasetInfo(
        builder=self,
        description=(
            "The EMNIST dataset is a set of handwritten character digits "
            "derived from the NIST Special Database 19 and converted to "
            "a 28x28 pixel image format and dataset structure that directly "
            "matches the MNIST dataset.\n\n"
            "Note: Like the original EMNIST data, images provided here are "
            "inverted horizontally and rotated 90 anti-clockwise. You can use "
            "`tf.transpose` within `ds.map` to convert the images to a "
            "human-friendlier format."),
        features=tfds.features.FeaturesDict({
            "image":
                tfds.features.Image(shape=MNIST_IMAGE_SHAPE),
            "label":
                tfds.features.ClassLabel(
                    num_classes=self.builder_config.class_number),
        }),
        supervised_keys=("image", "label"),
        homepage=("https://www.nist.gov/itl/products-and-services/"
                  "emnist-dataset"),
        citation=_EMNIST_CITATION,
    ) 
Example #6
Source File: tensor_wrapper.py    From tf-quant-finance with Apache License 2.0
def _apply_op(self, op_fn):
    """Applies given tensor-to-tensor op.

    This method is used for implementing ops that take a tensor and return a new
    tensor, such as tf.expand_dims or tf.transpose. Implementing wrappers
    should apply `op_fn` to the backing tensor(s) and return a new wrapper
    instance with the updated backing tensor.

    Args:
       op_fn: Callable that applies tensor-to-tensor op to the given Tensor.
        E.g. applies tf.expand_dims.

    Returns:
      A TensorWrapper instance with updated backing tensor(s).
    """
    raise NotImplementedError() 
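Because `_apply_op` is abstract here, a minimal sketch of what an implementing wrapper might look like (the subclass name and backing field are hypothetical):

class SimpleTensorWrapper(TensorWrapper):
  """Hypothetical wrapper backed by a single tensor."""

  def __init__(self, tensor):
    self._tensor = tensor

  def _apply_op(self, op_fn):
    # Apply the tensor-to-tensor op and wrap the result in a new instance.
    return SimpleTensorWrapper(op_fn(self._tensor))

With such a subclass, the `transpose` method in Example #19 returns a new SimpleTensorWrapper holding tf.transpose of the backing tensor.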
Example #7
Source File: vector_hull_white.py    From tf-quant-finance with Apache License 2.0
def _get_parameters(times, *params):
  """Gets parameter values at at specified `times`."""
  res = []
  for param in params:
    if isinstance(param, piecewise.PiecewiseConstantFunc):
      jump_locations = param.jump_locations()
      if len(jump_locations.shape) > 1:
        # If `jump_locations` has batch dimension, transpose the result
        # Shape [num_times, dim]
        res.append(tf.transpose(param(times)))
      else:
        # Shape [num_times, dim]
        res.append(param(times))
    elif callable(param):
      # Used only in drift and volatility computation.
      # Here `times` is of shape [1]
      t = tf.squeeze(times)
      # The result has to have shape [1] + param.shape
      res.append(tf.expand_dims(param(t), 0))
    else:
      res.append(param + tf.zeros(times.shape + param.shape, dtype=times.dtype))
  return res 
Example #8
Source File: array_ops.py    From trax with Apache License 2.0
def reshape(a, newshape, order='C'):
  """order argument can only b 'C' or 'F'."""
  if order not in {'C', 'F'}:
    raise ValueError('Unsupported order argument {}'.format(order))

  a = asarray(a)
  if isinstance(newshape, arrays_lib.ndarray):
    newshape = newshape.data
  if isinstance(newshape, int):
    newshape = [newshape]

  if order == 'F':
    r = tf.transpose(tf.reshape(tf.transpose(a.data), newshape[::-1]))
  else:
    r = tf.reshape(a.data, newshape)

  return utils.tensor_to_ndarray(r) 
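The double-transpose trick for Fortran order can be verified against NumPy; a small sketch (inputs illustrative):

a = tf.constant([[1, 2, 3],
                 [4, 5, 6]])                       # shape [2, 3]
tf.reshape(a, [3, 2])                              # C order:  [[1, 2], [3, 4], [5, 6]]
tf.transpose(tf.reshape(tf.transpose(a), [2, 3]))  # F order:  [[1, 5], [4, 3], [2, 6]]
# The second result matches np.reshape(a, (3, 2), order='F').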
Example #9
Source File: array_ops.py    From trax with Apache License 2.0
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = tf.rank(m)
  ax1, ax2 = utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access

  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    return flip(flip(m, ax1), ax2)
  else:
    perm = tf.range(m_rank)
    perm = tf.tensor_scatter_nd_update(perm, [[ax1], [ax2]], [ax2, ax1])

    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2) 
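A quick sanity check against NumPy's rot90 semantics (assuming the module is imported as `tf_np`):

m = tf_np.asarray([[1, 2],
                   [3, 4]])
tf_np.rot90(m)        # k=1: [[2, 4], [1, 3]]
tf_np.rot90(m, k=2)   # [[4, 3], [2, 1]], i.e. both axes flipped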
Example #10
Source File: douglas_adi.py    From tf-quant-finance with Apache License 2.0
def _apply_tridiag_matrix_explicitly(values, superdiag, diag, subdiag,
                                     dim, n_dims):
  """Applies tridiagonal matrix explicitly."""
  perm = _get_permutation(values, n_dims, dim)

  # Make the given dimension the last one in the tensors, treat all the
  # other spatial dimensions as batch dimensions.
  if perm is not None:
    values = tf.transpose(values, perm)
    superdiag, diag, subdiag = (
        tf.transpose(c, perm) for c in (superdiag, diag, subdiag))

  values = tf.squeeze(
      tf.linalg.tridiagonal_matmul((superdiag, diag, subdiag),
                                   tf.expand_dims(values, -1),
                                   diagonals_format='sequence'), -1)

  # Transpose back to how it was.
  if perm is not None:
    values = tf.transpose(values, perm)
  return values 
Example #11
Source File: utils.py    From valan with Apache License 2.0
def get_first_true_column(x):
  """Transforms `x` into a tensor which has all elements set to False except the first True in the column.

  If x is [[True, False, False],
           [True, False, False],
           [False, True, False],
           [False, True, True]]
  the output should be
          [[True, False, False],
           [False, False, False],
           [False, True, False],
           [False, False, True]
          ]

  Args:
    x: A bool tensor with shape [num_steps, batch_size]

  Returns:
    A bool tensor with the same shape.
  """
  x = tf.transpose(x, perm=[1, 0])
  # Get indices
  y = tf.where(tf.equal(x, True))
  # Find first column in every row which is True
  first_true_cols = tf.cast(
      tf.math.segment_min(data=y[:, 1], segment_ids=y[:, 0]), tf.int32)
  # Convert back to indices
  first_true_indices = tf.stack(
      [tf.range(tf.size(first_true_cols)), first_true_cols], axis=1)
  # Now create the mask
  first_true_mask_sparse = tf.SparseTensor(
      indices=tf.cast(first_true_indices, tf.int64),
      values=tf.ones([tf.size(first_true_cols)], dtype=tf.bool),
      dense_shape=x.shape)
  first_true_mask = tf.sparse.to_dense(
      first_true_mask_sparse, default_value=False)
  return tf.transpose(first_true_mask, perm=[1, 0]) 
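A usage sketch reproducing the docstring's example (input values taken from the docstring):

x = tf.constant([[True, False, False],
                 [True, False, False],
                 [False, True, False],
                 [False, True, True]])
get_first_true_column(x)
# -> [[True,  False, False],
#     [False, False, False],
#     [False, True,  False],
#     [False, False, True ]]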
Example #12
Source File: uniform_noise.py    From compression with Apache License 2.0
def _quantization_offset(self):
    # Picks the "peakiest" of the component quantization offsets.
    offsets = helpers.quantization_offset(self.components_distribution)
    rank = self.batch_shape.rank
    transposed_offsets = tf.transpose(offsets, [rank] + list(range(rank)))
    component = tf.argmax(self.log_prob(transposed_offsets), axis=0)
    return tf.gather(offsets, component, axis=-1, batch_dims=rank) 
Example #13
Source File: nql_test.py    From language with Apache License 2.0
def test_grid_tf(self):
    x_np = self.minibatch_np([cell(2, 2), cell(3, 3)])
    x = self.context.as_nql(x_np, 'place_t')
    for (d, ij_list) in zip(
        'nsew', [[(1, 2), (2, 3)], [(3, 2)], [(2, 3)], [(2, 1), (3, 2)]]):
      mat = self.context.get_tf_tensor(d)
      y_mat = tf.transpose(
          a=tf.sparse.sparse_dense_matmul(mat, tf.transpose(a=x.tf))).numpy()
      y_dict = self.context.as_dicts(y_mat, 'place_t')
      for k, (i, j) in enumerate(ij_list):
        self.assertIn(cell(i, j), y_dict[k]) 
Example #14
Source File: nql_test.py    From language with Apache License 2.0
def test_grid_tf(self):
    # check the tensorflow computations
    x = self.context.one(cell(2, 2), 'place_t')
    for (d, (i, j)) in zip('nsew', [(1, 2), (3, 2), (2, 3), (2, 1)]):
      mat = self.context.get_tf_tensor(d)
      y_vec = tf.transpose(
          a=tf.sparse.sparse_dense_matmul(mat, tf.transpose(a=x.tf)))
      self.assertEqual(self._vec2cell(y_vec), cell(i, j)) 
Example #15
Source File: nql_test.py    From language with Apache License 2.0
def test_grid_np(self):
    # check the numpy computations
    x_vec = self.context.one_hot_numpy_array(cell(2, 2), 'place_t')
    for (d, (i, j)) in zip('nsew', [(1, 2), (3, 2), (2, 3), (2, 1)]):
      mat = self.context._np_initval[d]
      y_vec = mat.dot(x_vec.transpose()).transpose()
      self.assertEqual(self._vec2cell(y_vec), cell(i, j)) 
Example #16
Source File: __init__.py    From language with Apache License 2.0
def query_kg(self,
               rel_name,
               entity_name,
               as_object=False):
    """Simple method to query the KG, mainly for debugging.

    Finds things related to the named entity_name by the named relation. When
    as_object is True, entity_name is treated as the second argument (object)
    of the triple rather than the first, similar to calling follow with
    inverted == -1.

    Args:
      rel_name: string name for KG relation
      entity_name: string name for KG entity
      as_object: if True use inverse of relation.

    Yields:
      Tuples of entity string names and their weights.
    """
    m = self._np_initval[rel_name].transpose()
    query_type_finder_fun = self.get_range if as_object else self.get_domain
    answer_type_finder_fun = self.get_domain if as_object else self.get_range
    array_to_match = m.col if as_object else m.row
    array_to_extract_from = m.row if as_object else m.col
    answer_type = answer_type_finder_fun(rel_name)
    i = self.get_id(entity_name, query_type_finder_fun(rel_name))
    for k in range(m.nnz):
      if array_to_match[k] == i:
        entity_name = self.get_entity_name(array_to_extract_from[k],
                                           answer_type)
        yield entity_name, m.data[k]


# Helper classes and utilities 
Example #17
Source File: array_ops.py    From trax with Apache License 2.0
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  a = asarray(a)

  a_rank = tf.rank(a)
  if axis1 < 0:
    axis1 += a_rank
  if axis2 < 0:
    axis2 += a_rank

  perm = tf.range(a_rank)
  perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]], [axis2, axis1])
  a = tf.transpose(a, perm)

  return utils.tensor_to_ndarray(a) 
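Usage sketch (assuming the module is imported as `tf_np`):

a = tf_np.zeros((2, 3, 4))
tf_np.swapaxes(a, 0, 2).shape    # (4, 3, 2)
tf_np.swapaxes(a, -1, -2).shape  # (2, 4, 3); negative axes are normalized first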
Example #18
Source File: __init__.py    From language with Apache License 2.0
def get_initial_value(
      self, rel_name):
    """Return value that will be used to initialize a relation matrix.

    Args:
      rel_name: string name of relation

    Returns:
       A numpy matrix or scipy sparse matrix.
    """
    return self._np_initval[rel_name].transpose() 
Example #19
Source File: tensor_wrapper.py    From tf-quant-finance with Apache License 2.0
def transpose(self, perm=None):
    """See tf.transpose."""
    return self._apply_op(lambda t: tf.transpose(t, perm)) 
Example #20
Source File: douglas_adi.py    From tf-quant-finance with Apache License 2.0
def _apply_correction(theta, values, explicit_contribution, superdiag, diag,
                      subdiag, inhomog_term_delta, t1, t2, dim, n_dims):
  """Applies correction for the given dimension."""
  rhs = (
      values - theta * explicit_contribution +
      theta * inhomog_term_delta * (t2 - t1))

  # Make the given dimension the last one in the tensors, treat all the
  # other spatial dimensions as batch dimensions.
  perm = _get_permutation(values, n_dims, dim)
  if perm is not None:
    rhs = tf.transpose(rhs, perm)
    superdiag, diag, subdiag = (
        tf.transpose(c, perm) for c in (superdiag, diag, subdiag))

  subdiag = -theta * subdiag * (t2 - t1)
  diag = 1 - theta * diag * (t2 - t1)
  superdiag = -theta * superdiag * (t2 - t1)
  result = tf.linalg.tridiagonal_solve((superdiag, diag, subdiag),
                                       rhs,
                                       diagonals_format='sequence',
                                       partial_pivoting=False)

  # Transpose back to how it was.
  if perm is not None:
    result = tf.transpose(result, perm)
  return result 
Example #21
Source File: euler_sampling.py    From tf-quant-finance with Apache License 2.0
def _for_loop(*, steps_num, current_state,
              drift_fn, volatility_fn, wiener_mean, watch_params,
              num_samples, times, dt, sqrt_dt, time_indices,
              keep_mask, random_type, seed, normal_draws):
  """Smaple paths using custom for_loop."""
  num_time_points = time_indices.shape.as_list()[-1]
  if num_time_points == 1:
    iter_nums = steps_num
  else:
    iter_nums = time_indices
  def step_fn(i, current_state):
    # Unpack current_state
    current_state = current_state[0]
    _, _, next_state, _ = _euler_step(
        i=i,
        written_count=0,
        current_state=current_state,
        result=tf.expand_dims(current_state, axis=1),
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        keep_mask=keep_mask,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
    return [next_state]
  result = custom_loops.for_loop(
      body_fn=step_fn,
      initial_state=[current_state],
      params=watch_params,
      num_iterations=iter_nums)[0]
  if num_time_points == 1:
    return tf.expand_dims(result, axis=1)
  return tf.transpose(result, (1, 0, 2)) 
Example #22
Source File: generic_ito_process.py    From tf-quant-finance with Apache License 2.0
def _backward_pde_coeffs(drift_fn, volatility_fn, discounting):
  """Returns coeffs of the backward PDE."""
  def second_order_coeff_fn(t, coord_grid):
    sigma = volatility_fn(t, _coord_grid_to_mesh_grid(coord_grid))
    sigma_times_sigma_t = tf.linalg.matmul(sigma, sigma, transpose_b=True)

    # We currently have [dim, dim] as innermost dimensions, but the returned
    # tensor must have [dim, dim] as outermost dimensions.
    rank = len(sigma.shape.as_list())
    perm = [rank - 2, rank - 1] + list(range(rank - 2))
    sigma_times_sigma_t = tf.transpose(sigma_times_sigma_t, perm)
    return sigma_times_sigma_t / 2

  def first_order_coeff_fn(t, coord_grid):
    mu = drift_fn(t, _coord_grid_to_mesh_grid(coord_grid))

    # We currently have [dim] as innermost dimension, but the returned
    # tensor must have [dim] as outermost dimension.
    rank = len(mu.shape.as_list())
    perm = [rank - 1] + list(range(rank - 1))
    mu = tf.transpose(mu, perm)
    return mu

  def zeroth_order_coeff_fn(t, coord_grid):
    if not discounting:
      return None
    return -discounting(t, _coord_grid_to_mesh_grid(coord_grid))

  return second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn 
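The permutation that moves the innermost [dim, dim] block to the front can be sketched concretely (shapes are illustrative):

sigma = tf.zeros([10, 20, 3, 3])                     # grid shape [10, 20], dim = 3
rank = 4
perm = [rank - 2, rank - 1] + list(range(rank - 2))  # [2, 3, 0, 1]
tf.transpose(sigma, perm).shape                      # [3, 3, 10, 20]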
Example #23
Source File: univariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _backward_pde_coeffs(drift_fn, volatility_fn, discounting):
  """Returns coeffs of the backward PDE."""
  def second_order_coeff_fn(t, coord_grid):
    sigma = volatility_fn(t, _coord_grid_to_mesh_grid(coord_grid))
    sigma_times_sigma_t = tf.linalg.matmul(sigma, sigma, transpose_b=True)

    # We currently have [dim, dim] as innermost dimensions, but the returned
    # tensor must have [dim, dim] as outermost dimensions.
    rank = len(sigma.shape.as_list())
    perm = [rank - 2, rank - 1] + list(range(rank - 2))
    sigma_times_sigma_t = tf.transpose(sigma_times_sigma_t, perm)
    return sigma_times_sigma_t / 2

  def first_order_coeff_fn(t, coord_grid):
    mu = drift_fn(t, _coord_grid_to_mesh_grid(coord_grid))

    # We currently have [dim] as innermost dimension, but the returned
    # tensor must have [dim] as outermost dimension.
    rank = len(mu.shape.as_list())
    perm = [rank - 1] + list(range(rank - 1))
    mu = tf.transpose(mu, perm)
    return mu

  def zeroth_order_coeff_fn(t, coord_grid):
    if not discounting:
      return None
    return -discounting(t, _coord_grid_to_mesh_grid(coord_grid))

  return second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn 
Example #24
Source File: univariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _sample_paths(self,
                    times,
                    num_requested_times,
                    initial_state,
                    num_samples,
                    random_type,
                    seed,
                    skip):
    """Returns a sample of paths from the process."""
    # Normal draws needed for sampling
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=1, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed,
        dtype=self._dtype, skip=skip)
    times = tf.concat([[0], times], -1)
    dt = times[1:] - times[:-1]
    # The logarithm of all the increments between the times.
    log_increments = ((self._mu - self._sigma**2 / 2) * dt
                      + tf.sqrt(dt) * self._sigma
                      * tf.transpose(tf.squeeze(normal_draws, -1)))
    # Since the implementation of tf.math.cumsum is single-threaded we
    # use lower-triangular matrix multiplication instead
    once = tf.ones([num_requested_times, num_requested_times],
                   dtype=self._dtype)
    lower_triangular = tf.linalg.band_part(once, -1, 0)
    cumsum = tf.linalg.matvec(lower_triangular,
                              log_increments)
    samples = initial_state * tf.math.exp(cumsum)
    return tf.expand_dims(samples, -1)

  # TODO(b/152967694): Remove the duplicate methods. 
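The lower-triangular matmul standing in for tf.math.cumsum can be checked in isolation; a minimal sketch:

increments = tf.constant([1.0, 2.0, 3.0])
lower_triangular = tf.linalg.band_part(tf.ones([3, 3]), -1, 0)  # keep lower triangle
tf.linalg.matvec(lower_triangular, increments)                  # [1.0, 3.0, 6.0] == cumsum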
Example #25
Source File: multivariate_geometric_brownian_motion.py    From tf-quant-finance with Apache License 2.0
def _backward_pde_coeffs(drift_fn, volatility_fn, discounting):
  """Returns coeffs of the backward PDE."""
  def second_order_coeff_fn(t, coord_grid):
    sigma = volatility_fn(t, _coord_grid_to_mesh_grid(coord_grid))
    sigma_times_sigma_t = tf.linalg.matmul(sigma, sigma, transpose_b=True)

    # We currently have [dim, dim] as innermost dimensions, but the returned
    # tensor must have [dim, dim] as outermost dimensions.
    rank = len(sigma.shape.as_list())
    perm = [rank - 2, rank - 1] + list(range(rank - 2))
    sigma_times_sigma_t = tf.transpose(sigma_times_sigma_t, perm)
    return sigma_times_sigma_t / 2

  def first_order_coeff_fn(t, coord_grid):
    mu = drift_fn(t, _coord_grid_to_mesh_grid(coord_grid))

    # We currently have [dim] as innermost dimension, but the returned
    # tensor must have [dim] as outermost dimension.
    rank = len(mu.shape.as_list())
    perm = [rank - 1] + list(range(rank - 1))
    mu = tf.transpose(mu, perm)
    return mu

  def zeroth_order_coeff_fn(t, coord_grid):
    if not discounting:
      return None
    return -discounting(t, _coord_grid_to_mesh_grid(coord_grid))

  return second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn 
Example #26
Source File: lsm_v2.py    From tf-quant-finance with Apache License 2.0
def _expected_exercise_fn(design, continuation_value, exercise_value):
  """Returns the expected continuation value for each path.

  Args:
    design: A real `Tensor` of shape `[basis_size, num_samples]`.
    continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of
      the same dtype as `design`. The optimal value of the option conditional on
      not exercising now or earlier, taking future information into account.
    exercise_value: A `Tensor` of the same shape and dtype as
      `continuation_value`. Value of the option if exercised immediately at
      the current time.

  Returns:
    A `Tensor` of the same shape and dtype as `continuation_value` whose
    `(n, v)`-th entry represents the expected continuation value of sample path
    `n` under the `v`-th payoff scheme.
  """
  batch_design = tf.broadcast_to(
      design[..., None], design.shape + [continuation_value.shape[-1]])
  mask = tf.cast(exercise_value > 0, design.dtype)
  # Zero out contributions from samples we'd never exercise at this point (i.e.,
  # these extra observations do not change the regression coefficients).
  masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))
  # For design matrix X and response y, the coefficients beta of the best linear
  # unbiased estimate are contained in the equation X'X beta = X'y. Here `lhs`
  # is X'X and `rhs` is X'y, or rather a tensor of such left hand and right hand
  # sides, one for each payoff dimension.
  lhs = tf.matmul(masked, masked, transpose_a=True)
  # Use pseudo inverse for the regression matrix to ensure stability of the
  # algorithm.
  lhs_pinv = tf.linalg.pinv(lhs)
  rhs = tf.matmul(
      masked,
      tf.expand_dims(tf.transpose(continuation_value), axis=-1),
      transpose_a=True)
  beta = tf.matmul(lhs_pinv, rhs)
  continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)
  return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0) 
Example #27
Source File: array_ops.py    From trax with Apache License 2.0
def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
  """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
  if not source and not destination:
    return a

  a = asarray(a).data

  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)

  a_rank = utils._maybe_static(tf.rank(a))  # pylint: disable=protected-access

  def _correct_axis(axis, rank):
    if axis < 0:
      return axis + rank
    return axis

  source = tuple(_correct_axis(axis, a_rank) for axis in source)
  destination = tuple(_correct_axis(axis, a_rank) for axis in destination)

  if a.shape.rank is not None:
    perm = [i for i in range(a_rank) if i not in source]
    for dest, src in sorted(zip(destination, source)):
      assert dest <= len(perm)
      perm.insert(dest, src)
  else:
    r = tf.range(a_rank)

    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = tf.unstack(tf.sort(tf.stack(b)), num=len(b))

      i = 0
      result = []

      for item in items:
        result.append(a[i:item])
        i = item + 1

      result.append(a[i:])

      return tf.concat(result, 0)

    minus_sources = _remove_indices(r, source)
    minus_dest = _remove_indices(r, destination)

    perm = tf.scatter_nd(tf.expand_dims(minus_dest, 1), minus_sources, [a_rank])
    perm = tf.tensor_scatter_nd_update(perm, tf.expand_dims(destination, 1),
                                       source)
  a = tf.transpose(a, perm)

  return utils.tensor_to_ndarray(a) 
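Usage sketch (assuming the module is imported as `tf_np`):

a = tf_np.zeros((2, 3, 4))
tf_np.moveaxis(a, 0, -1).shape           # (3, 4, 2)
tf_np.moveaxis(a, (0, 1), (1, 0)).shape  # (3, 2, 4)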
Example #28
Source File: __init__.py    From language with Apache License 2.0
def get_tf_tensor(self, rel_name):
    """Get the Tensor that represents a relation.

    Args:
      rel_name: string naming a declared relation

    Returns:
      tf.SparseTensor

    Raises:
      RuntimeError: If the expression has no initial value.
    """
    if rel_name not in self._cached_tensor:
      if rel_name not in self._np_initval:
        raise RuntimeError('KG relation named %r has no initial value.' %
                           rel_name)
      m = self._np_initval[rel_name]
      n_rows, n_cols = m.shape
      if self.is_dense(rel_name):
        self._cached_tensor[rel_name] = tf.Variable(
            m, trainable=self.is_trainable(rel_name), name='nql/' + rel_name)
        self._initializers.append(self._cached_tensor[rel_name].initializer)

      else:
        data_m = np.transpose(np.vstack([m.row, m.col]))
        if not self.is_trainable(rel_name):
          sparse_tensor = tf.SparseTensor(data_m, m.data, [n_rows, n_cols])
        else:
          data_var_name = 'nql/%s_values' % rel_name
          data_var = tf.Variable(m.data, trainable=True, name=data_var_name)
          self._initializers.append(data_var.initializer)
          sparse_tensor = tf.SparseTensor(data_m, data_var, [n_rows, n_cols])
          self._declaration[rel_name].underlying_parameter = data_var
        self._cached_tensor[rel_name] = (sparse_tensor.indices,
                                         sparse_tensor.values,
                                         sparse_tensor.dense_shape)
    if self.is_dense(rel_name):
      return self._cached_tensor[rel_name]  # pytype: disable=bad-return-type
    else:
      return tf.SparseTensor(
          indices=self._cached_tensor[rel_name][0],
          values=self._cached_tensor[rel_name][1],
          dense_shape=self._cached_tensor[rel_name][2],
      ) 
Example #29
Source File: lsm.py    From tf-quant-finance with Apache License 2.0
def expected_exercise_fn(design, continuation_value, exercise_value):
  """Returns the expected continuation value for each path.

  Args:
    design: A real `Tensor` of shape `[basis_size, num_samples]`.
    continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of
      the same dtype as `design`. The optimal value of the option conditional on
      not exercising now or earlier, taking future information into account.
    exercise_value: A `Tensor` of the same shape and dtype as
      `continuation_value`. Value of the option if exercised immediately at
      the current time.

  Returns:
    A `Tensor` of the same shape and dtype as `continuation_value` whose
    `(n, v)`-th entry represents the expected continuation value of sample path
    `n` under the `v`-th payoff scheme.
  """
  # We wish to value each option under different payoffs, expressed through a
  # multidimensional payoff function. While the basis calculated from the sample
  # paths is the same for each payoff, the LSM algorithm requires us to fit a
  # regression model only on the in-the-money paths, which are payoff dependent,
  # hence we create multiple copies of the regression design (basis) matrix and
  # zero out rows for out of the money paths under each payoff.
  batch_design = tf.broadcast_to(
      tf.expand_dims(design, -1), design.shape + [continuation_value.shape[-1]])
  mask = tf.cast(exercise_value > 0, design.dtype)
  # Zero out contributions from samples we'd never exercise at this point (i.e.,
  # these extra observations do not change the regression coefficients).
  masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))
  # For design matrix X and response y, the coefficients beta of the best linear
  # unbiased estimate are contained in the equation X'X beta = X'y. Here `lhs`
  # is X'X and `rhs` is X'y, or rather a tensor of such left hand and right hand
  # sides, one for each payoff dimension.
  lhs = tf.matmul(masked, masked, transpose_a=True)
  # Use pseudo inverse for the regression matrix to ensure stability of the
  # algorithm.
  lhs_pinv = tf.linalg.pinv(lhs)
  rhs = tf.matmul(
      masked,
      tf.expand_dims(tf.transpose(continuation_value), -1),
      transpose_a=True)
  beta = tf.linalg.matmul(lhs_pinv, rhs)
  continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)
  return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0) 
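The normal-equation solve described in the comments (X'X beta = X'y, via the pseudo-inverse) in isolation; a minimal sketch with toy data:

X = tf.constant([[1., 1.], [1., 2.], [1., 3.]])  # design matrix with intercept column
y = tf.constant([[2.], [3.], [4.]])              # response: y = 1 + x
lhs = tf.matmul(X, X, transpose_a=True)          # X'X
rhs = tf.matmul(X, y, transpose_a=True)          # X'y
tf.matmul(tf.linalg.pinv(lhs), rhs)              # beta ~= [[1.], [1.]]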
Example #30
Source File: lsm.py    From tf-quant-finance with Apache License 2.0
def make_polynomial_basis(degree):
  """Produces a callable from samples to polynomial basis for use in regression.

  The output callable accepts a `Tensor` `X` of shape `[num_samples, dim]`,
  computes a centered value `Y = X - mean(X, axis=0)` and outputs a `Tensor`
  of shape `[degree * dim, num_samples]`, where
  ```
  Z[i*j, k] = X[k, j]**(degree - i) * X[k, j]**i, 0<=i<degree - 1, 0<=j<dim
  ```
  For example, if `degree` and `dim` are both equal to 2, the polynomial basis
  is `1, X, X**2, Y, Y**2, X * Y, X**2 * Y, X * Y**2`, where `X` and `Y` are
  the spatial axes.

  #### Example
  ```python
  basis = make_polynomial_basis(2)
  x = [1.0, 2.0, 3.0, 4.0]
  x = tf.expand_dims(x, -1)
  basis(x)
  # Expected result:
  [[ 1.0, 1.0, 1.0, 1.0], [-1.5, -0.5, 0.5, 1.5]]
  ```

  Args:
    degree: An `int32` scalar `Tensor`. The degree of the desired polynomial
      basis.

  Returns:
    A callable from `Tensor`s of shape `[num_samples, dim]` to `Tensor`s of
    shape `[degree * dim, num_samples]`.

  Raises:
    ValueError: If `degree` is less than `1`.
  """
  tf.debugging.assert_greater_equal(
      degree, 0,
      message='Degree of polynomial basis can not be negative.')
  def basis(sample_paths):
    """Computes polynomial basis expansion at the given sample points.

    Args:
      sample_paths: A `Tensor` of either `float32` or `float64` dtype and of
        shape `[num_samples, dim]` where `dim` has to be statically known.

    Returns:
      A `Tensor` of shape `[degree * dim, num_samples]`.
    """
    samples = tf.convert_to_tensor(sample_paths)
    dim = samples.shape.as_list()[-1]
    grid = tf.range(0, degree + 1, dtype=samples.dtype)

    samples_centered = samples - tf.math.reduce_mean(samples, axis=0)
    samples_centered = tf.expand_dims(samples_centered, -2)
    grid = tf.meshgrid(*(dim * [grid]))
    grid = tf.reshape(tf.stack(grid, -1), [-1, dim])
    # Shape [num_samples, degree * dim]
    basis_expansion = tf.reduce_prod(samples_centered**grid, -1)
    return tf.transpose(basis_expansion)
  return basis