Python tensorflow.python.ops.array_ops.matrix_diag_part() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.matrix_diag_part(), collected from open-source projects. The project and source file each example comes from is noted above it. You may also want to check out all available functions/classes of the module tensorflow.python.ops.array_ops.
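
Before the examples, a quick orientation: matrix_diag_part extracts the main diagonal of each innermost matrix of a (possibly batched) tensor, reducing shape [..., M, N] to [..., min(M, N)]. A minimal sketch of the same behavior in plain NumPy, purely for illustration (the examples below all use the TensorFlow op itself):

import numpy as np

# Batch of two 3x3 matrices.
batch = np.arange(18, dtype=np.float32).reshape(2, 3, 3)

# NumPy analogue of array_ops.matrix_diag_part: take the main diagonal
# of the two innermost dimensions, keeping the batch dimension.
diag = np.diagonal(batch, axis1=-2, axis2=-1)

print(diag.shape)  # (2, 3)
print(diag)        # rows: [0, 4, 8] and [9, 13, 17]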
Example #1
Source File: gmm_ops.py    From auto-alt-text-lambda-api with MIT License
def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilties per example in a class.

    Updates a matrix with dimension num_examples X num_classes.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    cholesky = linalg_ops.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
    x_mu_cov = math_ops.square(
        linalg_ops.matrix_triangular_solve(
            cholesky, array_ops.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
    self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                    * math_ops.log(2 * np.pi) + log_det_covs) 
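
The log_det_covs line above uses the identity log det(S) = 2 * sum(log(diag(chol(S)))) for positive-definite S, which follows from det(S) = det(L)^2 when L is the Cholesky factor of S. A small NumPy check of that identity, using an arbitrary positive-definite matrix built just for this sketch:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
cov = a @ a.T + 4.0 * np.eye(4)  # positive definite by construction

chol = np.linalg.cholesky(cov)
log_det_via_chol = 2.0 * np.sum(np.log(np.diag(chol)))
_, log_det_direct = np.linalg.slogdet(cov)

print(np.allclose(log_det_via_chol, log_det_direct))  # True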
Example #2
Source File: operator_pd_cholesky.py    From lambda-packs with MIT License
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
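
The three assertions certify that chol has rank at least 2, is square in its last two dimensions, and has a strictly positive diagonal, which is exactly what a valid Cholesky factor of a positive-definite matrix must satisfy. A NumPy analogue of the same checks, as a sketch (the helper name check_chol is just for illustration):

import numpy as np

def check_chol(chol):
    """Eagerly performs the checks that _check_chol above defers."""
    chol = np.asarray(chol)
    if chol.ndim < 2:
        raise ValueError("chol must be at least rank 2")
    if chol.shape[-2] != chol.shape[-1]:
        raise ValueError("chol must be square in its last two dims")
    diag = np.diagonal(chol, axis1=-2, axis2=-1)
    if not np.all(diag > 0):
        raise ValueError("Cholesky diagonal must be positive")
    return chol

check_chol(np.linalg.cholesky(np.array([[4.0, 2.0], [2.0, 3.0]])))  # passes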
Example #3
Source File: operator_pd_cholesky.py    From keras-lambda with MIT License
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
Example #4
Source File: linear_operator_identity.py    From auto-alt-text-lambda-api with MIT License
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      # Shape [B1,...,Bb, 1]
      multiplier_vector = array_ops.expand_dims(self.multiplier, -1)

      # Shape [C1,...,Cc, M, M]
      mat = ops.convert_to_tensor(mat, name="mat")

      # Shape [C1,...,Cc, M]
      mat_diag = array_ops.matrix_diag_part(mat)

      # multiplier_vector broadcasts here.
      new_diag = multiplier_vector + mat_diag

      return array_ops.matrix_set_diag(mat, new_diag) 
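
The payoff of this method is that adding multiplier * I only touches the diagonal, so one diag-part/set-diag round trip replaces materializing a full identity matrix. A NumPy sketch of the same trick:

import numpy as np

multiplier = 3.0
mat = np.arange(9.0).reshape(3, 3)

# Add multiplier * I by updating only the diagonal entries,
# never building the identity matrix itself.
result = mat.copy()
result[np.diag_indices(3)] += multiplier

print(np.allclose(result, multiplier * np.eye(3) + mat))  # True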
Example #5
Source File: array_grad.py    From deep_image_model with Apache License 2.0
def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat(0, [batch_shape, [min_dim]])
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
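
matrix_set_diag(input, diag) copies input and overwrites its main diagonal with diag, so the incoming gradient splits cleanly: off-diagonal entries flow back to input and diagonal entries flow back to diag. A NumPy sketch of that split for a single 3x3 matrix:

import numpy as np

grad = np.arange(9.0).reshape(3, 3)

# Gradient w.r.t. the original matrix: zero the diagonal, since
# those entries were overwritten in the forward pass.
grad_input = grad.copy()
grad_input[np.diag_indices(3)] = 0.0

# Gradient w.r.t. the new diagonal: just the diagonal of grad.
grad_diag = np.diag(grad).copy()

print(grad_input)  # diagonal zeroed, off-diagonal passed through
print(grad_diag)   # [0. 4. 8.]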
Example #6
Source File: interpolate_spline.py    From WarpGAN with MIT License
def _pairwise_squared_distance_matrix(x):
  """Pairwise squared distance among a (batch) matrix's rows (2nd dim).

  This saves a bit of computation vs. using _cross_squared_distance_matrix(x,x)

  Args:
    x: `[batch_size, n, d]` float `Tensor`

  Returns:
    squared_dists: `[batch_size, n, n]` float `Tensor`, where
    squared_dists[b,i,j] = ||x[b,i,:] - x[b,j,:]||^2
  """

  x_x_transpose = math_ops.matmul(x, x, adjoint_b=True)
  x_norm_squared = array_ops.matrix_diag_part(x_x_transpose)
  x_norm_squared_tile = array_ops.expand_dims(x_norm_squared, 2)

  # squared_dists[b,i,j] = ||x_bi - x_bj||^2 = x_bi'x_bi - 2x_bi'x_bj + x_bj'x_bj
  squared_dists = x_norm_squared_tile - 2 * x_x_transpose + array_ops.transpose(
      x_norm_squared_tile, [0, 2, 1])

  return squared_dists 
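
Here matrix_diag_part pulls the squared row norms out of the Gram matrix x x', and the expansion ||x_i - x_j||^2 = x_i'x_i - 2 x_i'x_j + x_j'x_j does the rest. A NumPy check of the same expansion against a brute-force reference (shapes are arbitrary):

import numpy as np

rng = np.random.default_rng(1)
x = rng.standard_normal((2, 5, 3))  # [batch_size, n, d]

gram = x @ np.swapaxes(x, -1, -2)                 # x_bi' x_bj
sq_norms = np.diagonal(gram, axis1=-2, axis2=-1)  # ||x_bi||^2
dists = sq_norms[..., :, None] - 2.0 * gram + sq_norms[..., None, :]

# Brute-force reference for one batch element.
ref = ((x[0, :, None, :] - x[0, None, :, :]) ** 2).sum(-1)
print(np.allclose(dists[0], ref))  # True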
Example #7
Source File: array_grad.py    From lambda-packs with MIT License
def _MatrixSetDiagGrad(op, grad):
  """Gradient for MatrixSetDiag."""
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Example #8
Source File: linear_operator_identity.py    From keras-lambda with MIT License
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      # Shape [B1,...,Bb, 1]
      multiplier_vector = array_ops.expand_dims(self.multiplier, -1)

      # Shape [C1,...,Cc, M, M]
      mat = ops.convert_to_tensor(mat, name="mat")

      # Shape [C1,...,Cc, M]
      mat_diag = array_ops.matrix_diag_part(mat)

      # multiplier_vector broadcasts here.
      new_diag = multiplier_vector + mat_diag

      return array_ops.matrix_set_diag(mat, new_diag) 
Example #9
Source File: bijector.py    From keras-lambda with MIT License
def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, i.e.,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m 
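
The matrix determinant lemma cited in the docstring is easy to verify numerically. A NumPy sketch, with M taken lower triangular to match the Tril of the docstring (the sizes and the diagonal bump that keeps M invertible are illustrative choices):

import numpy as np

rng = np.random.default_rng(2)
n, k = 5, 2
m = np.tril(rng.standard_normal((n, n))) + n * np.eye(n)  # invertible Tril
v = rng.standard_normal((n, k))
d = np.diag(rng.uniform(0.5, 2.0, size=k))

lhs = np.linalg.det(m + v @ d @ v.T)
c = np.linalg.inv(d) + v.T @ np.linalg.inv(m) @ v  # as in `_inverse`
rhs = np.linalg.det(c) * np.linalg.det(d) * np.linalg.det(m)

print(np.allclose(lhs, rhs))  # True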
Example #10
Source File: affine_impl.py    From lambda-packs with MIT License
def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, i.e.,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), axis=[-1])
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m 
Example #11
Source File: array_grad.py    From keras-lambda with MIT License
def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Example #12
Source File: bijector.py    From auto-alt-text-lambda-api with MIT License
def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, i.e.,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m 
Example #13
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_diag_part(self):
    self._skip_if_tests_to_skip_contains("diag_part")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_diag_part = operator.diag_part()
            mat_diag_part = array_ops.matrix_diag_part(mat)

            if not use_placeholder:
              self.assertAllEqual(
                  mat_diag_part.get_shape(), op_diag_part.get_shape())

            op_diag_part_, mat_diag_part_ = sess.run(
                [op_diag_part, mat_diag_part], feed_dict=feed_dict)

            self.assertAC(op_diag_part_, mat_diag_part_) 
Example #14
Source File: operator_pd_cholesky.py    From auto-alt-text-lambda-api with MIT License
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
Example #15
Source File: linear_operator_identity.py    From lambda-packs with MIT License
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      # Shape [B1,...,Bb, 1]
      multiplier_vector = array_ops.expand_dims(self.multiplier, -1)

      # Shape [C1,...,Cc, M, M]
      mat = ops.convert_to_tensor(mat, name="mat")

      # Shape [C1,...,Cc, M]
      mat_diag = array_ops.matrix_diag_part(mat)

      # multiplier_vector broadcasts here.
      new_diag = multiplier_vector + mat_diag

      return array_ops.matrix_set_diag(mat, new_diag) 
Example #16
Source File: linear_operator_udvh_update.py    From lambda-packs with MIT License
def _log_abs_determinant(self):
    # Recall
    #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
    #                  = det(C) det(D) det(L)
    log_abs_det_d = self.diag_operator.log_abs_determinant()
    log_abs_det_l = self.base_operator.log_abs_determinant()

    if self._use_cholesky:
      chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
      log_abs_det_c = 2 * math_ops.reduce_sum(
          math_ops.log(chol_cap_diag), reduction_indices=[-1])
    else:
      det_c = linalg_ops.matrix_determinant(self._capacitance)
      log_abs_det_c = math_ops.log(math_ops.abs(det_c))

    return log_abs_det_c + log_abs_det_d + log_abs_det_l 
Example #17
Source File: array_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _MatrixSetDiagGrad(op, grad):
  """Gradient for MatrixSetDiag."""
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Example #18
Source File: operator_pd_cholesky.py    From deep_image_model with Apache License 2.0
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
Example #19
Source File: array_grad.py    From auto-alt-text-lambda-api with MIT License
def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag) 
Example #20
Source File: gmm_ops.py    From keras-lambda with MIT License
def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilties per example in a class.

    Updates a matrix with dimension num_examples X num_classes.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    cholesky = linalg_ops.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
    x_mu_cov = math_ops.square(
        linalg_ops.matrix_triangular_solve(
            cholesky, array_ops.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
    self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                    * math_ops.log(2 * np.pi) + log_det_covs) 
Example #21
Source File: bijector.py    From deep_image_model with Apache License 2.0
def _inverse_log_det_jacobian(self, y):  # pylint: disable=unused-argument
    abs_diag = math_ops.abs(array_ops.matrix_diag_part(self.scale))
    return -math_ops.reduce_sum(math_ops.log(abs_diag), reduction_indices=[-1]) 
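
For a triangular scale, |det| is the product of the absolute diagonal entries, so the inverse log-det-Jacobian reduces to a negated sum of logs with no determinant op needed. A NumPy check of that reduction (the 2x2 scale is arbitrary):

import numpy as np

scale = np.array([[2.0, 0.0],
                  [1.0, -3.0]])  # lower-triangular scale

ildj = -np.sum(np.log(np.abs(np.diag(scale))))
_, log_abs_det = np.linalg.slogdet(scale)

print(np.allclose(ildj, -log_abs_det))  # True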
Example #22
Source File: operator_pd_diag.py    From deep_image_model with Apache License 2.0
def _add_to_tensor(self, mat):
    mat_diag = array_ops.matrix_diag_part(mat)
    new_diag = self._diag + mat_diag
    return array_ops.matrix_set_diag(mat, new_diag) 
Example #23
Source File: operator_pd_identity.py    From deep_image_model with Apache License 2.0
def _add_to_tensor(self, mat):
    # Add to a tensor in O(k) time!
    mat_diag = array_ops.matrix_diag_part(mat)
    new_diag = constant_op.constant(1, dtype=self.dtype) + mat_diag
    return array_ops.matrix_set_diag(mat, new_diag) 
Example #24
Source File: operator_pd_vdvt_update.py    From deep_image_model with Apache License 2.0
def _sqrt_log_det(self):
    # The matrix determinant lemma states:
    # det(M + VDV^T) = det(D^{-1} + V^T M^{-1} V) * det(D) * det(M)
    #                = det(C) * det(D) * det(M)
    #
    # Here we compute the Cholesky factor of "C", then pass the result on.
    diag_chol_c = array_ops.matrix_diag_part(
        self._chol_capacitance(batch_mode=False))
    return self._sqrt_log_det_core(diag_chol_c) 
Example #25
Source File: linalg_grad.py    From deep_image_model with Apache License 2.0
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,:,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.inv(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.batch_matmul(
          v,
          math_ops.batch_matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.batch_matmul(
                  v, grad_v, adj_x=True),
              v,
              adj_y=True))
    else:
      grad_a = math_ops.batch_matmul(
          v,
          math_ops.batch_matmul(
              array_ops.matrix_diag(grad_e), v, adj_y=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a 
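
The matrix f holds the reciprocal pairwise eigenvalue gaps with a zeroed diagonal, and it is why the gradient degenerates for repeated eigenvalues. A NumPy sketch of its construction, assuming distinct eigenvalues (the values are illustrative):

import numpy as np

e = np.array([1.0, 2.5, 4.0])   # distinct eigenvalues
gaps = e[None, :] - e[:, None]  # expand_dims(e, -2) - expand_dims(e, -1)

with np.errstate(divide="ignore"):  # the diagonal gaps are zero
    f = 1.0 / gaps
np.fill_diagonal(f, 0.0)            # matrix_set_diag(..., zeros_like(e))

print(f)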
Example #26
Source File: linear_operator_identity.py    From auto-alt-text-lambda-api with MIT License
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      mat = ops.convert_to_tensor(mat, name="mat")
      mat_diag = array_ops.matrix_diag_part(mat)
      new_diag = 1 + mat_diag
      return array_ops.matrix_set_diag(mat, new_diag) 
Example #27
Source File: operator_pd_vdvt_update.py    From deep_image_model with Apache License 2.0
def _batch_sqrt_log_det(self):
    # Here we compute the Cholesky factor of "C", then pass the result on.
    diag_chol_c = array_ops.matrix_diag_part(
        self._chol_capacitance(batch_mode=True))
    return self._sqrt_log_det_core(diag_chol_c) 
Example #28
Source File: operator_pd_cholesky.py    From deep_image_model with Apache License 2.0
def _batch_log_det(self):
    """Log determinant of every batch member."""
    # Note that array_ops.diag_part does not seem more efficient for non-batch,
    # and would give a bad result for a batch matrix, so always use
    # matrix_diag_part.
    diag = array_ops.matrix_diag_part(self._chol)
    det = 2.0 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
    det.set_shape(self.get_shape()[:-2])
    return det 
Example #29
Source File: wishart.py    From deep_image_model with Apache License 2.0
def _variance(self):
    x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
    d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
    v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
    if self.cholesky_input_output_matrices:
      return linalg_ops.cholesky(v)
    return v 
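
This computes Var(W[i, j]) = df * (sigma[i, j]^2 + sigma[i, i] * sigma[j, j]) for a Wishart with df degrees of freedom and scale sigma: square(x) supplies the first term and the outer product of the matrix_diag_part supplies the second. A NumPy check (df and sigma are arbitrary):

import numpy as np

df = 7.0
sigma = np.array([[2.0, 0.5],
                  [0.5, 1.0]])  # scale matrix

x = np.sqrt(df) * sigma
d = np.diag(x)[:, None]  # column vector of diagonal entries
v = x ** 2 + d @ d.T     # square(x) + d d'

ref = df * (sigma ** 2 + np.outer(np.diag(sigma), np.diag(sigma)))
print(np.allclose(v, ref))  # True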
Example #30
Source File: array_grad.py    From deep_image_model with Apache License 2.0
def _MatrixDiagGrad(_, grad):
  return array_ops.matrix_diag_part(grad)
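
matrix_diag embeds a vector as the diagonal of an otherwise-zero matrix, so each upstream gradient entry on the diagonal maps one-to-one back onto the vector, and everything off-diagonal is discarded. A NumPy sketch of the forward/backward pair:

import numpy as np

v = np.array([1.0, 2.0, 3.0])
forward = np.diag(v)                 # matrix_diag: vector -> diagonal matrix

grad = np.arange(9.0).reshape(3, 3)  # upstream gradient w.r.t. the matrix
grad_v = np.diag(grad).copy()        # matrix_diag_part(grad) is the answer

print(forward)
print(grad_v)  # [0. 4. 8.]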