Python tensorflow.self_adjoint_eigvals() Examples

The following are 6 code examples of tensorflow.self_adjoint_eigvals(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the tensorflow module.
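Before the project examples, a minimal self-contained sketch of the function itself may help (this assumes TensorFlow 1.x, where tf.self_adjoint_eigvals lives in the top-level namespace; in TensorFlow 2.x the equivalent is tf.linalg.eigvalsh):

import numpy as np
import tensorflow as tf

# A small symmetric (hence self-adjoint) matrix with eigenvalues 1 and 3.
a = np.array([[2.0, 1.0],
              [1.0, 2.0]])

# self_adjoint_eigvals returns only the eigenvalues, in ascending order;
# it is cheaper than tf.self_adjoint_eig when eigenvectors are not needed.
eigvals = tf.self_adjoint_eigvals(tf.constant(a))

with tf.Session() as sess:
    print(sess.run(eigvals))  # -> [1. 3.]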
Example #1
Source File: losses.py    From QMLT with Apache License 2.0
def trace_distance(rho, sigma):
    r""" Trace distance :math:`\frac{1}{2}\tr \{ \sqrt{ (\rho - \sigma})^2  \}` between quantum states :math:`\rho` and :math:`\sigma`.

    The inputs and outputs are tensors of dtype float, and all computations support automatic differentiation.

    Args:
        rho (tf.Tensor): 2-dimensional Hermitian matrix representing state :math:`\rho`.
        sigma (tf.Tensor): 2-dimensional Hermitian matrix of the same dimensions and dtype as rho,
            representing state :math:`\sigma`.

    Returns:
        tf.Tensor: The scalar trace distance.
    """

    if rho.shape != sigma.shape:
        raise ValueError("Cannot compute the trace distance if inputs have"
                         " different shapes {} and {}".format(rho.shape, sigma.shape))

    diff = rho - sigma
    eig = tf.self_adjoint_eigvals(diff)
    abs_eig = tf.abs(eig)
    return 0.5*tf.real(tf.reduce_sum(abs_eig)) 
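A hypothetical usage sketch (the states below are illustrative, not taken from QMLT): the trace distance between the pure state |0><0| and the maximally mixed qubit state I/2 is 0.5.

import numpy as np
import tensorflow as tf

rho = tf.constant(np.array([[1.0, 0.0],
                            [0.0, 0.0]]))    # |0><0|
sigma = tf.constant(np.array([[0.5, 0.0],
                              [0.0, 0.5]]))  # I/2

dist = trace_distance(rho, sigma)  # uses the function defined above
with tf.Session() as sess:
    print(sess.run(dist))  # -> 0.5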
Example #2
Source File: kronecker_ops.py    From VFF with Apache License 2.0
def log_det_kron_sum(L1, L2):
    """
    L1 is a list of lower triangular arrays.
    L2 is a list of lower triangular arrays.

    if S1 = kron(L1) * kron(L1).T, and S2 similarly,
    this function computes the log determinant of S1 + S2
    """
    # Note: on Python 3 this requires `from functools import reduce`.
    L1_logdets = [tf.reduce_sum(tf.log(tf.square(tf.diag_part(L)))) for L in L1]
    total_size = reduce(tf.multiply, [tf.shape(L)[0] for L in L1])  # tf.mul was renamed tf.multiply in TF 1.0
    N_other = [total_size / tf.shape(L)[0] for L in L1]
    L1_logdet = reduce(tf.add, [s * ld for s, ld in zip(N_other, L1_logdets)])
    LiL = [tf.matrix_triangular_solve(L, R) for L, R in zip(L1, L2)]
    eigvals = [tf.self_adjoint_eigvals(tf.matmul(mat, mat, transpose_b=True)) for mat in LiL]  # tf.Tensor has no .T attribute
    eigvals_kronned = kron_vec_mul([tf.reshape(e, [-1, 1]) for e in eigvals], tf.ones([1, 1], tf.float64))
    return tf.reduce_sum(tf.log(1 + eigvals_kronned)) + L1_logdet
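The identity this function exploits is easier to see without the Kronecker structure: since S1 + S2 = L1 (I + M) L1^T with M = (L1^{-1} L2)(L1^{-1} L2)^T, it follows that log|S1 + S2| = log|S1| + sum_i log(1 + lambda_i), where lambda_i are the eigenvalues of M. A plain NumPy sketch of the non-Kronecker case (random matrices, for illustration only):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(4, 4); S1 = A @ A.T + 4 * np.eye(4)
B = rng.randn(4, 4); S2 = B @ B.T + 4 * np.eye(4)
L1, L2 = np.linalg.cholesky(S1), np.linalg.cholesky(S2)

LiL = np.linalg.solve(L1, L2)          # L1^{-1} L2, as in matrix_triangular_solve
lam = np.linalg.eigvalsh(LiL @ LiL.T)  # eigenvalues of M

lhs = np.linalg.slogdet(S1 + S2)[1]
rhs = np.linalg.slogdet(S1)[1] + np.sum(np.log1p(lam))
print(np.allclose(lhs, rhs))  # -> True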
Example #3
Source File: full_covariance.py    From tf-example-models with Apache License 2.0
def get_log_determinant(self):
    tf_eigvals = tf.self_adjoint_eigvals(self.tf_covariance_matrix)

    return tf.reduce_sum(tf.log(tf_eigvals))
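The reasoning: a covariance matrix is symmetric positive definite, so its determinant is the product of its (positive) eigenvalues and the log-determinant is the sum of their logs. A standalone sketch with a made-up covariance matrix:

import numpy as np
import tensorflow as tf

cov = np.array([[2.0, 0.5],
                [0.5, 1.0]])  # det = 2*1 - 0.5*0.5 = 1.75

log_det = tf.reduce_sum(tf.log(tf.self_adjoint_eigvals(tf.constant(cov))))

with tf.Session() as sess:
    print(sess.run(log_det))          # ~0.5596
    print(np.linalg.slogdet(cov)[1])  # same value, computed by NumPy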
Example #4
Source File: spectral_svgp.py    From nssm-gp with MIT License
def _check_eigvals(kern, name):
    eigvals = tf.self_adjoint_eigvals(kern)

    def print_op():
        return tf.Print(kern, [eigvals], message='negative eigenvalues %s' % name)

    def identity_op():
        return tf.identity(kern)

    return tf.cond(tf.less(tf.reduce_min(eigvals), 0.0), true_fn=print_op, false_fn=identity_op) 
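A hypothetical call site (the matrix is illustrative): wrapping a Gram matrix so that any negative eigenvalue triggers a runtime printout while the values pass through unchanged.

import numpy as np
import tensorflow as tf

# An indefinite matrix: its eigenvalues are -1 and 1, so the check fires.
kern = tf.constant(np.array([[0.0, 1.0],
                             [1.0, 0.0]]))
checked = _check_eigvals(kern, 'demo')  # uses the function defined above

with tf.Session() as sess:
    sess.run(checked)  # logs "negative eigenvalues demo" plus the eigenvalues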
Example #5
Source File: neural.py    From nssm-gp with MIT License
def robust_kernel(kern, shape_X):
    eigvals = tf.self_adjoint_eigvals(kern)
    min_eig = tf.reduce_min(eigvals)
    jitter = settings.numerics.jitter_level

    def abs_min_eig():
        return tf.Print(tf.abs(min_eig), [min_eig], 'kernel had negative eigenvalue')

    def zero():
        return float_type(0.0)

    # If the kernel is indefinite, add |min_eig| on top of the base jitter.
    jitter += tf.cond(tf.less(min_eig, 0.0), abs_min_eig, zero)
    return kern + jitter * tf.eye(shape_X, dtype=settings.dtypes.float_type) 
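The underlying fact: adding c*I to a symmetric matrix shifts every eigenvalue by c, so adding |min_eig| (plus the base jitter) to the diagonal makes an indefinite kernel matrix positive definite. A NumPy sketch of that repair (the jitter level is arbitrary here):

import numpy as np

kern = np.array([[1.0, 2.0],
                 [2.0, 1.0]])  # eigenvalues: 3 and -1
min_eig = np.linalg.eigvalsh(kern).min()

jitter = 1e-6
if min_eig < 0:
    jitter += abs(min_eig)  # lift the whole spectrum above zero

fixed = kern + jitter * np.eye(2)
print(np.linalg.eigvalsh(fixed).min() > 0)  # -> True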
Example #6
Source File: self_adjoint_eig_op_test.py    From deep_image_model with Apache License 2.0
def _GetSelfAdjointEigTest(dtype_, shape_):

  def CompareEigenVectors(self, x, y, tol):
    # Eigenvectors are only unique up to sign so we normalize the signs first.
    signs = np.sign(np.sum(np.divide(x, y), -2, keepdims=True))
    x *= signs
    self.assertAllClose(x, y, atol=tol, rtol=tol)

  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
    num_batches = int(np.prod(x_e.shape[:-1]))
    n = x_e.shape[-1]
    x_e = np.reshape(x_e, [num_batches] + [n])
    x_v = np.reshape(x_v, [num_batches] + [n, n])
    y_e = np.reshape(y_e, [num_batches] + [n])
    y_v = np.reshape(y_v, [num_batches] + [n, n])
    for i in range(num_batches):
      x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
      y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
      CompareEigenVectors(self, x_vi, y_vi, tol)

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ == np.float32:
      atol = 1e-4
    else:
      atol = 1e-12
    for compute_v in False, True:
      np_e, np_v = np.linalg.eig(a)
      with self.test_session():
        if compute_v:
          tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

          # Check that V*diag(E)*V^T is close to A.
          # tf.batch_matmul was folded into tf.matmul in TF 1.0; adj_y became adjoint_b.
          a_ev = tf.matmul(
              tf.matmul(tf_v, tf.matrix_diag(tf_e)), tf_v, adjoint_b=True)
          self.assertAllClose(a_ev.eval(), a, atol=atol)

          # Compare to numpy.linalg.eig.
          CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
                                     atol)
        else:
          tf_e = tf.self_adjoint_eigvals(tf.constant(a))
          self.assertAllClose(
              np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)

  return Test
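The sign normalization in CompareEigenVectors matters because a real eigendecomposition determines each eigenvector only up to a factor of -1. A small NumPy illustration of the same trick:

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])
_, v = np.linalg.eigh(a)
flipped = -v  # an equally valid set of eigenvectors

# Direct comparison fails, but sign-normalizing first makes them match.
signs = np.sign(np.sum(v / flipped, -2, keepdims=True))
print(np.allclose(v * signs, flipped))  # -> True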