Python tensorflow.cholesky_solve() Examples
The following are 4 code examples of tensorflow.cholesky_solve(), drawn from open-source projects. The source file and license are noted above each example. Note that these examples use the TensorFlow 1.x API; in TensorFlow 2.x the function lives at tf.linalg.cholesky_solve.
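Before the project examples, here is a minimal sketch of what the function computes: given the Cholesky factor L of a symmetric positive-definite matrix A, tf.cholesky_solve(L, rhs) solves A x = rhs with two triangular solves instead of an explicit inverse. (The sketch assumes the TF 1.x names tf.cholesky and tf.Session, matching the examples that follow.)

```python
import numpy as np
import tensorflow as tf

rng = np.random.RandomState(0)
m = rng.randn(3, 3)
a = np.dot(m, m.T) + 3.0 * np.eye(3)   # M M^T + 3I is symmetric positive-definite
rhs = rng.randn(3, 1)

chol = tf.cholesky(a)                  # lower-triangular L with A = L L^T
x = tf.cholesky_solve(chol, rhs)       # solves A x = rhs via two triangular solves

with tf.Session() as sess:
    x_val = sess.run(x)
    print(np.allclose(np.dot(a, x_val), rhs))  # True, up to float tolerance
```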
Example #1
Source File: transforms.py From GPflowOpt with Apache License 2.0
```python
def build_backward_variance(self, Yvar):
    """
    Additional method for scaling variance backward (used in :class:`.Normalizer`).
    Can process both the diagonal variances returned by predict_f, as well as
    full covariance matrices.

    :param Yvar: size N x N x P or size N x P
    :return: Yvar scaled, same rank and size as input
    """
    rank = tf.rank(Yvar)
    # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be in the
    # same line. If a full cov matrix is provided fn1 turns it into a rank 4, then
    # tries to transpose it as a rank 3. Splitting it in two steps however works fine.
    Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
    Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

    N = tf.shape(Yvar)[0]
    D = tf.shape(Yvar)[2]
    L = tf.cholesky(tf.square(tf.transpose(self.A)))
    Yvar = tf.reshape(Yvar, [N * N, D])
    scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
    return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var)
```
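One way to read the solve above (an interpretation of the code, not project documentation): if the forward transform scales outputs by the matrix $A$, variances scale by the elementwise square of $A$, so the backward pass solves a linear system with $S = (A \circ A)^\top$ rather than forming an explicit inverse:

$$v_{\text{scaled}} = S^{-1} v, \qquad S = (A \circ A)^\top = LL^\top,$$

applied column-wise, via tf.cholesky_solve(L, ...), to the transposed $N^2 \times D$ matrix of flattened (co)variances.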
Example #2
Source File: transforms.py From GPflowOpt with Apache License 2.0
```python
def build_backward(self, Y):
    """
    TensorFlow implementation of the inverse mapping
    """
    L = tf.cholesky(tf.transpose(self.A))
    XT = tf.cholesky_solve(L, tf.transpose(Y - self.b))
    return tf.transpose(XT)
```
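A quick NumPy cross-check of the algebra (the forward map Y = XA + b is an assumption inferred from the code, and tf.cholesky_solve is only valid here when A is symmetric positive-definite, in which case A equals its transpose):

```python
import numpy as np

rng = np.random.RandomState(1)
m = rng.randn(4, 4)
A = np.dot(m, m.T) + 4.0 * np.eye(4)   # symmetric positive-definite, so A == A.T
b = rng.randn(4)
X = rng.randn(10, 4)

Y = np.dot(X, A) + b                    # assumed forward map of the transform
L = np.linalg.cholesky(A.T)             # mirrors tf.cholesky(tf.transpose(self.A))
XT = np.linalg.solve(np.dot(L, L.T), (Y - b).T)  # what cholesky_solve computes
print(np.allclose(XT.T, X))             # True: the backward map recovers X
```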
Example #3
Source File: linalg_ops_test.py From deep_image_model with Apache License 2.0
```python
def test_works_with_five_different_random_pos_def_matrices(self):
    with self.test_session():
        for n in range(1, 6):
            for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
                # Create 2 x n x n matrix
                array = np.array(
                    [_random_pd_matrix(n, self.rng), _random_pd_matrix(n, self.rng)]
                ).astype(np_type)
                chol = tf.cholesky(array)
                for k in range(1, 3):
                    rhs = self.rng.randn(2, n, k).astype(np_type)
                    x = tf.cholesky_solve(chol, rhs)
                    self.assertAllClose(
                        rhs, tf.batch_matmul(array, x).eval(), atol=atol)
```
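The helper `_random_pd_matrix` is referenced by the test but not shown here; a plausible reconstruction (an assumption, not the project's actual definition) is:

```python
import numpy as np

def _random_pd_matrix(n, rng):
    # Hypothetical helper: M M^T is positive semi-definite, and adding n * I
    # pushes the eigenvalues safely away from zero, making the result
    # positive-definite and well-conditioned enough for float32 tolerances.
    m = rng.randn(n, n)
    return np.dot(m, m.T) + n * np.eye(n)
```

Any construction yielding a well-conditioned symmetric positive-definite matrix would serve; the test only relies on tf.cholesky succeeding on it.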
Example #4
Source File: layers.py From Doubly-Stochastic-DGP with Apache License 2.0
```python
def KL(self):
    """
    The KL divergence from the variational distribution to the prior

    :return: KL divergence from N(q_mu, q_sqrt) to N(0, I), independently for each GP
    """
    # if self.white:
    #     return gauss_kl(self.q_mu, self.q_sqrt)
    # else:
    #     return gauss_kl(self.q_mu, self.q_sqrt, self.Ku)

    self.build_cholesky_if_needed()

    KL = -0.5 * self.num_outputs * self.num_inducing
    KL -= 0.5 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.q_sqrt) ** 2))

    if not self.white:
        KL += tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Lu))) * self.num_outputs
        KL += 0.5 * tf.reduce_sum(tf.square(
            tf.matrix_triangular_solve(self.Lu_tiled, self.q_sqrt, lower=True)))
        Kinv_m = tf.cholesky_solve(self.Lu, self.q_mu)
        KL += 0.5 * tf.reduce_sum(self.q_mu * Kinv_m)
    else:
        KL += 0.5 * tf.reduce_sum(tf.square(self.q_sqrt))
        KL += 0.5 * tf.reduce_sum(self.q_mu ** 2)
    return KL
```
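For reference, the quantity this method assembles term by term is the standard closed-form Gaussian KL divergence. Writing $m$ for q_mu, $S$ for the lower-triangular q_sqrt, and $K = L_u L_u^\top$ for the prior covariance ($K = I$ in the whitened case), for each of the output GPs with $M$ inducing points:

$$\mathrm{KL}\big[\mathcal{N}(m, SS^\top)\,\big\|\,\mathcal{N}(0, K)\big] = \tfrac{1}{2}\Big(\operatorname{tr}(K^{-1}SS^\top) + m^\top K^{-1} m - M + \ln\det K - \ln\det SS^\top\Big),$$

with tf.cholesky_solve(self.Lu, self.q_mu) supplying the $K^{-1}m$ term and the triangular solve supplying $\operatorname{tr}(K^{-1}SS^\top)$.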