Python tensorflow.matrix_solve() Examples
The following are 18 code examples of tensorflow.matrix_solve(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.
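As a quick orientation before the project code: tf.matrix_solve(matrix, rhs) solves the linear system matrix @ x = rhs for x, batching over any leading dimensions, and fails at run time if the matrix is singular. The minimal sketch below assumes the TensorFlow 1.x session API used throughout these examples; in TensorFlow 2.x the same op is available as tf.linalg.solve.

import numpy as np
import tensorflow as tf

# Solve the 2x2 system A x = b, i.e. 3x + y = 9 and x + 2y = 8.
A = tf.constant([[3., 1.],
                 [1., 2.]])
b = tf.constant([[9.],
                 [8.]])
x = tf.matrix_solve(A, b)

with tf.Session() as sess:
    print(sess.run(x))  # approx [[2.], [3.]]
    # NumPy agrees:
    print(np.linalg.solve([[3., 1.], [1., 2.]], [[9.], [8.]]))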
Example #1
Source File: matrix_solve_op_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def _verifySolve(self, x, y, batch_dims=None):
    for adjoint in False, True:
        for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
            if np_type in (np.float32, np.float64):
                # For real dtypes, keep only the real part of the test data.
                a = x.real.astype(np_type)
                b = y.real.astype(np_type)
            else:
                a = x.astype(np_type)
                b = y.astype(np_type)
            if adjoint:
                a_np = np.conj(np.transpose(a))
            else:
                a_np = a
            if batch_dims is not None:
                a = np.tile(a, batch_dims + [1, 1])
                a_np = np.tile(a_np, batch_dims + [1, 1])
                b = np.tile(b, batch_dims + [1, 1])
            np_ans = np.linalg.solve(a_np, b)
            with self.test_session():
                tf_ans = tf.matrix_solve(a, b, adjoint=adjoint)
                out = tf_ans.eval()
                self.assertEqual(tf_ans.get_shape(), out.shape)
                self.assertEqual(np_ans.shape, out.shape)
                self.assertAllClose(np_ans, out)
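The adjoint branch of this test pins down what adjoint=True means: tf.matrix_solve(a, b, adjoint=True) solves conj(a)^T x = b. A NumPy-only sketch of that identity, using hypothetical random data rather than the test's inputs:

import numpy as np

rng = np.random.RandomState(0)
a = rng.randn(3, 3) + 1j * rng.randn(3, 3)
b = rng.randn(3, 1) + 1j * rng.randn(3, 1)

# adjoint=True corresponds to solving conj(a)^T x = b.
x = np.linalg.solve(np.conj(a.T), b)

# Substituting back reproduces the right-hand side.
assert np.allclose(np.conj(a.T) @ x, b)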
Example #2
Source File: hessian.py From neupy with MIT License | 6 votes |
def init_train_updates(self):
    penalty_const = asfloat(self.penalty_const)

    n_parameters = self.network.n_parameters
    variables = self.network.variables
    parameters = [var for var in variables.values() if var.trainable]
    param_vector = make_single_vector(parameters)

    hessian_matrix, full_gradient = find_hessian_and_gradient(
        self.variables.loss, parameters
    )
    parameter_update = tf.matrix_solve(
        hessian_matrix + penalty_const * tf.eye(n_parameters),
        tf.reshape(full_gradient, [-1, 1])
    )
    updated_parameters = param_vector - flatten(parameter_update)
    updates = setup_parameter_updates(parameters, updated_parameters)

    return updates
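The solve here is one damped Newton step, theta <- theta - (H + penalty*I)^{-1} g, where the penalty constant keeps a possibly singular or indefinite Hessian invertible. Below is a self-contained NumPy sketch of the same update on a toy quadratic loss; all names are hypothetical stand-ins, not neupy API:

import numpy as np

def damped_newton_step(theta, hessian, gradient, penalty_const=0.1):
    """One step of theta <- theta - (H + penalty*I)^{-1} g."""
    n = theta.size
    update = np.linalg.solve(hessian + penalty_const * np.eye(n),
                             gradient.reshape(-1, 1))
    return theta - update.ravel()

# Toy quadratic loss 0.5 * theta^T A theta - b^T theta, whose Hessian is A
# and whose gradient is A theta - b; the minimizer is A^{-1} b.
A = np.array([[2., 0.5], [0.5, 1.]])
b = np.array([1., 1.])
theta = np.zeros(2)
for _ in range(20):
    theta = damped_newton_step(theta, A, A @ theta - b)
print(theta, np.linalg.solve(A, b))  # converges toward the exact minimizer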
Example #3
Source File: lev_marq.py From neupy with MIT License | 5 votes |
def init_train_updates(self):
    training_outputs = self.network.training_outputs
    last_error = self.variables.last_error
    error_func = self.variables.loss
    mu = self.variables.mu

    new_mu = tf.where(
        tf.less(last_error, error_func),
        mu * self.mu_update_factor,
        mu / self.mu_update_factor,
    )
    err_for_each_sample = flatten((self.target - training_outputs) ** 2)

    variables = self.network.variables
    params = [var for var in variables.values() if var.trainable]
    param_vector = make_single_vector(params)

    J = compute_jacobian(err_for_each_sample, params)
    J_T = tf.transpose(J)
    n_params = J.shape[1]

    parameter_update = tf.matrix_solve(
        tf.matmul(J_T, J) + new_mu * tf.eye(n_params.value),
        tf.matmul(J_T, tf.expand_dims(err_for_each_sample, 1))
    )
    updated_params = param_vector - flatten(parameter_update)

    updates = [(mu, new_mu)]
    parameter_updates = setup_parameter_updates(params, updated_params)
    updates.extend(parameter_updates)

    return updates
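This is the classic Levenberg-Marquardt update, delta = (J^T J + mu*I)^{-1} J^T r, with mu pushed up when the error grew and down when it shrank. A self-contained NumPy sketch of one such step on a hypothetical linear least-squares problem (names are illustrative only):

import numpy as np

def lev_marq_step(params, jacobian, residuals, mu):
    """delta = (J^T J + mu*I)^{-1} J^T r; returns updated parameters."""
    n = params.size
    JtJ = jacobian.T @ jacobian
    Jtr = jacobian.T @ residuals.reshape(-1, 1)
    delta = np.linalg.solve(JtJ + mu * np.eye(n), Jtr)
    return params - delta.ravel()

# Linear model y = X @ w; for linear residuals the Jacobian is just X,
# so a single step with small mu already lands near the solution.
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
w_true = np.array([1., -2., 0.5])
y = X @ w_true
w = np.zeros(3)
residuals = X @ w - y
w = lev_marq_step(w, X, residuals, mu=1e-3)
print(w)  # close to w_true after one step on this linear problem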
Example #4
Source File: ops.py From tfdeploy with MIT License | 5 votes |
def test_MatrixSolve(self):
    # Batched systems: a 2x3 batch of 3x3 matrices with 3x1 right-hand sides.
    t = tf.matrix_solve(*self.random((2, 3, 3, 3), (2, 3, 3, 1)), adjoint=False)
    self.check(t)
    t = tf.matrix_solve(*self.random((2, 3, 3, 3), (2, 3, 3, 1)), adjoint=True)
    self.check(t)
Example #5
Source File: data.py From inverse-compositional-STN with MIT License | 5 votes |
def genPerturbations(opt):
    with tf.name_scope("genPerturbations"):
        X = np.tile(opt.canon4pts[:, 0], [opt.batchSize, 1])
        Y = np.tile(opt.canon4pts[:, 1], [opt.batchSize, 1])
        dX = tf.random_normal([opt.batchSize, 4]) * opt.pertScale \
            + tf.random_normal([opt.batchSize, 1]) * opt.transScale
        dY = tf.random_normal([opt.batchSize, 4]) * opt.pertScale \
            + tf.random_normal([opt.batchSize, 1]) * opt.transScale
        O = np.zeros([opt.batchSize, 4], dtype=np.float32)
        I = np.ones([opt.batchSize, 4], dtype=np.float32)
        # fit warp parameters to generated displacements
        if opt.warpType == "homography":
            A = tf.concat([tf.stack([X, Y, I, O, O, O, -X * (X + dX), -Y * (X + dX)], axis=-1),
                           tf.stack([O, O, O, X, Y, I, -X * (Y + dY), -Y * (Y + dY)], axis=-1)], 1)
            b = tf.expand_dims(tf.concat([X + dX, Y + dY], 1), -1)
            pPert = tf.matrix_solve(A, b)[:, :, 0]
            pPert -= tf.to_float([[1, 0, 0, 0, 1, 0, 0, 0]])
        else:
            if opt.warpType == "translation":
                J = np.concatenate([np.stack([I, O], axis=-1),
                                    np.stack([O, I], axis=-1)], axis=1)
            if opt.warpType == "similarity":
                J = np.concatenate([np.stack([X, Y, I, O], axis=-1),
                                    np.stack([-Y, X, O, I], axis=-1)], axis=1)
            if opt.warpType == "affine":
                J = np.concatenate([np.stack([X, Y, I, O, O, O], axis=-1),
                                    np.stack([O, O, O, X, Y, I], axis=-1)], axis=1)
            dXY = tf.expand_dims(tf.concat([dX, dY], 1), -1)
            pPert = tf.matrix_solve_ls(J, dXY)[:, :, 0]
    return pPert

# make training batch
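Note why the homography branch can use the exact solver while the other warps fall back to tf.matrix_solve_ls: four point correspondences yield eight equations, which exactly determines the eight homography parameters (with the bottom-right entry fixed to 1), whereas the translation/similarity/affine warps have fewer parameters than equations. A NumPy sketch of the same 8x8 construction for a single, hypothetical set of correspondences:

import numpy as np

def fit_homography_4pts(src, dst):
    """Solve the exact 8x8 system mapping 4 src points to 4 dst points
    (h33 fixed to 1), mirroring the batched TensorFlow construction above."""
    rows_a, rows_b = [], []
    for (x, y), (u, v) in zip(src, dst):
        rows_a.append([x, y, 1, 0, 0, 0, -x * u, -y * u])
        rows_a.append([0, 0, 0, x, y, 1, -x * v, -y * v])
        rows_b.extend([u, v])
    h = np.linalg.solve(np.array(rows_a, float),
                        np.array(rows_b, float).reshape(8, 1))
    return np.append(h.ravel(), 1.0).reshape(3, 3)

src = [(0, 0), (1, 0), (1, 1), (0, 1)]
dst = [(0.1, 0.0), (1.0, 0.2), (0.9, 1.1), (0.0, 0.8)]
H = fit_homography_4pts(src, dst)
p = H @ np.array([1.0, 0.0, 1.0])
print(p[:2] / p[2])  # approx (1.0, 0.2), the second dst point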
Example #6
Source File: tensorflow.py From deepx with MIT License | 5 votes |
def solve(self, a, b):
    return tf.matrix_solve(a, b)
Example #7
Source File: tensorflow.py From deepx with MIT License | 5 votes |
def matrix_solve(self, matrix, rhs, adjoint=None):
    return tf.matrix_solve(matrix, rhs, adjoint=adjoint)

# Theano interface
Example #8
Source File: osvgpc.py From streaming_sparse_gp with Apache License 2.0 | 5 votes |
def build_correction_term(self):
    # TODO
    Mb = tf.shape(self.Z)[0]
    Ma = self.M_old
    # jitter = settings.numerics.jitter_level
    jitter = 1e-4
    Saa = self.Su_old
    ma = self.mu_old
    obj = 0

    # a is old inducing points, b is new
    mu, Sigma = self.build_predict(self.Z_old, full_cov=True)
    Sigma = Sigma[:, :, 0]
    Smm = Sigma + tf.matmul(mu, tf.transpose(mu))
    Kaa = self.Kaa_old + np.eye(Ma) * jitter
    LSa = tf.cholesky(Saa)
    LKa = tf.cholesky(Kaa)

    obj += tf.reduce_sum(tf.log(tf.diag_part(LKa)))
    obj += -tf.reduce_sum(tf.log(tf.diag_part(LSa)))

    Sainv_ma = tf.matrix_solve(Saa, ma)
    obj += -0.5 * tf.reduce_sum(ma * Sainv_ma)
    obj += tf.reduce_sum(mu * Sainv_ma)

    Sainv_Smm = tf.matrix_solve(Saa, Smm)
    Kainv_Smm = tf.matrix_solve(Kaa, Smm)
    obj += -0.5 * tf.reduce_sum(
        tf.diag_part(Sainv_Smm) - tf.diag_part(Kainv_Smm))
    return obj
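Since Saa and Kaa here are positive-definite covariance/kernel matrices whose Cholesky factors (LSa, LKa) are already computed for the log-determinant terms, the same solves could in principle reuse those factors via tf.cholesky_solve. A minimal sketch of that equivalence on toy data, not the code the project actually uses:

import numpy as np
import tensorflow as tf

# Toy positive-definite matrix and right-hand side.
S = tf.constant([[4., 1.], [1., 3.]])
m = tf.constant([[1.], [2.]])

L = tf.cholesky(S)
x_chol = tf.cholesky_solve(L, m)   # reuses the factor already on hand
x_lu = tf.matrix_solve(S, m)       # general LU-based solve

with tf.Session() as sess:
    a, b = sess.run([x_chol, x_lu])
    print(np.allclose(a, b))  # True: both solve S x = m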
Example #9
Source File: operator_test_util.py From deep_image_model with Apache License 2.0 | 5 votes |
def testSqrtSolve(self):
    # Square roots are not unique, but we should still have
    # S^{-T} S^{-1} x = A^{-1} x.
    # In our case, we should have S = S^T, so then S^{-1} S^{-1} x = A^{-1} x.
    with self.test_session():
        for batch_shape in [(), (2, 3,)]:
            for k in [1, 4]:
                operator, mat = self._build_operator_and_mat(batch_shape, k)

                # Work with 5 simultaneous systems.  5 is arbitrary.
                x = self._rng.randn(*(batch_shape + (k, 5)))

                self._compare_results(
                    expected=tf.matrix_solve(mat, x).eval(),
                    actual=operator.sqrt_solve(operator.sqrt_solve(x)))
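The identity in the comment is easy to check numerically: if A = S S^T, then S^{-T} (S^{-1} x) equals A^{-1} x. A NumPy sketch with a hypothetical positive-definite A and its Cholesky factor playing the role of the square root:

import numpy as np

rng = np.random.RandomState(0)
m = rng.randn(4, 4)
A = m @ m.T + 4 * np.eye(4)   # positive definite by construction
S = np.linalg.cholesky(A)     # one valid square root: A = S S^T
x = rng.randn(4, 5)

# Two triangular solves with the square root reproduce the full solve.
via_sqrt = np.linalg.solve(S.T, np.linalg.solve(S, x))
assert np.allclose(via_sqrt, np.linalg.solve(A, x))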
Example #10
Source File: operator_test_util.py From deep_image_model with Apache License 2.0 | 5 votes |
def testSolve(self):
    with self.test_session():
        for batch_shape in [(), (2, 3,)]:
            for k in [1, 4]:
                operator, mat = self._build_operator_and_mat(batch_shape, k)

                # Work with 5 simultaneous systems.  5 is arbitrary.
                x = self._rng.randn(*(batch_shape + (k, 5)))

                self._compare_results(
                    expected=tf.matrix_solve(mat, x).eval(),
                    actual=operator.solve(x))
Example #11
Source File: operator_pd_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def _batch_solve(self, rhs):
    return tf.matrix_solve(self._pos_def_matrix, rhs)
Example #12
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testBatchResultSize(self):
    # 3x3x3 matrices, 3x3x1 right-hand sides.
    # Note: each 3x3 matrix below is singular, but that is fine here because
    # the test only checks static shapes and never evaluates the solves.
    matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
    rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
    answer = tf.matrix_solve(matrix, rhs)
    ls_answer = tf.matrix_solve_ls(matrix, rhs)
    self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
    self.assertEqual(answer.get_shape(), [3, 3, 1])
Example #13
Source File: matrix_solve_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
        with self.assertRaisesOpError("Input matrix is not invertible."):
            # All rows of the matrix below add to zero.
            matrix = tf.constant([[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
            tf.matrix_solve(matrix, matrix).eval()
Example #14
Source File: matrix_solve_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.test_session():
        matrix = tf.constant([[1., 0.], [0., 1.]])
        rhs = tf.constant([[1., 0.]])
        with self.assertRaises(ValueError):
            tf.matrix_solve(matrix, rhs)
Example #15
Source File: matrix_solve_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testNonSquareMatrix(self):
    # When the solve of a non-square matrix is attempted we should return
    # an error.
    with self.test_session():
        with self.assertRaises(ValueError):
            matrix = tf.constant([[1., 2., 3.], [3., 4., 5.]])
            tf.matrix_solve(matrix, matrix)
Example #16
Source File: osgpr.py From streaming_sparse_gp with Apache License 2.0 | 4 votes |
def _build_common_terms(self):
    Mb = tf.shape(self.Z)[0]
    Ma = self.M_old
    # jitter = settings.numerics.jitter_level
    jitter = 1e-4
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)

    Saa = self.Su_old
    ma = self.mu_old

    # a is old inducing points, b is new
    # f is training points
    # s is test points
    Kbf = self.kern.K(self.Z, self.X)
    Kbb = self.kern.K(self.Z) + tf.eye(Mb, dtype=float_type) * jitter
    Kba = self.kern.K(self.Z, self.Z_old)
    Kaa_cur = self.kern.K(self.Z_old) + tf.eye(Ma, dtype=float_type) * jitter
    Kaa = self.Kaa_old + tf.eye(Ma, dtype=float_type) * jitter

    err = self.Y - self.mean_function(self.X)

    Sainv_ma = tf.matrix_solve(Saa, ma)
    Sinv_y = self.Y / sigma2
    c1 = tf.matmul(Kbf, Sinv_y)
    c2 = tf.matmul(Kba, Sainv_ma)
    c = c1 + c2

    Lb = tf.cholesky(Kbb)
    Lbinv_c = tf.matrix_triangular_solve(Lb, c, lower=True)
    Lbinv_Kba = tf.matrix_triangular_solve(Lb, Kba, lower=True)
    Lbinv_Kbf = tf.matrix_triangular_solve(Lb, Kbf, lower=True) / sigma
    d1 = tf.matmul(Lbinv_Kbf, tf.transpose(Lbinv_Kbf))

    LSa = tf.cholesky(Saa)
    Kab_Lbinv = tf.transpose(Lbinv_Kba)
    LSainv_Kab_Lbinv = tf.matrix_triangular_solve(
        LSa, Kab_Lbinv, lower=True)
    d2 = tf.matmul(tf.transpose(LSainv_Kab_Lbinv), LSainv_Kab_Lbinv)

    La = tf.cholesky(Kaa)
    Lainv_Kab_Lbinv = tf.matrix_triangular_solve(
        La, Kab_Lbinv, lower=True)
    d3 = tf.matmul(tf.transpose(Lainv_Kab_Lbinv), Lainv_Kab_Lbinv)

    D = tf.eye(Mb, dtype=float_type) + d1 + d2 - d3
    D = D + tf.eye(Mb, dtype=float_type) * jitter
    LD = tf.cholesky(D)

    LDinv_Lbinv_c = tf.matrix_triangular_solve(LD, Lbinv_c, lower=True)

    return (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
            Lbinv_Kba, LDinv_Lbinv_c, err, d1)
Example #17
Source File: osgpr.py From streaming_sparse_gp with Apache License 2.0 | 4 votes |
def build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood.
    """
    Mb = tf.shape(self.Z)[0]
    Ma = self.M_old
    jitter = settings.numerics.jitter_level
    # jitter = 1e-4
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)
    N = self.num_data

    Saa = self.Su_old
    ma = self.mu_old

    # a is old inducing points, b is new
    # f is training points
    Kfdiag = self.kern.Kdiag(self.X)
    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
     Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._build_common_terms()

    LSa = tf.cholesky(Saa)
    Lainv_ma = tf.matrix_triangular_solve(LSa, ma, lower=True)

    # constant term
    bound = -0.5 * N * np.log(2 * np.pi)
    # quadratic term
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2
    # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)
    bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))
    bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))

    # log det term
    bound += -0.5 * N * tf.reduce_sum(tf.log(sigma2))
    bound += -tf.reduce_sum(tf.log(tf.diag_part(LD)))

    # delta 1: trace term
    bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2
    bound += 0.5 * tf.reduce_sum(tf.diag_part(Qff))

    # delta 2: a and b difference
    bound += tf.reduce_sum(tf.log(tf.diag_part(La)))
    bound += -tf.reduce_sum(tf.log(tf.diag_part(LSa)))

    Kaadiff = Kaa_cur - tf.matmul(tf.transpose(Lbinv_Kba), Lbinv_Kba)
    Sainv_Kaadiff = tf.matrix_solve(Saa, Kaadiff)
    Kainv_Kaadiff = tf.matrix_solve(Kaa, Kaadiff)

    bound += -0.5 * tf.reduce_sum(
        tf.diag_part(Sainv_Kaadiff) - tf.diag_part(Kainv_Kaadiff))

    return bound
Example #18
Source File: osgpr.py From streaming_sparse_gp with Apache License 2.0 | 4 votes |
def build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood.
    """
    Mb = tf.shape(self.Z)[0]
    Ma = self.M_old
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)
    alpha = self.alpha

    Saa = self.Su_old
    ma = self.mu_old
    N = self.num_data

    # a is old inducing points, b is new
    # f is training points
    (Kbf, Kba, Kaa, Kaa_cur, LSa, La, Kbb, Lb, D, LD,
     Lbinv_Kba, LDinv_c, err, Dff, Kaadiff, Sainv_ma,
     Q, LQ, LM) = self._build_common_terms()

    Lainv_ma = tf.matrix_triangular_solve(LSa, ma, lower=True)

    # constant term
    bound = -0.5 * N * np.log(2 * np.pi)
    # quadratic term
    bound += -0.5 * tf.reduce_sum(tf.square(err) / tf.reshape(Dff, [N, 1]))
    bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))
    bound += 0.5 * tf.reduce_sum(tf.square(LDinv_c))

    ma_Sainv_LM = tf.matmul(tf.transpose(Sainv_ma), LM)
    Qinv_LM_Sainv_ma = tf.matrix_solve(Q, tf.transpose(ma_Sainv_LM))
    bound += 0.5 * alpha * \
        tf.reduce_sum(tf.matmul(ma_Sainv_LM, Qinv_LM_Sainv_ma))

    # log det term
    bound += -0.5 * tf.reduce_sum(tf.log(Dff))
    bound += -tf.reduce_sum(tf.log(tf.diag_part(LD)))

    # delta 1: trace-like term
    bound += -0.5 * (1 - alpha) / alpha * tf.reduce_sum(tf.log(Dff / sigma2))

    # delta 2
    bound += -1.0 / alpha * tf.reduce_sum(tf.log(tf.diag_part(LQ)))
    bound += tf.reduce_sum(tf.log(tf.diag_part(La)))
    bound += -tf.reduce_sum(tf.log(tf.diag_part(LSa)))

    return bound