Python tensorflow.matrix_solve_ls() Examples
The following are 12 code examples of tensorflow.matrix_solve_ls(), drawn from open-source projects. Each example lists its source file, originating project, and license. You may also want to check out all available functions/classes of the module tensorflow.
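Before diving into the examples, here is a minimal sketch of the basic call, written against the TensorFlow 1.x API that all of the examples below use (in TensorFlow 2.x the same operation is exposed as tf.linalg.lstsq). The matrix values are arbitrary illustration data, not taken from any of the projects.

```python
import tensorflow as tf  # TensorFlow 1.x API; in 2.x use tf.linalg.lstsq

# Overdetermined system: find x minimizing ||Ax - b||^2.
A = tf.constant([[1., 1.], [1., 2.], [1., 3.]])  # 3x2 matrix
b = tf.constant([[6.], [0.], [0.]])              # 3x1 right-hand side

# fast=True (the default) solves the normal equations
# (A^T A + l2_regularizer * I) x = A^T b.
x = tf.matrix_solve_ls(A, b, l2_regularizer=0.0, fast=True)  # 2x1 solution

with tf.Session() as sess:
    print(sess.run(x))  # agrees with np.linalg.lstsq on the same data
```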
Example #1
Source File: geometry_utils.py From SPFN with MIT License
```python
def guarded_matrix_solve_ls(A, b, W, condition_number_cap=1e5):
    # Solve the weighted least-squares problem ||\sqrt(W)(Ax-b)||^2
    # A - BxNxD
    # b - BxNx1
    # W - BxN
    sqrt_W = tf.sqrt(tf.maximum(W, SQRT_EPS))  # BxN
    A *= tf.expand_dims(sqrt_W, axis=2)  # BxNxD
    b *= tf.expand_dims(sqrt_W, axis=2)  # BxNx1
    # Compute singular values, trivializing the problem when the condition number is too large
    AtA = tf.matmul(a=A, b=A, transpose_a=True)
    s, _, _ = [tf.stop_gradient(u) for u in tf.svd(AtA)]  # s will be BxD
    mask = tf.less(s[:, 0] / s[:, -1], condition_number_cap)  # B
    A *= tf.to_float(tf.expand_dims(tf.expand_dims(mask, axis=1), axis=2))  # zero out badly conditioned data
    x = tf.matrix_solve_ls(A, b, l2_regularizer=LS_L2_REGULARIZER, fast=True)  # BxDx1
    return tf.squeeze(x, axis=2)  # BxD
```
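SQRT_EPS and LS_L2_REGULARIZER are module-level constants in SPFN's geometry_utils.py (presumably small positive floats) and are not shown here. A hypothetical invocation, with made-up shapes matching the comments in the function, might look like this:

```python
# Hypothetical usage sketch; shapes follow the BxNxD / BxNx1 / BxN
# comments in guarded_matrix_solve_ls above.
B, N, D = 8, 128, 4
A = tf.random_normal([B, N, D])       # batch of data matrices
b = tf.random_normal([B, N, 1])       # batch of right-hand sides
W = tf.random_uniform([B, N])         # per-row weights
x = guarded_matrix_solve_ls(A, b, W)  # BxD weighted least-squares solutions
```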
Example #2
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0
```python
def _verifySolve(self, x, y):
    for np_type in [np.float32, np.float64]:
        a = x.astype(np_type)
        b = y.astype(np_type)
        np_ans, _, _, _ = np.linalg.lstsq(a, b)
        for fast in [True, False]:
            with self.test_session():
                tf_ans = tf.matrix_solve_ls(a, b, fast=fast)
                ans = tf_ans.eval()
                self.assertEqual(np_ans.shape, tf_ans.get_shape())
                self.assertEqual(np_ans.shape, ans.shape)
                # Check residual norm.
                tf_r = b - BatchMatMul(a, ans)
                tf_r_norm = np.sum(tf_r * tf_r)
                np_r = b - BatchMatMul(a, np_ans)
                np_r_norm = np.sum(np_r * np_r)
                self.assertAllClose(np_r_norm, tf_r_norm)
                # Check solution.
                self.assertAllClose(np_ans, ans, atol=1e-5, rtol=1e-5)
```
Example #3
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0
```python
def _verifyRegularized(self, x, y, l2_regularizer):
    for np_type in [np.float32, np.float64]:
        # Test with a single matrix.
        a = x.astype(np_type)
        b = y.astype(np_type)
        np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
        with self.test_session():
            # Test matrix_solve_ls on regular matrices
            tf_ans = tf.matrix_solve_ls(
                a, b, l2_regularizer=l2_regularizer, fast=True).eval()
            self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)

        # Test with a 2x3 batch of matrices.
        a = np.tile(x.astype(np_type), [2, 3, 1, 1])
        b = np.tile(y.astype(np_type), [2, 3, 1, 1])
        np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
        with self.test_session():
            tf_ans = tf.matrix_solve_ls(
                a, b, l2_regularizer=l2_regularizer, fast=True).eval()
            self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
```
Example #4
Source File: tf_image.py From burst-denoising with Apache License 2.0
```python
def solve_convolve(noisy, truth, final_K, excl_edges=False):
    kpad = final_K // 2
    ch = noisy.get_shape().as_list()[-1]
    ch1 = truth.get_shape().as_list()[-1]
    sh = tf.shape(noisy)
    h, w = sh[1], sh[2]
    img_stack = []
    noisy = tf.pad(noisy, [[0, 0], [kpad, kpad], [kpad, kpad], [0, 0]])
    for i in range(final_K):
        for j in range(final_K):
            img_stack.append(noisy[:, i:h + i, j:w + j, :])
    img_stack = tf.stack(img_stack, axis=-2)
    is0 = img_stack
    if excl_edges:
        img_stack = img_stack[:, kpad:-kpad, kpad:-kpad, :]
        truth = truth[:, kpad:-kpad, kpad:-kpad]
        h = h - 2 * kpad
        w = w - 2 * kpad
    A = tf.reshape(img_stack, [tf.shape(img_stack)[0], h * w, final_K**2 * ch])
    b = tf.reshape(truth, [tf.shape(truth)[0], h * w, ch1])
    x_ = tf.matrix_solve_ls(A, b, fast=False)
    x = tf.reshape(x_, [tf.shape(truth)[0], final_K, final_K, ch, ch1])
    return x
```
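A hypothetical way to call this might be the following, fitting a 5x5 linear filter that maps noisy to truth in the least-squares sense; the image shapes are made up for illustration:

```python
# Hypothetical usage sketch; shapes are illustrative.
noisy = tf.placeholder(tf.float32, [None, 64, 64, 1])  # noisy input images
truth = tf.placeholder(tf.float32, [None, 64, 64, 1])  # clean targets
kernels = solve_convolve(noisy, truth, final_K=5)      # [B, 5, 5, 1, 1]
```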
Example #5
Source File: model.py From rgn with MIT License
```python
def _curriculum(config, step, loss_history, dependency_ops):
    """ Creates TF ops for maintaining and advancing the curriculum. """

    # assign appropriate curriculum increment value
    for case in switch(config['behavior']):
        if case('fixed_rate'):
            # fixed rate, always return same number
            increment = tf.constant(config['rate'], name='curriculum_increment')
        elif case('loss_threshold'):
            # return fixed increment if last loss is below threshold, zero otherwise
            increment_pred = tf.less(loss_history[-1], config['threshold'], name='curriculum_predicate')
            full_increment_func = lambda: tf.constant(config['rate'], name='full_curriculum_increment')
            zero_increment_func = lambda: tf.constant(0.0, name='zero_curriculum_increment')
            increment = tf.cond(increment_pred, full_increment_func, zero_increment_func)
        elif case('loss_change'):
            # predicate for increment type
            increment_pred = tf.not_equal(loss_history[0], DUMMY_LOSS, name='curriculum_predicate')

            # increment function for when loss history is still
            def full_increment_func():
                lin_seq = tf.expand_dims(tf.linspace(0., 1., config['change_num_iterations']), 1)
                ls_matrix = tf.concat([tf.ones_like(lin_seq), lin_seq], 1)
                ls_rhs = tf.expand_dims(loss_history, 1)
                ls_slope = tf.matrix_solve_ls(ls_matrix, ls_rhs)[1, 0]
                full_increment = tf.div(config['rate'],
                                        tf.pow(tf.abs(ls_slope) + 1, config['sharpness']),
                                        name='full_curriculum_increment')
                return full_increment

            # dummy increment function for when loss history is changing rapidly
            zero_increment_func = lambda: tf.constant(0.0, name='zero_curriculum_increment')

            # final conditional increment
            increment = tf.cond(increment_pred, full_increment_func, zero_increment_func)

    # create updating op. the semantics are such that training / gradient update
    # is first performed before the curriculum is incremented.
    with tf.control_dependencies(dependency_ops):
        update_op = tf.assign_add(step, increment, name='update_curriculum_op')

    return update_op
```
Example #6
Source File: ops.py From tfdeploy with MIT License
```python
def test_MatrixSolveLs(self):
    t = tf.matrix_solve_ls(*self.random((2, 3, 3, 3), (2, 3, 3, 1)))
    self.check(t)
```
Example #7
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0
```python
def _verifySolveBatch(self, x, y):
    # Since numpy.linalg.lstsq does not support batch solves, as opposed
    # to numpy.linalg.solve, we just perform this test for a fixed batch size
    # of 2x3.
    for np_type in [np.float32, np.float64]:
        a = np.tile(x.astype(np_type), [2, 3, 1, 1])
        b = np.tile(y.astype(np_type), [2, 3, 1, 1])
        np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
        for dim1 in range(2):
            for dim2 in range(3):
                np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
                    a[dim1, dim2, :, :], b[dim1, dim2, :, :])
        for fast in [True, False]:
            with self.test_session():
                tf_ans = tf.matrix_solve_ls(a, b, fast=fast).eval()
                self.assertEqual(np_ans.shape, tf_ans.shape)
                # Check residual norm.
                tf_r = b - BatchMatMul(a, tf_ans)
                tf_r_norm = np.sum(tf_r * tf_r)
                np_r = b - BatchMatMul(a, np_ans)
                np_r_norm = np.sum(np_r * np_r)
                self.assertAllClose(np_r_norm, tf_r_norm)
                # Check solution.
                if fast or a.shape[-2] >= a.shape[-1]:
                    # We skip this test for the underdetermined case when using the
                    # slow path, because Eigen does not return a minimum norm solution.
                    # TODO(rmlarsen): Enable this check for all paths if/when we fix
                    # Eigen's solver.
                    self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
```
Example #8
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0
```python
def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.test_session():
        matrix = tf.constant([[1., 0.], [0., 1.]])
        rhs = tf.constant([[1., 0.]])
        with self.assertRaises(ValueError):
            tf.matrix_solve_ls(matrix, rhs)
```
Example #9
Source File: matrix_solve_ls_op_test.py From deep_image_model with Apache License 2.0
```python
def testEmpty(self):
    full = np.array([[1., 2.], [3., 4.], [5., 6.]])
    empty0 = np.empty([3, 0])
    empty1 = np.empty([0, 2])
    for fast in [True, False]:
        with self.test_session():
            tf_ans = tf.matrix_solve_ls(empty0, empty0, fast=fast).eval()
            self.assertEqual(tf_ans.shape, (0, 0))
            tf_ans = tf.matrix_solve_ls(empty0, full, fast=fast).eval()
            self.assertEqual(tf_ans.shape, (0, 2))
            tf_ans = tf.matrix_solve_ls(full, empty0, fast=fast).eval()
            self.assertEqual(tf_ans.shape, (2, 0))
            tf_ans = tf.matrix_solve_ls(empty1, empty1, fast=fast).eval()
            self.assertEqual(tf_ans.shape, (2, 2))
```
Example #10
Source File: data.py From inverse-compositional-STN with MIT License
```python
def genPerturbations(opt):
    with tf.name_scope("genPerturbations"):
        X = np.tile(opt.canon4pts[:, 0], [opt.batchSize, 1])
        Y = np.tile(opt.canon4pts[:, 1], [opt.batchSize, 1])
        dX = tf.random_normal([opt.batchSize, 4]) * opt.pertScale \
            + tf.random_normal([opt.batchSize, 1]) * opt.transScale
        dY = tf.random_normal([opt.batchSize, 4]) * opt.pertScale \
            + tf.random_normal([opt.batchSize, 1]) * opt.transScale
        O = np.zeros([opt.batchSize, 4], dtype=np.float32)
        I = np.ones([opt.batchSize, 4], dtype=np.float32)
        # fit warp parameters to generated displacements
        if opt.warpType == "homography":
            A = tf.concat([tf.stack([X, Y, I, O, O, O, -X * (X + dX), -Y * (X + dX)], axis=-1),
                           tf.stack([O, O, O, X, Y, I, -X * (Y + dY), -Y * (Y + dY)], axis=-1)], 1)
            b = tf.expand_dims(tf.concat([X + dX, Y + dY], 1), -1)
            pPert = tf.matrix_solve(A, b)[:, :, 0]
            pPert -= tf.to_float([[1, 0, 0, 0, 1, 0, 0, 0]])
        else:
            if opt.warpType == "translation":
                J = np.concatenate([np.stack([I, O], axis=-1),
                                    np.stack([O, I], axis=-1)], axis=1)
            if opt.warpType == "similarity":
                J = np.concatenate([np.stack([X, Y, I, O], axis=-1),
                                    np.stack([-Y, X, O, I], axis=-1)], axis=1)
            if opt.warpType == "affine":
                J = np.concatenate([np.stack([X, Y, I, O, O, O], axis=-1),
                                    np.stack([O, O, O, X, Y, I], axis=-1)], axis=1)
            dXY = tf.expand_dims(tf.concat([dX, dY], 1), -1)
            pPert = tf.matrix_solve_ls(J, dXY)[:, :, 0]
    return pPert

# make training batch
```
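The function reads everything from an options object; a hypothetical call, with made-up field values, could look like this:

```python
# Hypothetical usage sketch: `opt` carries the fields the function reads.
# All values here are made up for illustration.
class Opt(object): pass
opt = Opt()
opt.batchSize = 16
opt.warpType = "affine"       # selects the affine branch above
opt.pertScale = 0.25
opt.transScale = 0.25
opt.canon4pts = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]],
                         dtype=np.float32)  # 4x2 canonical corners
pPert = genPerturbations(opt)  # [batchSize, 6] affine warp parameters
```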
Example #11
Source File: stneuronet.py From STNeuroNet with Apache License 2.0
```python
def random_transform(self, batch_size):
    if self._transform is None:
        corners = [[[-1., -1., -1.], [-1., -1., 1.], [-1., 1., -1.], [-1., 1., 1.],
                    [1., -1., -1.], [1., -1., 1.], [1., 1., -1.], [1., 1., 1.]]]
        corners = tf.tile(corners, [batch_size, 1, 1])
        corners2 = corners * \
            (1 - tf.random_uniform([batch_size, 8, 3], 0, self.scale))
        corners_homog = tf.concat([corners, tf.ones([batch_size, 8, 1])], 2)
        corners2_homog = tf.concat([corners2, tf.ones([batch_size, 8, 1])], 2)
        _transform = tf.matrix_solve_ls(corners_homog, corners2_homog)
        self._transform = tf.transpose(_transform, [0, 2, 1])
    return self._transform
```
Example #12
Source File: memory.py From dynamic-kanerva-machines with Apache License 2.0
```python
def _solve_w_mean(self, new_z_mean, M):
    """Minimise the conditional KL-divergence between z wrt w."""
    w_matrix = tf.matmul(M, M, transpose_b=True)
    w_rhs = tf.einsum('bmc,sbc->bms', M, new_z_mean)
    w_mean = tf.matrix_solve_ls(
        matrix=w_matrix, rhs=w_rhs,
        l2_regularizer=self._obs_noise_stddev**2 / self._w_prior_stddev**2)
    w_mean = tf.einsum('bms->sbm', w_mean)
    return w_mean
```