Python tensorflow.svd() Examples
The following are 29 code examples of tensorflow.svd(), collected from open-source projects.
You can go to the original project or source file by following the attribution above each example.
You may also want to check out all available functions and classes of the tensorflow module.
Example #1
Source File: MleGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
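The core of this method is the PCA step: tf.svd of the Gram matrix of the normalized embeddings yields directions through which the embeddings are projected before cosine similarities are computed. A minimal standalone sketch of just that step, with a hypothetical 100-word vocabulary and 32-dimensional embeddings (TF 1.x API):

import numpy as np
import tensorflow as tf

embeddings = tf.constant(np.random.randn(100, 32), dtype=tf.float32)

# L2-normalize each embedding vector, as set_similarity does.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized = embeddings / norm

# SVD of the Gram matrix; u has shape [100, 100].
gram = tf.matmul(normalized, tf.transpose(normalized))
s, u, v = tf.svd(gram)

# Keep the first 20 rows of u (the same slice the strided_slice above takes)
# and project the normalized embeddings through them.
u_r = u[:20, :]
projected = tf.matmul(u_r, normalized)  # [20, 32]

with tf.Session() as sess:
    print(sess.run(projected).shape)  # (20, 32)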
Example #2
Source File: geometry_utils.py From SPFN with MIT License
def guarded_matrix_solve_ls(A, b, W, condition_number_cap=1e5):
    # Solve weighted least square ||\sqrt(W)(Ax-b)||^2
    # A - BxNxD
    # b - BxNx1
    # W - BxN
    sqrt_W = tf.sqrt(tf.maximum(W, SQRT_EPS))  # BxN
    A *= tf.expand_dims(sqrt_W, axis=2)  # BxNxD
    b *= tf.expand_dims(sqrt_W, axis=2)  # BxNx1
    # Compute singular values, trivializing the problem when condition number is too large
    AtA = tf.matmul(a=A, b=A, transpose_a=True)
    s, _, _ = [tf.stop_gradient(u) for u in tf.svd(AtA)]  # s will be BxD
    mask = tf.less(s[:, 0] / s[:, -1], condition_number_cap)  # B
    A *= tf.to_float(tf.expand_dims(tf.expand_dims(mask, axis=1), axis=2))  # zero out badly conditioned data
    x = tf.matrix_solve_ls(A, b, l2_regularizer=LS_L2_REGULARIZER, fast=True)  # BxDx1
    return tf.squeeze(x, axis=2)  # BxD
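The guard here is worth isolating: the ratio s[:, 0] / s[:, -1] of the largest to smallest singular value of AtA is the condition number, and batch elements above the cap are zeroed out so the regularized solver returns a harmless zero solution for them. A self-contained sketch under assumed shapes (SQRT_EPS and LS_L2_REGULARIZER are module constants defined elsewhere in geometry_utils.py; the 1e-8 below is a stand-in):

import tensorflow as tf

# Hypothetical batch of 4 systems with 10 equations and 3 unknowns.
A = tf.random_normal([4, 10, 3])
b = tf.random_normal([4, 10, 1])

AtA = tf.matmul(A, A, transpose_a=True)              # Bx3x3
s = tf.stop_gradient(tf.svd(AtA, compute_uv=False))  # BxD, descending order
well_conditioned = tf.less(s[:, 0] / s[:, -1], 1e5)  # B

# Zero out badly conditioned systems before solving.
A *= tf.to_float(tf.reshape(well_conditioned, [-1, 1, 1]))
x = tf.matrix_solve_ls(A, b, l2_regularizer=1e-8, fast=True)  # BxDx1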
Example #3
Source File: svp.py From BERT with Apache License 2.0
def SVD(X, n, name=None):
    with tf.variable_scope(name):
        sz = X.get_shape().as_list()
        if len(sz) > 2:
            x = tf.reshape(X, [-1, sz[1]*sz[2], sz[3]])
            n = min(n, sz[1]*sz[2], sz[3])
        else:
            x = tf.expand_dims(X, 1)
            n = 1

        with tf.device('CPU'):
            g = tf.get_default_graph()
            with g.gradient_override_map({"Svd": "Svd_"}):
                s, u, v = tf.svd(x, full_matrices=False)

        s = removenan(s)
        v = removenan(v)
        u = removenan(u)

        s = tf.nn.l2_normalize(tf.slice(s, [0, 0], [-1, n]), 1)
        U = tf.nn.l2_normalize(tf.slice(u, [0, 0, 0], [-1, -1, n]), 1)
        V = tf.nn.l2_normalize(tf.slice(v, [0, 0, 0], [-1, -1, n]), 1)

        return s, U, V
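Two pieces here are defined elsewhere in the project: removenan, and a custom gradient registered under the name Svd_ that gradient_override_map substitutes for TensorFlow's stock SVD gradient (which is numerically fragile for repeated or near-zero singular values). A plausible minimal removenan, shown only to make the snippet self-explanatory:

def removenan(x):
    # Replace NaNs with zeros so the l2_normalize calls below stay finite.
    return tf.where(tf.is_nan(x), tf.zeros_like(x), x)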
Example #4
Source File: tfops.py From glow with MIT License
def _symmetric_matrix_square_root(mat, eps=1e-10):
    """Compute square root of a symmetric matrix.

    Note that this is different from an elementwise square root. We want to
    compute M' where M' = sqrt(mat) such that M' * M' = mat.

    Also note that this method **only** works for symmetric matrices.

    Args:
      mat: Matrix to take the square root of.
      eps: Small epsilon such that any element less than eps will not be square
        rooted to guard against numerical instability.

    Returns:
      Matrix square root of mat.
    """
    # Unlike numpy, tensorflow's return order is (s, u, v)
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0, just use 0 in such case
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    # Note that the v returned by Tensorflow is v = V
    # (when referencing the equation A = U S V^T)
    # This is unlike Numpy which returns v = V^T
    return tf.matmul(tf.matmul(u, tf.diag(si)), v, transpose_b=True)
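A quick sanity check is to feed in a matrix that is symmetric positive semi-definite by construction and confirm the returned root multiplies back to it (a sketch using the function above):

import tensorflow as tf

m = tf.random_normal([4, 4])
mat = tf.matmul(m, m, transpose_b=True)  # symmetric PSD by construction

root = _symmetric_matrix_square_root(mat)
err = tf.reduce_max(tf.abs(tf.matmul(root, root) - mat))

with tf.Session() as sess:
    print(sess.run(err))  # near zero, up to float32 round-off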
Example #5
Source File: functions.py From safe_learning with MIT License
def lipschitz(self):
    """Return the Lipschitz constant as a Tensor.

    This assumes that only contractive nonlinearities are used! Examples
    are ReLUs and Sigmoids.

    Returns
    -------
    lipschitz : Tensor
        The Lipschitz constant of the neural network.
    """
    lipschitz = tf.constant(1, config.dtype)
    for W, b in self._parameter_iter():
        # lipschitz *= tf.reduce_max(tf.svd(W, compute_uv=False))
        lipschitz *= tf.reduce_max(self._svd(W))
    return lipschitz
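The commented-out line shows the direct computation: the per-layer factor is the spectral norm of W, that is, its largest singular value. A one-line sketch of that quantity on its own:

W = tf.random_normal([64, 32])
# Spectral norm of W == Lipschitz constant of the linear map x -> Wx.
spectral_norm = tf.reduce_max(tf.svd(W, compute_uv=False))

The class routes through self._svd (shown in the next example) instead, presumably to get a usable gradient through the singular values.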
Example #6
Source File: functions.py From safe_learning with MIT License
def _svd(A, name=None):
    """Tensorflow svd with gradient.

    Parameters
    ----------
    A : Tensor
        The matrix for which to compute singular values.
    name : string, optional

    Returns
    -------
    s : Tensor
        The singular values of A.
    """
    S0, U0, V0 = map(tf.stop_gradient,
                     tf.svd(A, full_matrices=True, name=name))
    # A = U * S * V.T
    # S = inv(U) * A * inv(V.T) = U.T * A * V (orthogonal matrices)
    S = tf.matmul(U0, tf.matmul(A, V0), transpose_a=True)
    return tf.matrix_diag_part(S)
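Because S is re-derived as U0^T A V0 from stop-gradient copies of the singular vectors, gradients with respect to A flow through the matmul rather than through TensorFlow's SVD op, whose gradient is not implemented for full_matrices=True. A usage sketch:

A = tf.random_normal([5, 3])
s = _svd(A)  # 3 singular values, differentiable with respect to A
grad = tf.gradients(tf.reduce_sum(s), A)[0]

with tf.Session() as sess:
    print(sess.run(grad).shape)  # (5, 3)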
Example #7
Source File: layers.py From calc2.0 with Apache License 2.0
def estimate_hom(src, dst):
    rx = src[:, :, 0:1]
    ry = src[:, :, 1:2]
    x = dst[:, :, 0:1]
    y = dst[:, :, 1:2]
    num_batch = tf.shape(src)[0]
    num_pts = tf.shape(src)[1]
    _0 = tf.zeros([num_batch, num_pts, 3])
    _1 = tf.ones([num_batch, num_pts, 1])

    A_even_rows = tf.concat([-rx, -ry, -_1, _0, rx*x, ry*x, x], axis=-1)
    A_odd_rows = tf.concat([_0, -rx, -ry, -_1, rx*y, ry*y, y], axis=-1)

    A = tf.concat([A_even_rows, A_odd_rows], axis=-1)
    A = tf.reshape(A, [num_batch, 2*num_pts, 9])

    _, _, V = tf.svd(A, full_matrices=True)
    return tf.reshape(V[:, :, -1], [num_batch, 3, 3])
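This is the standard DLT formulation: each correspondence contributes two rows to a 2N x 9 system Ah = 0, and since tf.svd orders singular values descending, the last column of V is the null-space direction, reshaped into the 3 x 3 homography. A hedged usage sketch; with dst equal to src, the recovered matrix should be proportional to the identity:

import tensorflow as tf

src = tf.random_uniform([1, 4, 2])  # one batch of four point correspondences
H = estimate_hom(src, src)          # identity homography, up to scale and sign

with tf.Session() as sess:
    print(sess.run(H))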
Example #8
Source File: tfops.py From pix2pix-flow with MIT License
def _symmetric_matrix_square_root(mat, eps=1e-10):
    """Compute square root of a symmetric matrix.

    Note that this is different from an elementwise square root. We want to
    compute M' where M' = sqrt(mat) such that M' * M' = mat.

    Also note that this method **only** works for symmetric matrices.

    Args:
      mat: Matrix to take the square root of.
      eps: Small epsilon such that any element less than eps will not be square
        rooted to guard against numerical instability.

    Returns:
      Matrix square root of mat.
    """
    # Unlike numpy, tensorflow's return order is (s, u, v)
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0, just use 0 in such case
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    # Note that the v returned by Tensorflow is v = V
    # (when referencing the equation A = U S V^T)
    # This is unlike Numpy which returns v = V^T
    return tf.matmul(tf.matmul(u, tf.diag(si)), v, transpose_b=True)
Example #9
Source File: TextganGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #10
Source File: RankganGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #11
Source File: OracleGru.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #12
Source File: OracleSru.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #13
Source File: OracleLstm.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #14
Source File: GsganGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    one_hot = tf.constant(np.eye(self.num_vocabulary))
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(one_hot), 1, keep_dims=True))
    self.normalized_embeddings = one_hot / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #15
Source File: layer_func.py From MMD-GAN with Apache License 2.0
def spectral_norm_variable_initializer(shape, dtype=tf.float32, partition_info=None):
    """ This function provides customized initializer for tf.get_variable()

    :param shape:
    :param dtype:
    :param partition_info: this is required by tf.layers, but ignored in many
        tf.initializer. Here we ignore it.
    :return:
    """
    variable = tf.random_normal(shape=shape, stddev=1.0, dtype=dtype)
    if len(shape) > 2:
        var_reshaped = tf.reshape(variable, shape=[-1, shape[-1]])
        sigma = tf.svd(var_reshaped, full_matrices=False, compute_uv=False)[0]
    else:
        sigma = tf.svd(variable, full_matrices=False, compute_uv=False)[0]

    return variable / (sigma + FLAGS.EPSI)
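Usage follows TensorFlow's standard initializer protocol; a sketch (this assumes the project's FLAGS object, which supplies EPSI, is in scope and configured):

W = tf.get_variable('w', shape=[128, 64],
                    initializer=spectral_norm_variable_initializer)
# At initialization, the largest singular value of W is approximately 1.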
Example #16
Source File: PgbleuGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #17
Source File: LeakganGenerator.py From Texygen with MIT License
def set_similarity(self, valid_examples=None, pca=True):
    if valid_examples == None:
        if pca:
            valid_examples = np.array(range(20))
        else:
            valid_examples = np.array(range(self.num_vocabulary))
    self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))
    self.normalized_embeddings = self.g_embeddings / self.norm
    # PCA
    if self.num_vocabulary >= 20 and pca == True:
        emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))
        s, u, v = tf.svd(emb)
        u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])
        self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)
    self.valid_embeddings = tf.nn.embedding_lookup(
        self.normalized_embeddings, self.valid_dataset)
    self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))
Example #18
Source File: sparse_covariance.py From tf-example-models with Apache License 2.0
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
    tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
    tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2),
                                  tf.expand_dims(tf_new_differences, 1))
    tf_new_covariance = tf.reduce_sum(
        tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)

    if self.has_prior:
        tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)

    tf_s, tf_u, _ = tf.svd(tf_new_covariance)

    tf_required_eigvals = tf_s[:self.rank]
    tf_required_eigvecs = tf_u[:, :self.rank]

    tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
    tf_new_eigvals = tf_required_eigvals - tf_new_baseline
    tf_new_eigvecs = tf.transpose(tf_required_eigvecs)

    return tf.group(
        self.tf_baseline.assign(tf_new_baseline),
        self.tf_eigvals.assign(tf_new_eigvals),
        self.tf_eigvecs.assign(tf_new_eigvecs)
    )
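The key fact used here is that for a symmetric positive semi-definite covariance, the SVD coincides with the eigendecomposition, so the leading singular values and vectors are the top eigenpairs; the leftover variance is spread uniformly as a baseline. A standalone sketch of that step with made-up dimensions (rank 2 out of 6):

import tensorflow as tf

rank = 2
m = tf.random_normal([6, 6])
cov = tf.matmul(m, m, transpose_b=True)  # symmetric PSD covariance

s, u, _ = tf.svd(cov)  # for PSD matrices: eigenvalues (descending) and eigenvectors
eigvals = s[:rank]
eigvecs = tf.transpose(u[:, :rank])  # one eigenvector per row

# Residual variance shared evenly over the remaining 6 - rank directions.
baseline = (tf.trace(cov) - tf.reduce_sum(eigvals)) / (6 - rank)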
Example #19
Source File: skip_thoughts_model.py From text_embedding with MIT License
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
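The left singular vectors of a square Gaussian matrix form an orthonormal basis, so the result times its own transpose is the identity. A quick check (sketch):

u = random_orthonormal_initializer([4, 4])
check = tf.matmul(u, u, transpose_b=True)  # ~ 4x4 identity matrix

with tf.Session() as sess:
    print(sess.run(check))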
Example #20
Source File: main.py From g-tensorflow-models with Apache License 2.0
def estimate_rotation(xyz0, xyz1, pconf, noise):
    """Estimates the rotation between two sets of keypoints.

    The rotation is estimated by first subtracting mean from each set of
    keypoints and computing SVD of the covariance matrix.

    Args:
      xyz0: [batch, num_kp, 3] The first set of keypoints.
      xyz1: [batch, num_kp, 3] The second set of keypoints.
      pconf: [batch, num_kp] The weights used to compute the rotation estimate.
      noise: A number indicating the noise added to the keypoints.

    Returns:
      [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
    """
    xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
    xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

    pconf2 = tf.expand_dims(pconf, 2)
    cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
    cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
    x = xyz0 - cen0
    y = xyz1 - cen1

    cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
    _, u, v = tf.svd(cov, full_matrices=True)

    d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
    ud = tf.concat(
        [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
        axis=2)
    return tf.matmul(ud, v, transpose_b=True)
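This is the weighted Kabsch/Procrustes algorithm: the SVD of the weighted cross-covariance gives the alignment, and the determinant term d flips the last column of U when needed so the result is a proper rotation (det = +1) rather than a reflection. A hedged check against a known rotation, with noise set to zero:

import numpy as np
import tensorflow as tf

theta = np.pi / 5.0
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta),  np.cos(theta), 0.0],
                [0.0, 0.0, 1.0]], dtype=np.float32)

xyz0 = np.random.randn(1, 8, 3).astype(np.float32)
xyz1 = xyz0 @ rot.T                                   # rotate every keypoint
pconf = np.full((1, 8), 1.0 / 8.0, dtype=np.float32)  # uniform weights

est = estimate_rotation(tf.constant(xyz0), tf.constant(xyz1),
                        tf.constant(pconf), noise=0.0)

with tf.Session() as sess:
    print(sess.run(est))  # ~ rot.T, per the "transposed" note in the docstring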
Example #21
Source File: main.py From multilabel-image-classification-tensorflow with MIT License
def estimate_rotation(xyz0, xyz1, pconf, noise):
    """Estimates the rotation between two sets of keypoints.

    The rotation is estimated by first subtracting mean from each set of
    keypoints and computing SVD of the covariance matrix.

    Args:
      xyz0: [batch, num_kp, 3] The first set of keypoints.
      xyz1: [batch, num_kp, 3] The second set of keypoints.
      pconf: [batch, num_kp] The weights used to compute the rotation estimate.
      noise: A number indicating the noise added to the keypoints.

    Returns:
      [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
    """
    xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
    xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

    pconf2 = tf.expand_dims(pconf, 2)
    cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
    cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
    x = xyz0 - cen0
    y = xyz1 - cen1

    cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
    _, u, v = tf.svd(cov, full_matrices=True)

    d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
    ud = tf.concat(
        [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
        axis=2)
    return tf.matmul(ud, v, transpose_b=True)
Example #22
Source File: ortho_gru_cell.py From neuralmonkey with BSD 3-Clause "New" or "Revised" License
def orthogonal_initializer():
    """Return an orthogonal initializer.

    Random orthogonal matrix is byproduct of singular value decomposition
    applied on a matrix initialized with normal distribution.

    The initializer works with 2D square matrices and matrices that can be
    splitted along axis 1 to several 2D matrices. In the latter case, each
    submatrix is initialized independently and the resulting orthogonal
    matrices are concatenated along axis 1.

    Note this is a higher order function in order to mimic the tensorflow
    initializer API.
    """
    # pylint: disable=unused-argument
    def func(shape, dtype, partition_info=None):
        if len(shape) != 2:
            raise ValueError(
                "Orthogonal initializer only works with 2D matrices.")

        if shape[1] % shape[0] != 0:
            raise ValueError("Shape {} is not compatible with orthogonal "
                             "initializer.".format(str(shape)))

        mult = int(shape[1] / shape[0])
        dim = shape[0]

        orthogonals = []
        for _ in range(mult):
            matrix = tf.random_normal([dim, dim], dtype=dtype)
            orthogonals.append(tf.svd(matrix)[1])

        return tf.concat(orthogonals, 1)
    # pylint: enable=unused-argument

    return func
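Here tf.svd(matrix)[1] picks u out of the (s, u, v) triple, so each block is orthogonal. A usage sketch for a GRU-style concatenated weight matrix (the variable name is made up):

init = orthogonal_initializer()
W = tf.get_variable('gru_w', shape=[4, 8], dtype=tf.float32, initializer=init)
# W is two independent 4x4 orthogonal blocks concatenated along axis 1.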
Example #23
Source File: skip_thoughts_model.py From HumanRecognition with MIT License
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
Example #24
Source File: skip_thoughts_model.py From Gun-Detector with Apache License 2.0
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
Example #25
Source File: skip_thoughts_model.py From g-tensorflow-models with Apache License 2.0
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
Example #26
Source File: main.py From models with Apache License 2.0
def estimate_rotation(xyz0, xyz1, pconf, noise):
    """Estimates the rotation between two sets of keypoints.

    The rotation is estimated by first subtracting mean from each set of
    keypoints and computing SVD of the covariance matrix.

    Args:
      xyz0: [batch, num_kp, 3] The first set of keypoints.
      xyz1: [batch, num_kp, 3] The second set of keypoints.
      pconf: [batch, num_kp] The weights used to compute the rotation estimate.
      noise: A number indicating the noise added to the keypoints.

    Returns:
      [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
    """
    xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
    xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

    pconf2 = tf.expand_dims(pconf, 2)
    cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
    cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
    x = xyz0 - cen0
    y = xyz1 - cen1

    cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
    _, u, v = tf.svd(cov, full_matrices=True)

    d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
    ud = tf.concat(
        [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
        axis=2)
    return tf.matmul(ud, v, transpose_b=True)
Example #27
Source File: skip_thoughts_model.py From parallax with Apache License 2.0
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u
Example #28
Source File: SVP.py From KD_methods_with_TF with MIT License
def SVD(X, n, name=None):
    with tf.variable_scope(name):
        sz = X.get_shape().as_list()
        if len(sz) == 4:
            x = tf.reshape(X, [-1, sz[1]*sz[2], sz[3]])
        elif len(sz) == 3:
            x = X
        else:
            x = tf.expand_dims(X, 1)
            n = 1
        _, HW, D = x.get_shape().as_list()

        with tf.device('CPU'):
            g = tf.get_default_graph()
            with g.gradient_override_map({"Svd": "Svd_"}):
                s, u, v = tf.svd(x, full_matrices=False)

        s = removenan(s)
        v = removenan(v)
        u = removenan(u)

        if n > 0:
            s = tf.nn.l2_normalize(tf.slice(s, [0, 0], [-1, n]), 1)
            u = tf.nn.l2_normalize(tf.slice(u, [0, 0, 0], [-1, -1, n]), 1)
            v = tf.nn.l2_normalize(tf.slice(v, [0, 0, 0], [-1, -1, n]), 1)

        return s, u, v
Example #29
Source File: skip_thoughts_model.py From models with Apache License 2.0
def random_orthonormal_initializer(shape, dtype=tf.float32,
                                   partition_info=None):  # pylint: disable=unused-argument
    """Variable initializer that produces a random orthonormal matrix."""
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Expecting square shape, got %s" % shape)
    _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
    return u