Python tensorflow.matrix_transpose() Examples

The following are 30 code examples of tensorflow.matrix_transpose(), collected from open-source projects. Each example notes the source file, project, and license it was taken from.
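All examples use the TensorFlow 1.x API, where the op lives at the top level as tf.matrix_transpose; in TensorFlow 2.x the same op is tf.linalg.matrix_transpose. As a minimal, self-contained sketch of its behavior (not taken from any of the projects below): it swaps the last two dimensions of a tensor of rank >= 2, leaving any leading batch dimensions untouched.

import tensorflow as tf  # TensorFlow 1.x

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])          # shape (2, 3)
xt = tf.matrix_transpose(x)           # shape (3, 2)

batch = tf.zeros([8, 4, 2, 3])        # two leading batch dimensions
bt = tf.matrix_transpose(batch)       # shape (8, 4, 3, 2)

with tf.Session() as sess:
    print(sess.run(xt))               # [[1 4] [2 5] [3 6]]
    print(bt.shape)                   # (8, 4, 3, 2)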
Example #1
Source File: multivariate.py From zhusuan with MIT License

def _sample(self, n_samples):
    mean, u_tril, v_tril = self.mean, self.u_tril, self.v_tril
    if not self.is_reparameterized:
        mean = tf.stop_gradient(mean)
        u_tril = tf.stop_gradient(u_tril)
        v_tril = tf.stop_gradient(v_tril)

    def tile(t):
        new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
        return tf.tile(tf.expand_dims(t, 0), new_shape)

    batch_u_tril = tile(u_tril)
    batch_v_tril = tile(v_tril)
    noise = tf.random_normal(
        tf.concat([[n_samples], tf.shape(mean)], axis=0), dtype=self.dtype)
    samples = mean + \
        tf.matmul(tf.matmul(batch_u_tril, noise),
                  tf.matrix_transpose(batch_v_tril))
    # Update static shape
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(tf.TensorShape([static_n_samples])
                      .concatenate(self.get_batch_shape())
                      .concatenate(self.get_value_shape()))
    return samples
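A note on the construction above: with U = Lu Lu^T and V = Lv Lv^T, the draw is the standard matrix-variate normal reparameterization

    X = M + Lu E Lv^T,    E_ij ~ N(0, 1) i.i.d.

and tf.matrix_transpose supplies the Lv^T factor across the sample and batch dimensions.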
Example #2
Source File: operator_pd_vdvt_update_test.py From deep_image_model with Apache License 2.0

def _updated_mat(self, mat, v, diag):
    # Get dense matrix defined by its square root, which is an update of `mat`:
    #   A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.

    # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
    if diag is None:
        diag_vt = tf.matrix_transpose(v)
    else:
        diag_mat = tf.matrix_diag(diag)
        diag_vt = tf.batch_matmul(diag_mat, v, adj_y=True)

    v_diag_vt = tf.batch_matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.batch_matmul(sqrt, sqrt, adj_y=True)
    return a.eval()
Example #3
Source File: __init__.py From MolGAN with MIT License

def decoder_rnn(inputs, units, vertexes, edges, nodes, training, dropout_rate=0.):
    output = multi_dense_layers(inputs, units[:-1], activation=tf.nn.tanh,
                                dropout_rate=dropout_rate, training=training)

    with tf.variable_scope('edges_logits'):
        edges_logits, _ = tf.nn.dynamic_rnn(
            cell=tf.nn.rnn_cell.LSTMCell(units[-1] * 4),
            inputs=tf.tile(tf.expand_dims(output, axis=1), (1, vertexes, 1)),
            dtype=output.dtype)
        edges_logits = tf.layers.dense(edges_logits, edges * units[-1])
        edges_logits = tf.transpose(
            tf.reshape(edges_logits, (-1, vertexes, edges, units[-1])),
            (0, 2, 1, 3))
        edges_logits = tf.transpose(
            tf.matmul(edges_logits, tf.matrix_transpose(edges_logits)),
            (0, 2, 3, 1))
        edges_logits = tf.layers.dropout(edges_logits, dropout_rate,
                                         training=training)

    with tf.variable_scope('nodes_logits'):
        nodes_logits, _ = tf.nn.dynamic_rnn(
            cell=tf.nn.rnn_cell.LSTMCell(units[-1] * 4),
            inputs=tf.tile(tf.expand_dims(output, axis=1), (1, vertexes, 1)),
            dtype=output.dtype)
        nodes_logits = tf.layers.dense(nodes_logits, nodes)
        nodes_logits = tf.layers.dropout(nodes_logits, dropout_rate,
                                         training=training)

    return edges_logits, nodes_logits
Example #4
Source File: homography.py From stereo-magnification with Apache License 2.0

def inv_homography(k_s, k_t, rot, t, n_hat, a):
  """Computes inverse homography matrix between two cameras via a plane.

  Args:
      k_s: intrinsics for source cameras, [..., 3, 3] matrices
      k_t: intrinsics for target cameras, [..., 3, 3] matrices
      rot: relative rotations between source and target, [..., 3, 3] matrices
      t: [..., 3, 1], translations from source to target camera. Mapping a 3D
        point p from source to target is accomplished via rot * p + t.
      n_hat: [..., 1, 3], plane normal w.r.t source camera frame
      a: [..., 1, 1], plane equation displacement
  Returns:
      homography: [..., 3, 3] inverse homography matrices (homographies mapping
        pixel coordinates from target to source).
  """
  with tf.name_scope('inv_homography'):
    rot_t = tf.matrix_transpose(rot)
    k_t_inv = tf.matrix_inverse(k_t, name='k_t_inv')

    denom = a - tf.matmul(tf.matmul(n_hat, rot_t), t)
    numerator = tf.matmul(tf.matmul(tf.matmul(rot_t, t), n_hat), rot_t)
    inv_hom = tf.matmul(
        tf.matmul(k_s, rot_t + divide_safe(numerator, denom)),
        k_t_inv, name='inv_hom')
    return inv_hom
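Spelling out what the graph above evaluates (a reading of the code, with divide_safe assumed to be the file's guarded division helper):

    inv_hom = k_s @ (rot^T + (rot^T @ t @ n_hat @ rot^T) / (a - n_hat @ rot^T @ t)) @ inv(k_t)

This is the closed-form inverse of the plane-induced homography between the two cameras (it can be checked with the Sherman-Morrison identity); tf.matrix_transpose produces rot^T for every matrix in the batch.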
Example #5
Source File: components.py From strsum with Apache License 2.0

def get_matrix_tree(r, A):
    L = tf.reduce_sum(A, 1)
    L = tf.matrix_diag(L)
    L = L - A

    r_diag = tf.matrix_diag(r)
    LL = L + r_diag

    LL_inv = tf.matrix_inverse(LL)  # batch_l, doc_l, doc_l
    LL_inv_diag_ = tf.matrix_diag_part(LL_inv)

    d0 = tf.multiply(r, LL_inv_diag_)

    LL_inv_diag = tf.expand_dims(LL_inv_diag_, 2)
    tmp1 = tf.multiply(A, tf.matrix_transpose(LL_inv_diag))
    tmp2 = tf.multiply(A, tf.matrix_transpose(LL_inv))

    d = tmp1 - tmp2
    d = tf.concat([tf.expand_dims(d0, [1]), d], 1)
    return d
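For context, this appears to be the Matrix-Tree computation for marginals of non-projective dependency structures in the style of Koo et al. (2007): with LL = diag(sum_i A_ij) - A + diag(r), the returned entries are

    d0[j]   = r[j] * inv(LL)[j, j]                                 # root-attachment marginals
    d[i, j] = A[i, j] * inv(LL)[j, j] - A[i, j] * inv(LL)[j, i]    # edge marginals

concatenated so that the first row carries the root scores.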
Example #6
Source File: temporal_convolutional_network.py From nlp-architect with Apache License 2.0

def define_projection_layer(self, prediction, tied_weights=True):
    """
    Define the output word embedding layer

    Args:
        prediction: tf.tensor, the prediction from the model
        tied_weights: boolean, whether or not to tie weights from the input
                      embedding layer

    Returns:
        Probability distribution over vocabulary
    """
    with tf.device("/cpu:0"):
        if tied_weights:
            # tie projection layer and embedding layer
            with tf.variable_scope("embedding_layer", reuse=tf.AUTO_REUSE):
                softmax_w = tf.matrix_transpose(self.word_embeddings_tf)
                softmax_b = tf.get_variable("softmax_b", [self.num_words])
                _, l, k = prediction.shape.as_list()
                prediction_reshaped = tf.reshape(prediction, [-1, k])
                mult_out = tf.nn.bias_add(
                    tf.matmul(prediction_reshaped, softmax_w), softmax_b)
                projection_out = tf.reshape(mult_out, [-1, l, self.num_words])
        else:
            with tf.variable_scope("projection_layer", reuse=False):
                projection_out = tf.layers.Dense(self.num_words)(prediction)
    return projection_out
Example #7
Source File: im_caption_full.py From unsupervised_captioning with MIT License

def sentence_ae(gan_model, features, labels, add_summaries=True):
  """Sentence auto-encoder."""
  with tf.variable_scope(gan_model.discriminator_scope, reuse=True):
    feat = discriminator(features['key'], [None, features['lk']])[2]
  with tf.variable_scope(gan_model.generator_scope, reuse=True):
    embedding = tf.get_variable(
        name='embedding',
        shape=[FLAGS.vocab_size, FLAGS.emb_dim],
        initializer=tf.random_uniform_initializer(-0.08, 0.08))
    softmax_w = tf.matrix_transpose(embedding)
    softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])

    sentence, ls = labels['sentence'], labels['len']
    targets = sentence[:, 1:]
    sentence = sentence[:, :-1]
    ls -= 1
    sentence = tf.nn.embedding_lookup(embedding, sentence)
    batch_size = tf.shape(feat)[0]

    cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
    zero_state = cell.zero_state(batch_size, tf.float32)
    _, state = cell(feat, zero_state)
    tf.get_variable_scope().reuse_variables()
    out, state = tf.nn.dynamic_rnn(cell, sentence, ls, state)
    out = tf.reshape(out, [-1, FLAGS.mem_dim])
    logits = tf.nn.bias_add(tf.matmul(out, softmax_w), softmax_b)
    logits = tf.reshape(logits, [batch_size, -1, FLAGS.vocab_size])

    mask = tf.sequence_mask(ls, tf.shape(sentence)[1])
    targets = tf.boolean_mask(targets, mask)
    logits = tf.boolean_mask(logits, mask)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets, logits=logits)
    loss = tf.reduce_mean(loss)
  if add_summaries:
    tf.summary.scalar('losses/sentence_ae', loss)
  return loss
Example #8
Source File: bcnn.py From tensorflow-DSMM with MIT License

def _expand_input(self, x1, x2, att_mat, seq_len, d, name):
    # att_mat: [batch, s, s]
    aW = tf.get_variable(name=name, shape=(seq_len, d))

    # [batch, s, s] * [s, d] => [batch, s, d]
    # expand dims => [batch, s, d, 1]
    x1_a = tf.expand_dims(tf.einsum("ijk,kl->ijl", att_mat, aW), -1)
    x2_a = tf.expand_dims(
        tf.einsum("ijk,kl->ijl", tf.matrix_transpose(att_mat), aW), -1)

    # [batch, s, d, 2]
    x1 = tf.concat([x1, x1_a], axis=3)
    x2 = tf.concat([x2, x2_a], axis=3)

    return x1, x2
Example #9
Source File: caption_infer.py From unsupervised_captioning with MIT License

def _tower_fn(im, is_training=False):
  with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
    net, _ = inception_v4.inception_v4(im, None, is_training=False)
  net = tf.squeeze(net, [1, 2])
  with tf.variable_scope('Generator'):
    feat = slim.fully_connected(net, FLAGS.mem_dim, activation_fn=None)
    feat = tf.nn.l2_normalize(feat, axis=1)
    embedding = tf.get_variable(
        name='embedding',
        shape=[FLAGS.vocab_size, FLAGS.emb_dim],
        initializer=tf.random_uniform_initializer(-0.08, 0.08))
    softmax_w = tf.matrix_transpose(embedding)
    softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])

    cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
    if is_training:
      cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob,
                                           FLAGS.keep_prob)
    zero_state = cell.zero_state(FLAGS.batch_size, tf.float32)
    _, state = cell(feat, zero_state)
    init_state = state
    tf.get_variable_scope().reuse_variables()

    state_feed = tf.placeholder(dtype=tf.float32,
                                shape=[None, sum(cell.state_size)],
                                name="state_feed")
    state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
    input_feed = tf.placeholder(dtype=tf.int64,
                                shape=[None],  # batch_size
                                name="input_feed")
    inputs = tf.nn.embedding_lookup(embedding, input_feed)
    out, state_tuple = cell(inputs, state_tuple)
    tf.concat(axis=1, values=state_tuple, name="state")
    logits = tf.nn.bias_add(tf.matmul(out, softmax_w), softmax_b)
    tower_pred = tf.nn.softmax(logits, name="softmax")
  return tf.concat(init_state, axis=1, name='initial_state')
Example #10
Source File: SVP.py From KD_methods_with_TF with MIT License

def gradient_svd(op, ds, dU, dV):
    s, U, V = op.outputs

    u_sz = tf.squeeze(tf.slice(tf.shape(dU), [1], [1]))
    v_sz = tf.squeeze(tf.slice(tf.shape(dV), [1], [1]))
    s_sz = tf.squeeze(tf.slice(tf.shape(ds), [1], [1]))

    S = tf.matrix_diag(s)
    s_2 = tf.square(s)

    eye = tf.expand_dims(tf.eye(s_sz), 0)
    k = (1 - eye) / (tf.expand_dims(s_2, 2) - tf.expand_dims(s_2, 1) + eye)
    KT = tf.matrix_transpose(k)
    KT = removenan(KT)

    def msym(X):
        return (X + tf.matrix_transpose(X))

    def left_grad(U, S, V, dU, dV):
        U, V = (V, U)
        dU, dV = (dV, dU)
        D = tf.matmul(dU, tf.matrix_diag(1 / (s + 1e-8)))
        grad = tf.matmul(
            D - tf.matmul(
                U,
                tf.matrix_diag(tf.matrix_diag_part(
                    tf.matmul(U, D, transpose_a=True)))
                + 2 * tf.matmul(S, msym(KT * tf.matmul(
                    D, tf.matmul(U, S), transpose_a=True)))),
            V, transpose_b=True)
        grad = tf.matrix_transpose(grad)
        return grad

    def right_grad(U, S, V, dU, dV):
        grad = tf.matmul(
            2 * tf.matmul(U, tf.matmul(S, msym(KT * tf.matmul(
                V, dV, transpose_a=True)))),
            V, transpose_b=True)
        return grad

    grad = tf.cond(tf.greater(v_sz, u_sz),
                   lambda: left_grad(U, S, V, dU, dV),
                   lambda: right_grad(U, S, V, dU, dV))
    return [grad]
Example #11
Source File: preprocessing_util.py From TwinGAN with Apache License 2.0

def transform(W, img):
    shape = img.shape
    img = tf.tensordot(img, tf.matrix_transpose(W), axes=1)
    img = tf.reshape(img, shape=shape)
    return img
Example #12
Source File: __init__.py From MolGAN with MIT License

def decoder_dot(inputs, units, vertexes, edges, nodes, training, dropout_rate=0.):
    output = multi_dense_layers(inputs, units[:-1], activation=tf.nn.tanh,
                                dropout_rate=dropout_rate, training=training)

    with tf.variable_scope('edges_logits'):
        edges_logits = tf.reshape(
            tf.layers.dense(inputs=output, units=edges * vertexes * units[-1],
                            activation=None),
            (-1, edges, vertexes, units[-1]))
        edges_logits = tf.transpose(
            tf.matmul(edges_logits, tf.matrix_transpose(edges_logits)),
            (0, 2, 3, 1))
        edges_logits = tf.layers.dropout(edges_logits, dropout_rate,
                                         training=training)

    with tf.variable_scope('nodes_logits'):
        nodes_logits = tf.layers.dense(inputs=output, units=vertexes * nodes,
                                       activation=None)
        nodes_logits = tf.reshape(nodes_logits, (-1, vertexes, nodes))
        nodes_logits = tf.layers.dropout(nodes_logits, dropout_rate,
                                         training=training)

    return edges_logits, nodes_logits
Example #13
Source File: __init__.py From MolGAN with MIT License

def decoder_adj(inputs, units, vertexes, edges, nodes, training, dropout_rate=0.):
    output = multi_dense_layers(inputs, units, activation=tf.nn.tanh,
                                dropout_rate=dropout_rate, training=training)

    with tf.variable_scope('edges_logits'):
        edges_logits = tf.reshape(
            tf.layers.dense(inputs=output, units=edges * vertexes * vertexes,
                            activation=None),
            (-1, edges, vertexes, vertexes))
        edges_logits = tf.transpose(
            (edges_logits + tf.matrix_transpose(edges_logits)) / 2,
            (0, 2, 3, 1))
        edges_logits = tf.layers.dropout(edges_logits, dropout_rate,
                                         training=training)

    with tf.variable_scope('nodes_logits'):
        nodes_logits = tf.layers.dense(inputs=output, units=vertexes * nodes,
                                       activation=None)
        nodes_logits = tf.reshape(nodes_logits, (-1, vertexes, nodes))
        nodes_logits = tf.layers.dropout(nodes_logits, dropout_rate,
                                         training=training)

    return edges_logits, nodes_logits
Example #14
Source File: multivariate.py From zhusuan with MIT License

def _log_prob(self, given):
    mean, u_tril, v_tril = (self.path_param(self.mean),
                            self.path_param(self.u_tril),
                            self.path_param(self.v_tril))
    log_det_u = 2 * tf.reduce_sum(
        tf.log(tf.matrix_diag_part(u_tril)), axis=-1)
    log_det_v = 2 * tf.reduce_sum(
        tf.log(tf.matrix_diag_part(v_tril)), axis=-1)
    n_row = tf.cast(self._n_row, self.dtype)
    n_col = tf.cast(self._n_col, self.dtype)
    logZ = - (n_row * n_col) / 2. * \
        tf.log(2. * tf.constant(np.pi, dtype=self.dtype)) - \
        n_row / 2. * log_det_v - n_col / 2. * log_det_u
    # logZ.shape == batch_shape
    if self._check_numerics:
        logZ = tf.check_numerics(logZ, "log[det(Cov)]")
    y = given - mean
    y_with_last_dim_changed = tf.expand_dims(tf.ones(tf.shape(y)[:-1]), -1)
    Lu, _ = maybe_explicit_broadcast(
        u_tril, y_with_last_dim_changed,
        'MatrixVariateNormalCholesky.u_tril', 'expand_dims(given, -1)')
    y_with_sec_last_dim_changed = tf.expand_dims(tf.ones(
        tf.concat([tf.shape(y)[:-2], tf.shape(y)[-1:]], axis=0)), -1)
    Lv, _ = maybe_explicit_broadcast(
        v_tril, y_with_sec_last_dim_changed,
        'MatrixVariateNormalCholesky.v_tril', 'expand_dims(given, -1)')
    x_Lb_inv_t = tf.matrix_triangular_solve(Lu, y, lower=True)
    x_t = tf.matrix_triangular_solve(Lv, tf.matrix_transpose(x_Lb_inv_t),
                                     lower=True)
    stoc_dist = -0.5 * tf.reduce_sum(tf.square(x_t), [-1, -2])
    return logZ + stoc_dist
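The two terms assemble the matrix-variate normal log-density: for an n_row x n_col observation Y with mean M, row covariance U = Lu Lu^T, and column covariance V = Lv Lv^T,

    log p(Y) = -(n_row * n_col / 2) * log(2*pi)
               - (n_row / 2) * log|V| - (n_col / 2) * log|U|
               - (1/2) * tr[ inv(V) (Y - M)^T inv(U) (Y - M) ]

The pair of triangular solves evaluates the trace term without ever forming an explicit inverse.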
Example #15
Source File: tensorflow.py From deepx with MIT License

def kronecker_vec(self, X, m, n):
    leading_dim = tf.shape(X)[:-2]
    blocks = []
    for i in range(n):
        blocks.append([])
        for j in range(m):
            idx = i * m + j
            block = tf.matrix_transpose(
                tf.reshape(X[..., idx, :], tf.concat([leading_dim, [n, m]], 0)))
            blocks[-1].append(block)
    return tf.concat([tf.concat(b, -2) for b in blocks], -1)
Example #16
Source File: tensorflow.py From deepx with MIT License

def unvec(self, v, m, n):
    leading_dim = self.shape(v)[:-1]
    return self.matrix_transpose(
        self.reshape(v, self.concat([leading_dim, [n, m]], 0)))
Example #17
Source File: tensorflow.py From deepx with MIT License

def vec(self, A):
    A = self.matrix_transpose(A)
    leading_dim = self.shape(A)[:-2]
    return self.reshape(A, self.concat([leading_dim, [-1]], 0))
Example #18
Source File: tensorflow.py From deepx with MIT License

def logdet(self, A, **kwargs):
    A = (A + self.matrix_transpose(A)) / 2.
    term = tf.log(tf.matrix_diag_part(self.cholesky(A, **kwargs)))
    return 2 * tf.reduce_sum(term, -1)
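This uses the identity for a symmetric positive-definite A with Cholesky factor L (A = L L^T):

    log det A = 2 * sum_i log L_ii

Symmetrizing A first guards against small numerical asymmetries that would otherwise make the Cholesky factorization fail.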
Example #19
Source File: tensorflow.py From deepx with MIT License

def matrix_transpose(self, a):
    return tf.matrix_transpose(a)
Example #20
Source File: abcnn.py From nlp_research with MIT License

def make_attention_mat(self, x1, x2):
    # x1, x2 = [batch, height, width, 1] = [batch, d, s, 1]
    # x2 => [batch, height, 1, width]
    # [batch, width, width] = [batch, s, s]
    euclidean = tf.sqrt(
        tf.reduce_sum(tf.square(x1 - tf.matrix_transpose(x2)), axis=1) + 1e-8)
    return 1 / (1 + euclidean)
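The returned matrix scores each pair of positions with the Euclidean match function used by the ABCNN models:

    attention[i, j] = 1 / (1 + ||x1[:, i] - x2[:, j]||_2)

The 1e-8 inside the square root keeps the gradient finite when the two columns coincide.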
Example #21
Source File: array_ops_test.py From deep_image_model with Apache License 2.0

def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
    vector = [1, 2, 3]
    with self.test_session():
        with self.assertRaisesRegexp(ValueError, "should be a "):
            tf.matrix_transpose(vector)
Example #22
Source File: array_ops_test.py From deep_image_model with Apache License 2.0

def testBatchMatrixDynamicallyDefined(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    with self.test_session():
        batch_matrix_ph = tf.placeholder(tf.int32)
        transposed = tf.matrix_transpose(batch_matrix_ph)
        self.assertAllEqual(
            expected_transposed,
            transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
Example #23
Source File: array_ops_test.py From deep_image_model with Apache License 2.0

def testNonBatchMatrixDynamicallyDefined(self):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    with self.test_session():
        matrix_ph = tf.placeholder(tf.int32)
        transposed = tf.matrix_transpose(matrix_ph)
        self.assertAllEqual(
            expected_transposed,
            transposed.eval(feed_dict={matrix_ph: matrix}))
Example #24
Source File: array_ops_test.py From deep_image_model with Apache License 2.0

def testBatchMatrix(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    with self.test_session():
        transposed = tf.matrix_transpose(batch_matrix)
        self.assertEqual((2, 3, 2), transposed.get_shape())
        self.assertAllEqual(expected_transposed, transposed.eval())
Example #25
Source File: array_ops_test.py From deep_image_model with Apache License 2.0

def testNonBatchMatrix(self):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    with self.test_session():
        transposed = tf.matrix_transpose(matrix)
        self.assertEqual((3, 2), transposed.get_shape())
        self.assertAllEqual(expected_transposed, transposed.eval())
Example #26
Source File: svp.py From BERT with Apache License 2.0

def gradient_svd(op, ds, dU, dV):
    s, U, V = op.outputs

    u_sz = tf.squeeze(tf.slice(tf.shape(dU), [1], [1]))
    v_sz = tf.squeeze(tf.slice(tf.shape(dV), [1], [1]))
    s_sz = tf.squeeze(tf.slice(tf.shape(ds), [1], [1]))

    S = tf.matrix_diag(s)
    s_2 = tf.square(s)

    eye = tf.expand_dims(tf.eye(s_sz), 0)
    k = (1 - eye) / (tf.expand_dims(s_2, 2) - tf.expand_dims(s_2, 1) + eye)
    KT = tf.matrix_transpose(k)
    KT = removenan(KT)

    def msym(X):
        return (X + tf.matrix_transpose(X))

    def left_grad(U, S, V, dU, dV):
        U, V = (V, U)
        dU, dV = (dV, dU)
        D = tf.matmul(dU, tf.matrix_diag(1 / (s + 1e-8)))
        US = tf.matmul(U, S)

        grad = tf.matmul(D, V, transpose_b=True) \
            + tf.matmul(tf.matmul(U, tf.matrix_diag(tf.matrix_diag_part(
                -tf.matmul(U, D, transpose_a=True)))), V, transpose_b=True) \
            + tf.matmul(2 * tf.matmul(US, msym(KT * tf.matmul(
                V, -tf.matmul(V, tf.matmul(D, US, transpose_a=True)),
                transpose_a=True))), V, transpose_b=True)
        grad = tf.matrix_transpose(grad)
        return grad

    def right_grad(U, S, V, dU, dV):
        US = tf.matmul(U, S)
        grad = tf.matmul(2 * tf.matmul(US, msym(KT * tf.matmul(
            V, dV, transpose_a=True))), V, transpose_b=True)
        return grad

    grad = tf.cond(tf.greater(v_sz, u_sz),
                   lambda: left_grad(U, S, V, dU, dV),
                   lambda: right_grad(U, S, V, dU, dV))
    return [grad]
Example #27
Source File: utils.py From zhusuan with MIT License

def gp_conditional(z, fz, x, full_cov, kernel, Kzz_chol=None):
    '''
    GP conditional distribution of f(x) given f(z) == fz.

    :param z: shape [n_z, n_covariates]
    :param fz: shape [n_particles, n_z]
    :param x: shape [n_x, n_covariates]
    :return: a distribution with shape [n_particles, n_x]
    '''
    n_z = int(z.shape[0])
    n_particles = tf.shape(fz)[0]

    if Kzz_chol is None:
        Kzz_chol = tf.cholesky(kernel(z, z))

    # Mean[fx|fz] = Kxz @ inv(Kzz) @ fz; Cov[fx|z] = Kxx - Kxz @ inv(Kzz) @ Kzx
    # With ill-conditioned Kzz, the inverse is often asymmetric, which
    # breaks further cholesky decomposition. We compute a symmetric one.
    Kzz_chol_inv = tf.matrix_triangular_solve(Kzz_chol, tf.eye(n_z))
    Kzz_inv = tf.matmul(tf.transpose(Kzz_chol_inv), Kzz_chol_inv)

    Kxz = kernel(x, z)  # [n_x, n_z]
    Kxziz = tf.matmul(Kxz, Kzz_inv)
    mean_fx_given_fz = tf.matmul(fz, tf.matrix_transpose(Kxziz))

    if full_cov:
        cov_fx_given_fz = kernel(x, x) - tf.matmul(Kxziz, tf.transpose(Kxz))
        cov_fx_given_fz = tf.tile(
            tf.expand_dims(tf.cholesky(cov_fx_given_fz), 0),
            [n_particles, 1, 1])
        fx_given_fz = zs.distributions.MultivariateNormalCholesky(
            mean_fx_given_fz, cov_fx_given_fz)
    else:
        # diag(AA^T) = sum(A**2, axis=-1)
        var = kernel.Kdiag(x) - \
            tf.reduce_sum(tf.matmul(
                Kxz, tf.matrix_transpose(Kzz_chol_inv)) ** 2, axis=-1)
        std = tf.sqrt(var)
        fx_given_fz = zs.distributions.Normal(
            mean=mean_fx_given_fz, std=std, group_ndims=1)
    return fx_given_fz
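The code follows the standard Gaussian-process conditional (also stated in its comments): given f(z) = fz,

    E[f(x) | fz]   = Kxz @ inv(Kzz) @ fz
    Cov[f(x) | fz] = Kxx - Kxz @ inv(Kzz) @ Kzx

with inv(Kzz) built from a triangular solve against the Cholesky factor for numerical stability; in the full_cov=False branch only the diagonal of the covariance is materialized.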
Example #28
Source File: layers.py From face_landmark_dnn with MIT License

def AffineTransformLayer(Image, Param):
    '''
    Image: [N, IMGSIZE, IMGSIZE, 2]
    Param: [N, 6]
    return: [N, IMGSIZE, IMGSIZE, 2]
    '''
    A = tf.reshape(Param[:, 0:4], (-1, 2, 2))
    T = tf.reshape(Param[:, 4:6], (-1, 1, 2))

    A = tf.matrix_inverse(A)
    T = tf.matmul(-T, A)

    T = tf.reverse(T, (-1,))
    A = tf.matrix_transpose(A)

    def affine_transform(I, A, T):
        I = tf.reshape(I, [IMGSIZE, IMGSIZE])

        SrcPixels = tf.matmul(tf.reshape(Pixels, [IMGSIZE * IMGSIZE, 2]), A) + T
        SrcPixels = tf.clip_by_value(SrcPixels, 0, IMGSIZE - 2)

        outPixelsMinMin = tf.to_float(tf.to_int32(SrcPixels))
        dxdy = SrcPixels - outPixelsMinMin
        dx = dxdy[:, 0]
        dy = dxdy[:, 1]

        outPixelsMinMin = tf.reshape(tf.to_int32(outPixelsMinMin),
                                     [IMGSIZE * IMGSIZE, 2])
        outPixelsMaxMin = tf.reshape(outPixelsMinMin + [1, 0],
                                     [IMGSIZE * IMGSIZE, 2])
        outPixelsMinMax = tf.reshape(outPixelsMinMin + [0, 1],
                                     [IMGSIZE * IMGSIZE, 2])
        outPixelsMaxMax = tf.reshape(outPixelsMinMin + [1, 1],
                                     [IMGSIZE * IMGSIZE, 2])

        # Bilinear interpolation over the four neighbouring source pixels.
        OutImage = (1 - dx) * (1 - dy) * tf.gather_nd(I, outPixelsMinMin) \
            + dx * (1 - dy) * tf.gather_nd(I, outPixelsMaxMin) \
            + (1 - dx) * dy * tf.gather_nd(I, outPixelsMinMax) \
            + dx * dy * tf.gather_nd(I, outPixelsMaxMax)
        return tf.reshape(OutImage, [IMGSIZE, IMGSIZE, 1])

    return tf.map_fn(lambda args: affine_transform(args[0], args[1], args[2]),
                     (Image, A, T), dtype=tf.float32)
Example #29
Source File: array_ops.py From keras-lambda with MIT License

def matrix_transpose(a, name="matrix_transpose"):
  """Transposes last two dimensions of tensor `a`.

  For example:

  ```python
  # Matrix with no batch dimension.
  # 'x' is [[1 2 3]
  #         [4 5 6]]
  tf.matrix_transpose(x) ==> [[1 4]
                              [2 5]
                              [3 6]]

  # Matrix with two batch dimensions.
  # x.shape is [1, 2, 3, 4]
  # tf.matrix_transpose(x) is shape [1, 2, 4, 3]
  ```

  Args:
    a: A `Tensor` with `rank >= 2`.
    name: A name for the operation (optional).

  Returns:
    A transposed batch matrix `Tensor`.

  Raises:
    ValueError: If `a` is determined statically to have `rank < 2`.
  """
  with ops.name_scope(name, values=[a]):
    a = ops.convert_to_tensor(a, name="a")

    # If we know the number of dimensions (statically), we can do two things:
    # 1. Check that `a` is a (batch) matrix.
    # 2. Use a python list for perm. This preserves static shape information
    #    and avoids extra computations.
    a_shape = a.get_shape()
    ndims = a_shape.ndims
    if ndims is not None:
      if ndims < 2:
        raise ValueError(
            "Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
            "%s" % a_shape)
      perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
    else:
      a_rank = rank(a)
      perm = concat(
          (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)

    return transpose(a, perm=perm)
# pylint: enable=invalid-name
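A quick sanity check of the perm logic above (a minimal sketch, assuming a statically-known rank-4 input): the op reduces to a tf.transpose that swaps only the last two axes.

import tensorflow as tf  # TensorFlow 1.x

x = tf.zeros([1, 2, 3, 4])
a = tf.matrix_transpose(x)              # swaps only the last two axes
b = tf.transpose(x, perm=[0, 1, 3, 2])  # the perm the static branch builds
print(a.shape, b.shape)                 # (1, 2, 4, 3) (1, 2, 4, 3)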
Example #30
Source File: biaffine_units.py From multilabel-image-classification-tensorflow with MIT License

def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
    """Requires |stride|; otherwise see base class."""
    check.NotNone(stride,
                  'BiaffineDigraphNetwork requires "stride" and must be called '
                  'in the bulk feature extractor component.')

    # TODO(googleuser): Add dropout during training.
    del during_training

    # Retrieve (possibly averaged) weights.
    weights_arc = self._component.get_variable('weights_arc')
    weights_source = self._component.get_variable('weights_source')
    root = self._component.get_variable('root')

    # Extract the source and target token activations. Use |stride| to
    # collapse batch and beam into a single dimension.
    sources = network_units.lookup_named_tensor('sources', linked_embeddings)
    targets = network_units.lookup_named_tensor('targets', linked_embeddings)
    source_tokens_bxnxs = tf.reshape(sources.tensor,
                                     [stride, -1, self._source_dim])
    target_tokens_bxnxt = tf.reshape(targets.tensor,
                                     [stride, -1, self._target_dim])
    num_tokens = tf.shape(source_tokens_bxnxs)[1]

    # Compute the arc, source, and root potentials.
    arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(
        source_tokens_bxnxs, target_tokens_bxnxt, weights_arc)
    sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(
        source_tokens_bxnxs, weights_source)
    roots_bxn = digraph_ops.RootPotentialsFromTokens(
        root, target_tokens_bxnxt, weights_arc, weights_source)

    # Combine them into a single matrix with the roots on the diagonal.
    adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials(
        arcs_bxnxn + sources_bxnxn, roots_bxn)

    # The adjacency matrix currently has sources on rows and targets on
    # columns, but we want targets on rows so that maximizing within a row
    # corresponds to selecting sources for a given target.
    adjacency_bxnxn = tf.matrix_transpose(adjacency_bxnxn)

    return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])]