Python tensorflow.keras.backend.transpose() Examples
The following are 7 code examples of tensorflow.keras.backend.transpose(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.backend, or try the search function.
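For orientation before the examples, here is a minimal sketch of what the function itself does, assuming TensorFlow 2.x with the bundled tf.keras: K.transpose() wraps tf.transpose(), so for a 2-D tensor it simply swaps rows and columns.

    import numpy as np
    from tensorflow.keras import backend as K

    x = K.constant(np.arange(6).reshape(2, 3))   # shape (2, 3)
    xt = K.transpose(x)                          # shape (3, 2)
    print(K.int_shape(xt))                       # -> (3, 2)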
Example #1
Source File: base.py From spektral with MIT License | 5 votes |
def call(self, inputs):
    if self.trainable_kernel:
        output = K.dot(K.dot(inputs, self.kernel), K.transpose(inputs))
    else:
        output = K.dot(inputs, K.transpose(inputs))
    if self.activation is not None:
        output = self.activation(output)
    return output
Example #2
Source File: base.py From spektral with MIT License | 5 votes |
def call(self, inputs):
    F = K.int_shape(inputs)[-1]
    minkowski_prod_mat = np.eye(F)
    minkowski_prod_mat[-1, -1] = -1.
    minkowski_prod_mat = K.constant(minkowski_prod_mat)
    output = K.dot(inputs, minkowski_prod_mat)
    output = K.dot(output, K.transpose(inputs))
    output = K.clip(output, -10e9, -1.)
    if self.activation is not None:
        output = self.activation(output)
    return output
Example #3
Source File: layers.py From neuron with GNU General Public License v3.0 | 5 votes |
def build(self, input_shape):
    # Create a trainable weight variable for this layer.
    self.kernel = self.add_weight(name='mult-kernel',
                                  shape=(np.prod(self.orig_input_shape),
                                         self.output_len),
                                  initializer=self.kernel_initializer,
                                  trainable=True)

    M = K.reshape(self.kernel, [-1, self.output_len])   # D x d
    mt = K.transpose(M)                                  # d x D
    mtm_inv = tf.matrix_inverse(K.dot(mt, M))            # d x d
    self.W = K.dot(mtm_inv, mt)                          # d x D

    if self.use_bias:
        self.bias = self.add_weight(name='bias-kernel',
                                    shape=(self.output_len, ),
                                    initializer=self.bias_initializer,
                                    trainable=True)

    # self.sigma_sq = self.add_weight(name='bias-kernel',
    #                                 shape=(1, ),
    #                                 initializer=self.initializer,
    #                                 trainable=True)

    super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere!
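The build step above assembles W = (MᵀM)⁻¹Mᵀ, which for a full-column-rank kernel M is its Moore-Penrose pseudoinverse. A quick numpy sketch of that identity (illustrative only, not part of the neuron package):

    import numpy as np

    M = np.random.rand(10, 3)                 # D x d, full column rank
    W = np.linalg.inv(M.T @ M) @ M.T          # d x D, mirroring build() above
    assert np.allclose(W, np.linalg.pinv(M))  # matches the Moore-Penrose pseudoinverse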
Example #4
Source File: FcDEC.py From DEC-DA with MIT License | 5 votes |
def call(self, inputs, **kwargs):
    """Student's t-distribution, the same as used in the t-SNE algorithm.
    q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it.

    Arguments:
        inputs: the variable containing data, shape=(n_samples, n_features)
    Return:
        q: Student's t-distribution, or soft labels for each sample.
           shape=(n_samples, n_clusters)
    """
    q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
    q **= (self.alpha + 1.0) / 2.0
    q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
    return q
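The double transpose in the last line is a common backend idiom for row-wise normalization: K.sum(q, axis=1) has shape (n_samples,), so dividing the transposed (n_clusters, n_samples) matrix by it broadcasts correctly, and transposing back restores the original orientation. A minimal sketch of the equivalence (illustrative values, not from the project):

    from tensorflow.keras import backend as K

    q = K.constant([[1.0, 3.0], [2.0, 2.0]])
    row_normalized = K.transpose(K.transpose(q) / K.sum(q, axis=1))
    # the same result with explicit keepdims broadcasting
    row_normalized_alt = q / K.sum(q, axis=1, keepdims=True)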
Example #5
Source File: iic-13.5.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 5 votes |
def mi_loss(self, y_true, y_pred):
    """Mutual information loss computed from the joint distribution matrix
    and the marginals.

    Arguments:
        y_true (tensor): Not used since this is unsupervised learning
        y_pred (tensor): stack of softmax predictions for the Siamese
            latent vectors (Z and Zbar)
    """
    size = self.args.batch_size
    n_labels = y_pred.shape[-1]
    # lower half is Z
    Z = y_pred[0: size, :]
    Z = K.expand_dims(Z, axis=2)
    # upper half is Zbar
    Zbar = y_pred[size: y_pred.shape[0], :]
    Zbar = K.expand_dims(Zbar, axis=1)
    # compute joint distribution (Eq 10.3.2 & .3)
    P = K.batch_dot(Z, Zbar)
    P = K.sum(P, axis=0)
    # enforce symmetric joint distribution (Eq 10.3.4)
    P = (P + K.transpose(P)) / 2.0
    # normalization of total probability to 1.0
    P = P / K.sum(P)
    # marginal distributions (Eq 10.3.5 & .6)
    Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
    Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
    Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
    Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
    P = K.clip(P, K.epsilon(), np.finfo(float).max)
    Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
    Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
    # negative MI loss (Eq 10.3.7)
    neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
    # each head contributes 1/n_heads to the total loss
    return neg_mi / self.args.heads
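For reference, the log terms above implement the negative mutual information of the joint distribution P with marginals Pi and Pj; in the notation of the comments, the quantity being minimized is

    neg_mi = \sum_{i,j} P_{ij}\left(\log P_i + \log P_j - \log P_{ij}\right) = -I(Z;\bar{Z})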
Example #6
Source File: shapelets.py From tslearn with BSD 2-Clause "Simplified" License | 5 votes |
def call(self, x, **kwargs):
    # (x - y)^2 = x^2 + y^2 - 2 * x * y
    x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
    y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                     (1, 1, self.n_shapelets))
    xy = K.dot(x, K.transpose(self.kernel))
    return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1]
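The expansion in the comment is what lets the layer compute all sample-to-shapelet squared distances with a single K.dot against the transposed kernel. A tiny numpy sketch of the identity (illustrative names, not from tslearn):

    import numpy as np

    x = np.random.rand(4, 5)   # 4 vectors
    w = np.random.rand(3, 5)   # 3 "shapelets"
    direct = ((x[:, None, :] - w[None, :, :]) ** 2).sum(-1)  # (x - y)^2, explicit
    expanded = (x ** 2).sum(1)[:, None] + (w ** 2).sum(1)[None, :] - 2 * x @ w.T
    assert np.allclose(direct, expanded)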
Example #7
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0 | 5 votes |
def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
    emb_c, emb_r = inputs
    bs = K.shape(emb_c)[0]
    embeddings = K.concatenate([emb_c, emb_r], 0)
    dot_product = K.dot(embeddings, K.transpose(embeddings))
    square_norm = K.batch_dot(embeddings, embeddings, axes=1)
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
    # keep only the context-vs-response block of the distance matrix
    distances = distances[0:bs, bs:bs + bs]
    distances = K.clip(distances, 0.0, None)
    # mask exact zeros so sqrt has no infinite gradient at 0
    mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
    distances = distances + mask * 1e-16
    distances = K.sqrt(distances)
    distances = distances * (1.0 - mask)
    return distances