Python tensorflow.tensordot() Examples
The following are 29 code examples of tensorflow.tensordot(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
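
Before diving in, a minimal sketch of the forms that tf.tensordot's axes argument can take (the shapes in the comments are illustrative, not taken from any example below):

import tensorflow as tf

a = tf.ones((2, 3, 4))
b = tf.ones((4, 5))

# Integer axes=N: contract the last N axes of `a` with the first N axes of `b`.
c = tf.tensordot(a, b, axes=1)            # shape (2, 3, 5)

# Explicit axis lists: contract axis 2 of `a` against axis 0 of `b`.
d = tf.tensordot(a, b, axes=[[2], [0]])   # shape (2, 3, 5), same as above

# Multiple axis pairs contract several dimensions at once.
e = tf.tensordot(tf.ones((3, 4)), tf.ones((3, 4)), axes=[[0, 1], [0, 1]])  # scalar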
Example #1
Source File: network.py From generative-compression with MIT License (6 votes)

def quantizer(w, config, reuse=False, temperature=1, L=5, scope='image'):
    """
    Quantize feature map over L centers to obtain discrete $\hat{w}$
    + Centers: {-2,-1,0,1,2}
    + TODO: Toggle learnable centers?
    """
    with tf.variable_scope('quantizer_{}'.format(scope), reuse=reuse):
        centers = tf.cast(tf.range(-2, 3), tf.float32)
        # Partition W into the Voronoi tessellation over the centers
        w_stack = tf.stack([w for _ in range(L)], axis=-1)
        w_hard = tf.cast(tf.argmin(tf.abs(w_stack - centers), axis=-1),
                         tf.float32) + tf.reduce_min(centers)

        smx = tf.nn.softmax(-1.0 / temperature * tf.abs(w_stack - centers), dim=-1)
        # Contract last dimension
        w_soft = tf.einsum('ijklm,m->ijkl', smx, centers)
        # w_soft = tf.tensordot(smx, centers, axes=((-1), (0)))

        # Treat quantization as differentiable for optimization
        w_bar = tf.round(tf.stop_gradient(w_hard - w_soft) + w_soft)

        return w_bar
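
The commented-out line above suggests tf.tensordot as a drop-in for the einsum contraction; a minimal check of that equivalence, with illustrative shapes (not from the repo):

import tensorflow as tf

smx = tf.random_uniform((2, 4, 4, 3, 5))  # softmax weights; last axis over L=5 centers
centers = tf.constant([-2., -1., 0., 1., 2.])

w_soft_einsum = tf.einsum('ijklm,m->ijkl', smx, centers)
w_soft_tdot = tf.tensordot(smx, centers, axes=1)  # contract trailing axis
# Both contract the last (centers) dimension and agree elementwise.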
Example #2
Source File: core.py From icme2019 with MIT License (6 votes)

def call(self, inputs, **kwargs):
    query, keys = inputs
    keys_len = keys.get_shape()[1]
    queries = K.repeat_elements(query, keys_len, 1)

    att_input = tf.concat(
        [queries, keys, queries - keys, queries * keys], axis=-1)
    att_out = MLP(self.hidden_size, self.activation, self.l2_reg,
                  self.keep_prob, self.use_bn, seed=self.seed)(att_input)
    attention_score = tf.nn.bias_add(
        tf.tensordot(att_out, self.kernel, axes=(-1, 0)), self.bias)

    return attention_score
Example #3
Source File: core.py From icme2019 with MIT License (6 votes)

def call(self, inputs, training=None, **kwargs):
    deep_input = inputs

    for i in range(len(self.hidden_size)):
        fc = tf.nn.bias_add(
            tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0)),
            self.bias[i])
        # fc = Dense(self.hidden_size[i], activation=None,
        #            kernel_initializer=glorot_normal(seed=self.seed),
        #            kernel_regularizer=l2(self.l2_reg))(deep_input)
        if self.use_bn:
            fc = tf.keras.layers.BatchNormalization()(fc)
        fc = activation_fun(self.activation, fc)
        # fc = tf.nn.dropout(fc, self.keep_prob)
        fc = tf.keras.layers.Dropout(1 - self.keep_prob)(fc)
        deep_input = fc

    return deep_input
Example #4
Source File: layers.py From DGFraud with Apache License 2.0 (6 votes)

def node_attention(inputs, adj, return_weights=False):
    hidden_size = inputs.shape[-1].value
    H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))

    # convert adj to sparse tensor
    zero = tf.constant(0, dtype=tf.float32)
    where = tf.not_equal(adj, zero)
    indices = tf.where(where)
    values = tf.gather_nd(adj, indices)
    adj = tf.SparseTensor(indices=indices,
                          values=values,
                          dense_shape=adj.shape)

    with tf.name_scope('v'):
        v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))

    weights = tf.sparse_softmax(v, name='alphas')  # [nodes, nodes]
    output = tf.sparse_tensor_dense_matmul(weights, inputs)

    if not return_weights:
        return output
    else:
        return output, weights


# view-level attention (equation (4) in SemiGNN)
Example #5
Source File: Model.py From stocknet-code with MIT License (6 votes)

def _create_corpus_embed(self):
    """
    msg_embed: batch_size * max_n_days * max_n_msgs * msg_embed_size
    => corpus_embed: batch_size * max_n_days * corpus_embed_size
    """
    with tf.name_scope('corpus_embed'):
        with tf.variable_scope('u_t'):
            proj_u = self._linear(self.msg_embed, self.msg_embed_size,
                                  'tanh', use_bias=False)
            w_u = tf.get_variable('w_u', shape=(self.msg_embed_size, 1),
                                  initializer=self.initializer)
            u = tf.reduce_mean(tf.tensordot(proj_u, w_u, axes=1),
                               axis=-1)  # batch_size * max_n_days * max_n_msgs

        mask_msgs = tf.sequence_mask(self.n_msgs_ph, maxlen=self.max_n_msgs,
                                     dtype=tf.bool, name='mask_msgs')
        ninf = tf.fill(tf.shape(mask_msgs), np.NINF)
        masked_score = tf.where(mask_msgs, u, ninf)
        u = neural.softmax(masked_score)  # batch_size * max_n_days * max_n_msgs
        u = tf.where(tf.is_nan(u), tf.zeros_like(u), u)  # replace nan with 0.0

        u = tf.expand_dims(u, axis=-2)  # batch_size * max_n_days * 1 * max_n_msgs
        corpus_embed = tf.matmul(u, self.msg_embed)  # batch_size * max_n_days * 1 * msg_embed_size
        corpus_embed = tf.reduce_mean(corpus_embed, axis=-2)  # batch_size * max_n_days * msg_embed_size
        self.corpus_embed = tf.nn.dropout(corpus_embed,
                                          keep_prob=1 - self.dropout_ce,
                                          name='corpus_embed')
Example #6
Source File: networks.py From auto_yolo with MIT License (6 votes)

def _call(self, _inp, output_size, is_training):
    batch_size = tf.shape(_inp)[0]
    H, W, B, A = tuple(int(i) for i in _inp.shape[1:])

    if self.embedding is None:
        self.embedding = tf.get_variable(
            "embedding", shape=(int(A / 2), self.n_objects), dtype=tf.float32)

    inp = tf.reshape(_inp, (batch_size, H * W * B, A))
    key, value = tf.split(inp, 2, axis=2)
    raw_attention = tf.tensordot(key, self.embedding, [[2], [0]])
    attention = tf.nn.softmax(raw_attention, axis=1)

    attention_t = tf.transpose(attention, (0, 2, 1))
    weighted_value = tf.matmul(attention_t, value)

    flat_weighted_value = tf.reshape(
        weighted_value, (batch_size, self.n_objects * int(A / 2)))

    if self.output_network is None:
        self.output_network = cfg.build_math_output(scope="math_output")

    return self.output_network(flat_weighted_value, output_size, is_training)
Example #7
Source File: embedder_utils.py From Counterfactual-StoryRW with MIT License (6 votes)

def soft_embedding_lookup(embedding, soft_ids):
    """Transforms soft ids (e.g., probability distribution over ids) into
    embeddings, by mixing the embedding vectors with the soft weights.

    Args:
        embedding: A Tensor of shape `[num_classes] + embedding-dim` containing
            the embedding vectors. Embedding can have dimensionality > 1, i.e.,
            :attr:`embedding` can be of shape
            `[num_classes, emb_dim_1, emb_dim_2, ...]`
        soft_ids: A Tensor of weights (probabilities) used to mix the
            embedding vectors.

    Returns:
        A Tensor of shape `shape(soft_ids)[:-1] + shape(embedding)[1:]`. For
        example, if `shape(soft_ids) = [batch_size, max_time, vocab_size]`
        and `shape(embedding) = [vocab_size, emb_dim]`, then the return tensor
        has shape `[batch_size, max_time, emb_dim]`.

    Example::

        decoder_outputs, ... = decoder(...)
        soft_seq_emb = soft_embedding_lookup(
            embedding, tf.nn.softmax(decoder_outputs.logits))
    """
    return tf.tensordot(tf.to_float(soft_ids), embedding, [-1, 0])
Example #8
Source File: hagcn_layers.py From deepchem with MIT License (6 votes)

def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    act_fn = activations.get('sigmoid')
    if in_layers is None:
        in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self._build()

    A_tilda_k = in_layers[0].out_tensor
    X = in_layers[1].out_tensor

    if self.combine_method == "linear":
        concatenated = tf.concat([A_tilda_k, X], axis=2)
        adp_fn_val = act_fn(
            tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
    else:
        adp_fn_val = act_fn(tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))

    out_tensor = adp_fn_val
    if set_tensors:
        self.variables = self.trainable_weights
        self.out_tensor = out_tensor
    return out_tensor
Example #9
Source File: man_utils.py From BERT with Apache License 2.0 (6 votes)

def bilinear_attention(query, context, query_mask, context_mask,
                       dropout_ratio, scope, reuse=None):
    with tf.variable_scope(scope + "_Context_to_Query_Attention_Layer", reuse=reuse):
        context_ = tf.transpose(context, [0, 2, 1])
        hidden_dim = query.get_shape()[-1]

        attn_W = tf.get_variable("AttnW", dtype=tf.float32,
                                 shape=[hidden_dim, hidden_dim],
                                 initializer=initializer)

        weighted_query = tf.tensordot(query, attn_W, axes=[[2], [0]])
        S = tf.matmul(weighted_query, context_)  # batch x q_len x c_len

        mask_q = tf.expand_dims(query_mask, 1)
        mask_c = tf.expand_dims(context_mask, 1)

        S_ = tf.nn.softmax(qanet_layers.mask_logits(S, mask=mask_c))
        c2q = tf.matmul(S_, context)

        S_T = tf.nn.softmax(qanet_layers.mask_logits(tf.transpose(S, [0, 2, 1]),
                                                     mask=mask_q))
        q2c = tf.matmul(S_T, query)

        return c2q, q2c
Example #10
Source File: attention.py From Attention-Based-BiLSTM-relation-extraction with Apache License 2.0 (6 votes)

def attention(inputs):
    # Trainable parameters
    hidden_size = inputs.shape[2].value
    u_omega = tf.get_variable("u_omega", [hidden_size],
                              initializer=tf.keras.initializers.glorot_normal())

    with tf.name_scope('v'):
        v = tf.tanh(inputs)

    # For each of the timestamps its vector of size A from `v` is reduced with `u` vector
    vu = tf.tensordot(v, u_omega, axes=1, name='vu')  # (B,T) shape
    alphas = tf.nn.softmax(vu, name='alphas')         # (B,T) shape

    # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)

    # Final output with tanh
    output = tf.tanh(output)

    return output, alphas
Example #11
Source File: networks.py From auto_yolo with MIT License (5 votes)

def addition(left, right):
    m = left.shape[1]
    n = right.shape[1]

    mat = tf.to_float(
        tf.equal(
            tf.reshape(tf.range(m)[:, None] + tf.range(n)[None, :], (-1, 1)),
            tf.range(m + n - 1)[None, :]))

    outer_product = tf.matmul(left[:, :, None], right[:, None, :])
    outer_product = tf.reshape(outer_product, (-1, m * n))

    # `axes` is required by tf.tensordot: contract the last axis of
    # `outer_product` (size m * n) with the first axis of `mat`.
    return tf.tensordot(outer_product, mat, axes=1)
Example #12
Source File: Model.py From stocknet-code with MIT License (5 votes)

def _linear(self, args, output_size, activation=None, use_bias=True, use_bn=False):
    if type(args) not in (list, tuple):
        args = [args]

    shape = [a if a else -1 for a in args[0].get_shape().as_list()[:-1]]
    shape.append(output_size)

    sizes = [a.get_shape()[-1].value for a in args]
    total_arg_size = sum(sizes)

    scope = tf.get_variable_scope()
    x = args[0] if len(args) == 1 else tf.concat(args, -1)
    with tf.variable_scope(scope):
        weight = tf.get_variable('weight', [total_arg_size, output_size],
                                 dtype=tf.float32, initializer=self.initializer)
        res = tf.tensordot(x, weight, axes=1)
        if use_bias:
            bias = tf.get_variable('bias', [output_size], dtype=tf.float32,
                                   initializer=self.bias_initializer)
            res = tf.nn.bias_add(res, bias)

    res = tf.reshape(res, shape)

    if use_bn:
        res = batch_norm(res, center=True, scale=True, decay=0.99,
                         updates_collections=None,
                         is_training=self.is_training_phase, scope=scope)

    if activation == 'tanh':
        res = tf.nn.tanh(res)
    elif activation == 'sigmoid':
        res = tf.nn.sigmoid(res)
    elif activation == 'relu':
        res = tf.nn.relu(res)
    elif activation == 'softmax':
        res = tf.nn.softmax(res)

    return res
Example #13
Source File: layers.py From visil with Apache License 2.0 (5 votes)

def __call__(self, logits):
    weights = tf.tensordot(logits, self.context_vector, axes=1) / 2.0 + 0.5
    return tf.multiply(logits, weights), weights
Example #14
Source File: dnn_crf.py From DeepLearning_NLP with MIT License (5 votes)

def get_output_layer(self, layer: tf.Tensor) -> tf.Tensor:
    output_weight = self.__get_variable([self.hidden_units, self.tags_count],
                                        'output_weight')
    output_bias = self.__get_variable([1, 1, self.tags_count], 'output_bias')
    self.params += [output_weight, output_bias]
    return tf.tensordot(layer, output_weight, [[2], [0]]) + output_bias
Example #15
Source File: layers.py From visil with Apache License 2.0 (5 votes)

def __call__(self, logits):
    logits = logits - self.mean
    logits = tf.tensordot(logits, self.weights, axes=1)
    return logits
Example #16
Source File: tpu.py From lasertagger with Apache License 2.0 (5 votes)

def embedding_matmul(embedding_table, values, mask, name="embedding_matmul"):
    """Performs embedding lookup via a matmul.

    The matrix to be multiplied by the embedding table Tensor is constructed
    via an implementation of scatter based on broadcasting embedding indices
    and performing an equality comparison against a broadcasted
    range(num_embedding_table_rows). All masked positions will produce an
    embedding vector of zeros.

    Args:
        embedding_table: Tensor of embedding table.
            Rank 2 (table_size x embedding dim)
        values: Tensor of embedding indices. Rank 2 (batch x n_indices)
        mask: Tensor of mask / weights. Rank 2 (batch x n_indices)
        name: Optional name scope for created ops

    Returns:
        Rank 3 tensor of embedding vectors.
    """
    with tf.name_scope(name):
        n_embeddings = embedding_table.get_shape().as_list()[0]
        batch_size, padded_size = values.shape.as_list()

        emb_idcs = tf.tile(
            tf.reshape(values, (batch_size, padded_size, 1)),
            (1, 1, n_embeddings))
        emb_weights = tf.tile(
            tf.reshape(mask, (batch_size, padded_size, 1)),
            (1, 1, n_embeddings))
        col_idcs = tf.tile(
            tf.reshape(tf.range(n_embeddings), (1, 1, n_embeddings)),
            (batch_size, padded_size, 1))
        one_hot = tf.where(
            tf.equal(emb_idcs, col_idcs), emb_weights,
            tf.zeros((batch_size, padded_size, n_embeddings)))

        return tf.tensordot(one_hot, embedding_table, 1)
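
A hypothetical usage sketch of the function above (the names and shapes are illustrative only); the mask zeros out padded index positions:

import tensorflow as tf

table = tf.random_uniform((100, 16))              # (table_size, emb_dim)
ids = tf.constant([[3, 7, 0], [1, 1, 4]])         # (batch, n_indices)
mask = tf.constant([[1., 1., 0.], [1., 0., 1.]])  # 0 marks padding

emb = embedding_matmul(table, ids, mask)          # (2, 3, 16); masked slots are all-zero vectors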
Example #17
Source File: utils.py From trRosetta with MIT License (5 votes)

def reweight(msa1hot, cutoff):
    with tf.name_scope('reweight'):
        id_min = tf.cast(tf.shape(msa1hot)[1], tf.float32) * cutoff
        id_mtx = tf.tensordot(msa1hot, msa1hot, [[1, 2], [1, 2]])
        id_mask = id_mtx > id_min
        w = 1.0 / tf.reduce_sum(tf.cast(id_mask, dtype=tf.float32), -1)
    return w


# shrunk covariance inversion
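
The [[1, 2], [1, 2]] form used here contracts the position and one-hot axes of both operands in one call, leaving a sequence-by-sequence count of identical residues; a shape-only illustration with toy dimensions:

import tensorflow as tf

msa1hot = tf.ones((10, 50, 21))  # (n_seqs, seq_len, n_states)

# id_mtx[i, j] = number of positions where sequences i and j share a state.
id_mtx = tf.tensordot(msa1hot, msa1hot, axes=[[1, 2], [1, 2]])  # shape (10, 10)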
Example #18
Source File: utils.py From StegaStamp with MIT License (5 votes)

def dct_8x8(image):
    image = image - 128

    tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
    for x, y, u, v in itertools.product(range(8), repeat=4):
        tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos(
            (2 * y + 1) * v * np.pi / 16)

    alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
    scale = np.outer(alpha, alpha) * 0.25

    result = scale * tf.tensordot(image, tensor, axes=2)
    result.set_shape(image.shape.as_list())
    return result


# 5. Quantization
Example #19
Source File: tf_wrapper.py From SPFN with MIT License (5 votes)

def fully_connected(scope, inputs, num_outputs, is_training,
                    bn_decay=None, activation_fn=tf.nn.relu):
    # Input: inputs is ...xN
    # Returns: ...x[num_outputs]
    with tf.variable_scope(scope):
        weights = _variable_on_gpu('weights',
                                   [inputs.get_shape()[-1].value, num_outputs],
                                   initializer=tf.contrib.layers.xavier_initializer())
        biases = _variable_on_gpu('biases', [num_outputs],
                                  initializer=tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(
            tf.tensordot(inputs, weights, axes=[[-1], [0]]), biases)
        if bn_decay is not None:
            outputs = _batch_norm_simple('bn', outputs, is_training, bn_decay)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
Example #20
Source File: HAN.py From TextSentimentClassification with MIT License (5 votes)

def _encoder_attention(self, inputs, state_size, sequence_length, attention_dim, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        batch_size = tf.shape(inputs)[0]
        seq_len = tf.shape(inputs)[1]

        cell_fw = tf.nn.rnn_cell.GRUCell(state_size, name="cell_fw",
                                         kernel_initializer=tf.glorot_uniform_initializer(),
                                         bias_initializer=tf.zeros_initializer())
        cell_bw = tf.nn.rnn_cell.GRUCell(state_size, name="cell_bw",
                                         kernel_initializer=tf.glorot_uniform_initializer(),
                                         bias_initializer=tf.zeros_initializer())
        if self.dropout_value is not None:
            cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw,
                                                    output_keep_prob=1 - self.dropout)
            cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell_bw,
                                                    output_keep_prob=1 - self.dropout)

        (rnn_outputs_fw, rnn_outputs_bw), final_state = \
            tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, cell_bw=cell_bw,
                                            inputs=inputs,
                                            sequence_length=sequence_length,
                                            dtype=tf.float32)
        rnn_outputs = tf.concat([rnn_outputs_fw, rnn_outputs_bw], axis=-1)

        W = tf.get_variable(name="weights_lt", shape=[2 * state_size, attention_dim],
                            dtype=tf.float32,
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable(name="biases_lt", shape=[attention_dim],
                            dtype=tf.float32, initializer=tf.zeros_initializer())
        final_rnn_outputs = tf.tensordot(rnn_outputs, W, axes=1) + b
        final_rnn_outputs = tf.reshape(final_rnn_outputs,
                                       shape=[batch_size, seq_len, attention_dim])

        self.context_vector = tf.get_variable(name="context_vector",
                                              shape=(attention_dim,),
                                              dtype=tf.float32,
                                              initializer=tf.glorot_uniform_initializer())
        tmp = tf.tensordot(final_rnn_outputs, self.context_vector, axes=1)
        tmp = tf.reshape(tmp, shape=[batch_size, seq_len])
        tmp = tf.exp(tmp)
        self.attention_weights = tmp / tf.reduce_sum(tmp, reduction_indices=1,
                                                     keepdims=True)
        self.attention_weights = tf.expand_dims(self.attention_weights, axis=-1)

        weighted_outputs = tf.reduce_sum(final_rnn_outputs * self.attention_weights,
                                         axis=1)
        final_weighted_outputs = tf.reshape(weighted_outputs,
                                            shape=[batch_size, attention_dim])
        return final_weighted_outputs
Example #21
Source File: tpu.py From nsfw with Apache License 2.0 (5 votes)

def embedding_matmul(embedding_table, values, mask, name="embedding_matmul"):
    """Performs embedding lookup via a matmul.

    The matrix to be multiplied by the embedding table Tensor is constructed
    via an implementation of scatter based on broadcasting embedding indices
    and performing an equality comparison against a broadcasted
    range(num_embedding_table_rows). All masked positions will produce an
    embedding vector of zeros.

    Args:
        embedding_table: Tensor of embedding table.
            Rank 2 (table_size x embedding dim)
        values: Tensor of embedding indices. Rank 2 (batch x n_indices)
        mask: Tensor of mask / weights. Rank 2 (batch x n_indices)
        name: Optional name scope for created ops

    Returns:
        Rank 3 tensor of embedding vectors.
    """
    with tf.name_scope(name):
        n_embeddings = embedding_table.get_shape().as_list()[0]
        batch_size, padded_size = values.shape.as_list()

        emb_idcs = tf.tile(
            tf.reshape(values, (batch_size, padded_size, 1)),
            (1, 1, n_embeddings))
        emb_weights = tf.tile(
            tf.reshape(mask, (batch_size, padded_size, 1)),
            (1, 1, n_embeddings))
        col_idcs = tf.tile(
            tf.reshape(tf.range(n_embeddings), (1, 1, n_embeddings)),
            (batch_size, padded_size, 1))
        one_hot = tf.where(
            tf.equal(emb_idcs, col_idcs), emb_weights,
            tf.zeros((batch_size, padded_size, n_embeddings)))

        return tf.tensordot(one_hot, embedding_table, 1)
Example #22
Source File: bayes_test.py From training_results_v0.5 with Apache License 2.0 (5 votes)

def testBayesianLinearModel(self):
    """Tests that model makes reasonable predictions."""
    np.random.seed(42)
    train_batch_size = 5
    test_batch_size = 2
    num_features = 3
    noise_variance = 0.01
    coeffs = tf.range(num_features, dtype=tf.float32)
    features = tf.to_float(np.random.randn(train_batch_size, num_features))
    labels = (tf.tensordot(features, coeffs, [[-1], [0]]) +
              noise_variance * tf.to_float(np.random.randn(train_batch_size)))

    model = bayes.BayesianLinearModel(noise_variance=noise_variance)
    model.fit(features, labels)

    test_features = tf.to_float(np.random.randn(test_batch_size, num_features))
    test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
    outputs = model(test_features)
    test_predictions = outputs.distribution.mean()
    test_predictions_variance = outputs.distribution.variance()

    [
        test_labels_val,
        test_predictions_val,
        test_predictions_variance_val,
    ] = self.evaluate(
        [test_labels, test_predictions, test_predictions_variance])
    self.assertEqual(test_predictions_val.shape, (test_batch_size,))
    self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))
    self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)
    self.assertAllLessEqual(test_predictions_variance_val, noise_variance)
Example #23
Source File: common_attention.py From training_results_v0.5 with Apache License 2.0 (5 votes)

def compute_attention_component(antecedent,
                                total_depth,
                                filter_width=1,
                                padding="VALID",
                                name="c",
                                vars_3d_num_heads=0):
    """Computes attention component (query, key or value).

    Args:
        antecedent: a Tensor with shape [batch, length, channels]
        total_depth: an integer
        filter_width: An integer specifying how wide you want the attention
            component to be.
        padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
        name: a string specifying scope name.
        vars_3d_num_heads: an optional integer (if we want to use 3d variables)

    Returns:
        c : [batch, length, depth] tensor
    """
    if vars_3d_num_heads > 0:
        assert filter_width == 1
        input_depth = antecedent.get_shape().as_list()[-1]
        depth_per_head = total_depth // vars_3d_num_heads
        initializer_stddev = input_depth ** -0.5
        if "q" in name:
            initializer_stddev *= depth_per_head ** -0.5
        var = tf.get_variable(
            name,
            [input_depth, vars_3d_num_heads, total_depth // vars_3d_num_heads],
            initializer=tf.random_normal_initializer(stddev=initializer_stddev))
        var = tf.cast(var, antecedent.dtype)
        var = tf.reshape(var, [input_depth, total_depth])
        return tf.tensordot(antecedent, var, axes=1)
    if filter_width == 1:
        return common_layers.dense(
            antecedent, total_depth, use_bias=False, name=name)
    else:
        return common_layers.conv1d(
            antecedent, total_depth, filter_width, padding=padding, name=name)
Example #24
Source File: common_layers.py From training_results_v0.5 with Apache License 2.0 (5 votes)

def cumsum(x, axis=0, exclusive=False):
    """TPU hack for tf.cumsum.

    This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
    the axis dimension is very large.

    Args:
        x: a Tensor
        axis: an integer
        exclusive: a boolean

    Returns:
        Tensor of the same shape as x.
    """
    if not is_xla_compiled():
        return tf.cumsum(x, axis=axis, exclusive=exclusive)
    x_shape = shape_list(x)
    rank = len(x_shape)
    length = x_shape[axis]
    my_range = tf.range(length)
    comparator = tf.less if exclusive else tf.less_equal
    mask = tf.cast(
        comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
        x.dtype)
    ret = tf.tensordot(x, mask, axes=[[axis], [0]])
    if axis != rank - 1:
        ret = tf.transpose(
            ret, list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
    return ret
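
A quick sanity check of the mask-and-tensordot trick against tf.cumsum, for the last-axis case where no transpose is needed (a sketch, not from the repo):

import tensorflow as tf

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
my_range = tf.range(3)
# mask[i, j] = 1 where i <= j, so summing over i accumulates prefixes.
mask = tf.cast(tf.less_equal(my_range[:, None], my_range[None, :]), x.dtype)
ret = tf.tensordot(x, mask, axes=[[1], [0]])  # equals tf.cumsum(x, axis=1): [[1,3,6],[4,9,15]]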
Example #25
Source File: vqvae.py From vqvae-speech with MIT License (5 votes)

def _C(self, x):
    '''
    Nearest neighbor (to z_emb) search
    `x`: [b, T, c]
    `z_e`: [K, c]
    '''
    with tf.name_scope('Classifier'):
        similarity = tf.tensordot(x, self.z_emb, [[-1], [-1]])
        tf.summary.histogram('x_dot_ze', similarity)

        z2 = tf.reduce_sum(tf.square(self.z_emb), axis=-1)  # [K]
        tf.summary.histogram('z_norm_2', z2)
        tf.summary.histogram('z', self.z_emb)

        x2 = tf.reduce_sum(tf.square(x), axis=-1)  # [b, T]
        tf.summary.histogram('x_norm_2', x2)
        tf.summary.histogram('x', x)

        # x2 - 2xz + z2
        dist2 = tf.nn.bias_add(-2. * similarity, z2) + tf.expand_dims(x2, axis=-1)

        u, v = tf.nn.moments(x, axes=[-1])
        tf.summary.histogram('x_mu', u)
        tf.summary.histogram('x_var', v)

        u, v = tf.nn.moments(self.z_emb, axes=[-1])
        tf.summary.histogram('z_mu', u)
        tf.summary.histogram('z_var', v)

        z_ids = tf.argmin(dist2, axis=-1)
        tf.summary.histogram('z_ids', z_ids)
        return z_ids
Example #26
Source File: utils.py From StegaStamp with MIT License (5 votes)

def idct_8x8(image):
    alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
    alpha = np.outer(alpha, alpha)
    image = image * alpha

    tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
    for x, y, u, v in itertools.product(range(8), repeat=4):
        tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
            (2 * v + 1) * y * np.pi / 16)

    result = 0.25 * tf.tensordot(image, tensor, axes=2) + 128
    result.set_shape(image.shape.as_list())
    return result


# -3. Block joining
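
Since idct_8x8 above is the inverse of the dct_8x8 in Example #18, a round-trip sketch (assuming both functions are in scope) makes a handy self-check:

import numpy as np
import tensorflow as tf

block = tf.constant(np.random.rand(8, 8).astype(np.float32) * 255.)
recovered = idct_8x8(dct_8x8(block))  # recovers `block` up to float tolerance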
Example #27
Source File: loss.py From mobile-segmentation with Apache License 2.0 (5 votes)

def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list
        of classes to average.
    """
    C = probas.shape[1]
    losses = []
    present = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        # foreground for class c
        fg = tf.cast(tf.equal(labels, c), probas.dtype)
        if classes == 'present':
            present.append(tf.reduce_sum(fg) > 0)
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = tf.abs(fg - class_pred)
        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0],
                                          name="descending_sort_{}".format(c))
        fg_sorted = tf.gather(fg, perm)
        grad = lovasz_grad(fg_sorted)
        losses.append(
            tf.tensordot(errors_sorted, tf.stop_gradient(grad), 1,
                         name="loss_class_{}".format(c)))
    if len(class_to_sum) == 1:  # short-circuit mean when only one class
        return losses[0]
    losses_tensor = tf.stack(losses)
    if classes == 'present':
        present = tf.stack(present)
        losses_tensor = tf.boolean_mask(losses_tensor, present)
    loss = tf.reduce_mean(losses_tensor)
    return loss
Example #28
Source File: loss.py From mobile-segmentation with Apache License 2.0 (5 votes)

def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """

    def compute_loss():
        labelsf = tf.cast(labels, logits.dtype)
        signs = 2. * labelsf - 1.
        errors = 1. - logits * tf.stop_gradient(signs)
        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0],
                                          name="descending_sort")
        gt_sorted = tf.gather(labelsf, perm)
        grad = lovasz_grad(gt_sorted)
        loss = tf.tensordot(tf.nn.relu(errors_sorted),
                            tf.stop_gradient(grad), 1, name="loss_non_void")
        return loss

    # deal with the void prediction case (only void pixels)
    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
                   lambda: tf.reduce_sum(logits) * 0.,
                   compute_loss,
                   strict=True,
                   name="loss")
    return loss
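
Both Lovasz snippets call a lovasz_grad helper that the excerpts omit; for context, a sketch of the standard implementation (taken from the Lovasz-Softmax reference code, stated here as an assumption about this repo's version):

import tensorflow as tf

def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors (see Alg. 1 of
    the Lovasz-Softmax paper)."""
    gts = tf.reduce_sum(gt_sorted)
    intersection = gts - tf.cumsum(gt_sorted)
    union = gts + tf.cumsum(1. - gt_sorted)
    jaccard = 1. - intersection / union
    # Differences give per-position weights for the sorted errors.
    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
    return jaccard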
Example #29
Source File: balanced_positive_negative_sampler.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 (5 votes)

def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                   num_end_samples, total_num_samples):
    """Slices num_start_samples and last num_end_samples from input_tensor.

    Args:
        input_tensor: An int32 tensor of shape [N] to be sliced.
        num_start_samples: Number of examples to be sliced from the beginning
            of the input tensor.
        num_end_samples: Number of examples to be sliced from the end of the
            input tensor.
        total_num_samples: Sum of num_start_samples and num_end_samples. This
            should be a scalar.

    Returns:
        A tensor containing the first num_start_samples and last
        num_end_samples from input_tensor.
    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.int32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(indexed_positions - 1, total_num_samples,
                                  dtype=tf.int32)
    return tf.tensordot(input_tensor, one_hot_selector, axes=[0, 0])