Python tensorflow.transpose() Examples
The following are 30 code examples of tensorflow.transpose(), drawn from open-source projects. Each example notes its source file, project, and license.
Example #1
Source File: box_list_ops.py From DOTA_models with Apache License 2.0

def intersection(boxlist1, boxlist2, scope=None):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxlist1: BoxList holding N boxes
        boxlist2: BoxList holding M boxes
        scope: name scope.

    Returns:
        a tensor with shape [N, M] representing pairwise intersections
    """
    with tf.name_scope(scope, 'Intersection'):
        y_min1, x_min1, y_max1, x_max1 = tf.split(
            value=boxlist1.get(), num_or_size_splits=4, axis=1)
        y_min2, x_min2, y_max2, x_max2 = tf.split(
            value=boxlist2.get(), num_or_size_splits=4, axis=1)
        all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
        all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
        intersect_heights = tf.maximum(
            0.0, all_pairs_min_ymax - all_pairs_max_ymin)
        all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
        all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
        intersect_widths = tf.maximum(
            0.0, all_pairs_min_xmax - all_pairs_max_xmin)
        return intersect_heights * intersect_widths
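The [N, M] pairwise structure comes from broadcasting an [N, 1] column against the [1, M] row produced by tf.transpose. A minimal sketch of that trick with made-up coordinates, assuming TensorFlow 2.x eager execution (the example above targets the TF 1.x graph API):

import tensorflow as tf

y_max1 = tf.constant([[2.0], [5.0]])         # [N, 1] with N = 2
y_max2 = tf.constant([[3.0], [1.0], [4.0]])  # [M, 1] with M = 3

# [N, 1] against [1, M] broadcasts to the pairwise [N, M] matrix.
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
print(all_pairs_min_ymax.numpy())  # [[2. 1. 2.]
                                   #  [3. 1. 4.]]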
Example #2
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
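A small NumPy sketch of the stacking above: two 1-D score vectors are concatenated row-wise and transposed into an [n_samples, 2] feature matrix for the logistic regression. The values are made up for illustration.

import numpy as np

densities = np.array([0.1, 0.2, 0.3])
uncerts = np.array([1.0, 2.0, 3.0])
features = np.concatenate(
    (densities.reshape((1, -1)), uncerts.reshape((1, -1))),
    axis=0).transpose([1, 0])
print(features.shape)  # (3, 2)
print(features)        # [[0.1 1. ]
                       #  [0.2 2. ]
                       #  [0.3 3. ]]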
Example #3
Source File: __init__.py From spleeter with MIT License

def _inverse_stft(self, stft_t, time_crop=None):
    """ Inverse and reshape the given STFT

    :param stft_t: input STFT
    :returns: inverse STFT (waveform)
    """
    inversed = inverse_stft(
        tf.transpose(stft_t, perm=[2, 0, 1]),
        self._frame_length,
        self._frame_step,
        window_fn=lambda frame_length, dtype: (
            hann_window(frame_length, periodic=True, dtype=dtype))
    ) * self.WINDOW_COMPENSATION_FACTOR
    reshaped = tf.transpose(inversed)
    if time_crop is None:
        time_crop = tf.shape(self._features['waveform'])[0]
    return reshaped[:time_crop, :]
Example #4
Source File: __init__.py From spleeter with MIT License

def _build_stft_feature(self):
    """ Compute STFT of waveform and slice the STFT in segment
    with the right length to feed the network.
    """
    stft_name = self.stft_name
    spec_name = self.spectrogram_name

    if stft_name not in self._features:
        stft_feature = tf.transpose(
            stft(
                tf.transpose(self._features['waveform']),
                self._frame_length,
                self._frame_step,
                window_fn=lambda frame_length, dtype: (
                    hann_window(frame_length, periodic=True, dtype=dtype)),
                pad_end=True),
            perm=[1, 2, 0])
        self._features[f'{self._mix_name}_stft'] = stft_feature
    if spec_name not in self._features:
        self._features[spec_name] = tf.abs(
            pad_and_partition(self._features[stft_name], self._T))[:, :, :self._F, :]
Example #5
Source File: modalities.py From fine-lm with MIT License

def top(self, body_output, _):
    num_channels = self._model_hparams.problem.num_channels
    num_frames = self._model_hparams.video_num_target_frames
    with tf.variable_scope("rgb_softmax"):
        body_output_shape = common_layers.shape_list(body_output)
        reshape_shape = body_output_shape[:3]
        reshape_shape.extend([num_channels, num_frames, self.top_dimensionality])
        res = tf.layers.dense(
            body_output, self.top_dimensionality * num_channels * num_frames)
        res = tf.reshape(res, reshape_shape)
        res = tf.transpose(res, [0, 4, 1, 2, 3, 5])
        if not tf.get_variable_scope().reuse:
            res_argmax = tf.argmax(res[:, -1, :, :, :, :], axis=-1)
            tf.summary.image(
                "result",
                common_layers.tpu_safe_image_summary(res_argmax),
                max_outputs=1)
        return res
Example #6
Source File: slicenet.py From fine-lm with MIT License

def rank_loss(sentence_emb, image_emb, margin=0.2):
    """Experimental rank loss, thanks to kkurach@ for the code."""
    with tf.name_scope("rank_loss"):
        # Normalize first as this is assumed in cosine similarity later.
        sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
        image_emb = tf.nn.l2_normalize(image_emb, 1)
        # Both sentence_emb and image_emb have size [batch, depth].
        scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
        diagonal = tf.diag_part(scores)  # [batch]
        cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
        cost_im = tf.maximum(
            0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
        # Clear diagonals.
        batch_size = tf.shape(sentence_emb)[0]
        empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
        cost_s *= empty_diagonal_mat
        cost_im *= empty_diagonal_mat
        return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
Example #7
Source File: transformer_nat.py From fine-lm with MIT License

def vq_nearest_neighbor(x, hparams):
    """Find the nearest element in means to elements in x."""
    bottleneck_size = 2**hparams.bottleneck_bits
    means = hparams.means
    x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
    means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
    scalar_prod = tf.matmul(x, means, transpose_b=True)
    dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
    if hparams.bottleneck_kind == "em":
        x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
        x_means_hot = tf.one_hot(
            x_means_idx, depth=bottleneck_size)
        x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
    else:
        x_means_idx = tf.argmax(-dist, axis=-1)
        x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
    x_means = tf.matmul(x_means_hot, means)
    e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
    return x_means_hot, e_loss
Example #8
Source File: layers.py From ARU-Net with GNU General Public License v2.0

def images_to_sequence(tensor):
    """Convert a batch of images into a batch of sequences.

    Args:
        tensor: a (num_images, height, width, depth) tensor

    Returns:
        (width, num_images*height, depth) sequence tensor
    """
    transposed = tf.transpose(tensor, [2, 0, 1, 3])
    shapeT = tf.shape(transposed)
    shapeL = transposed.get_shape().as_list()
    # Calculate the output size of the upsampled tensor
    n_shape = tf.stack([
        shapeT[0],
        shapeT[1] * shapeT[2],
        shapeL[3]
    ])
    reshaped = tf.reshape(transposed, n_shape)
    return reshaped
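A shape-only sketch of images_to_sequence, assuming TensorFlow 2.x eager execution and the function defined above; the image batch is random data for illustration.

import tensorflow as tf

images = tf.random.normal([4, 8, 16, 3])  # (num_images, height, width, depth)
seq = images_to_sequence(images)
print(seq.shape)  # (16, 32, 3), i.e. (width, num_images * height, depth)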
Example #9
Source File: neural_gpu.py From fine-lm with MIT License

def neural_gpu_body(inputs, hparams, name=None):
    """The core Neural GPU."""
    with tf.variable_scope(name, "neural_gpu"):

        def step(state, inp):  # pylint: disable=missing-docstring
            x = tf.nn.dropout(state, 1.0 - hparams.dropout)
            for layer in range(hparams.num_hidden_layers):
                x = common_layers.conv_gru(
                    x, (hparams.kernel_height, hparams.kernel_width),
                    hparams.hidden_size, name="cgru_%d" % layer)
            # Padding input is zeroed-out in the modality, we check this by summing.
            padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
            new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
            return new_state

        return tf.foldl(
            step,
            tf.transpose(inputs, [1, 0, 2, 3]),
            initializer=inputs,
            parallel_iterations=1,
            swap_memory=True)
Example #10
Source File: seq2seq_attention_model.py From DOTA_models with Apache License 2.0

def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
    """Return the topK results and new decoder states."""
    feed = {
        self._enc_top_states: enc_top_states,
        self._dec_in_state: np.squeeze(np.array(dec_init_states)),
        self._abstracts: np.transpose(np.array([latest_tokens])),
        self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}

    results = sess.run(
        [self._topk_ids, self._topk_log_probs, self._dec_out_state],
        feed_dict=feed)

    ids, probs, states = results[0], results[1], results[2]
    new_states = [s for s in states]
    return ids, probs, new_states
Example #11
Source File: shapes.py From DOTA_models with Apache License 2.0

def rotate_dimensions(num_dims, src_dim, dest_dim):
    """Returns a list of dimension indices that will rotate src_dim to dest_dim.

    src_dim is moved to dest_dim, with all intervening dimensions shifted towards
    the hole left by src_dim. Eg:
    num_dims = 4, src_dim=3, dest_dim=1
    Returned list=[0, 3, 1, 2]
    For a tensor with dims=[5, 4, 3, 2] a transpose would yield [5, 2, 4, 3].

    Args:
        num_dims: The number of dimensions to handle.
        src_dim: The dimension to move.
        dest_dim: The dimension to move src_dim to.

    Returns:
        A list of rotated dimension indices.
    """
    # List of dimensions for transpose.
    dim_list = range(num_dims)
    # Shuffle src_dim to dest_dim by swapping to shuffle up the other dims.
    step = 1 if dest_dim > src_dim else -1
    for x in xrange(src_dim, dest_dim, step):
        dim_list[x], dim_list[x + step] = dim_list[x + step], dim_list[x]
    return dim_list
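A minimal sketch of feeding the returned permutation to tf.transpose, assuming TensorFlow 2.x eager execution. The helper itself is Python 2 code (xrange, item assignment on range), so the permutation from the docstring example is written out by hand here.

import tensorflow as tf

x = tf.zeros([5, 4, 3, 2])
perm = [0, 3, 1, 2]  # rotate_dimensions(4, src_dim=3, dest_dim=1)
print(tf.transpose(x, perm).shape)  # (5, 2, 4, 3)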
Example #12
Source File: network_units.py From DOTA_models with Apache License 2.0

def convert_network_state_tensorarray(tensorarray):
    """Converts a source TensorArray to a source Tensor.

    Performs a permutation between the steps * [stride, D] shape of a
    source TensorArray and the (flattened) [stride * steps, D] shape of
    a source Tensor.

    The TensorArrays used during recurrence have an additional zeroth step that
    needs to be removed.

    Args:
        tensorarray: TensorArray object to be converted.

    Returns:
        Tensor object after conversion.
    """
    tensor = tensorarray.stack()  # Results in a [steps, stride, D] tensor.
    tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1])  # Lop off the 0th step.
    tensor = tf.transpose(tensor, [1, 0, 2])  # Switch steps and stride.
    return tf.reshape(tensor, [-1, tf.shape(tensor)[2]])
Example #13
Source File: real_nvp_utils.py From DOTA_models with Apache License 2.0

def unsqueeze_2x2(input_):
    """Unsqueezing operation: reshape to convert channels into space."""
    if isinstance(input_, (float, int)):
        return input_
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    if channels % 4 != 0:
        raise ValueError("Number of channels not divisible by 4.")
    res = tf.reshape(input_, [batch_size, height, width, channels // 4, 2, 2])
    res = tf.transpose(res, [0, 1, 4, 2, 5, 3])
    res = tf.reshape(res, [batch_size, 2 * height, 2 * width, channels // 4])
    return res
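A shape-only sketch of unsqueeze_2x2, assuming TensorFlow 2.x eager execution and the function defined above; the input is random data with a channel count divisible by 4.

import tensorflow as tf

x = tf.random.normal([8, 4, 4, 12])
y = unsqueeze_2x2(x)
print(y.shape)  # (8, 8, 8, 3): height and width doubled, channels quartered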
Example #14
Source File: expert_utils.py From fine-lm with MIT License

def __init__(self, num_experts, gates):
    """Create a SparseDispatcher.

    Args:
        num_experts: an integer.
        gates: a `Tensor` of shape `[batch_size, num_experts]`.

    Returns:
        a SparseDispatcher
    """
    self._gates = gates
    self._num_experts = num_experts

    where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
    self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
    self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
    self._nonzero_gates = tf.gather(
        tf.reshape(self._gates, [-1]),
        self._batch_index * num_experts + self._expert_index)
Example #15
Source File: layers.py From ARU-Net with GNU General Public License v2.0

def sequence_to_images(tensor, num_batches):
    """Convert a batch of sequences into a batch of images.

    Args:
        tensor: (num_steps, num_batchesRNN, depth) sequence tensor
        num_batches: the number of image batches

    Returns:
        (num_batches, height, width, depth) tensor
    """
    shapeT = tf.shape(tensor)
    shapeL = tensor.get_shape().as_list()
    # Calculate the output size of the upsampled tensor
    height = tf.to_int32(shapeT[1] / num_batches)
    n_shape = tf.stack([
        shapeT[0],
        num_batches,
        height,
        shapeL[2]
    ])
    reshaped = tf.reshape(tensor, n_shape)
    return tf.transpose(reshaped, [1, 2, 0, 3])
Example #16
Source File: model.py From DOTA_models with Apache License 2.0

def compute_first_or_last(self, select, first=True):
    # Perform first or last operation on row select with probabilistic row
    # selection.
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
        if (first):
            current = tf.slice(select, [0, i], [self.batch_size, 1])
        else:
            current = tf.slice(select, [0, self.max_elements - 1 - i],
                               [self.batch_size, 1])
        curr_prob = current * (1 - running_sum)
        curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
        running_sum += curr_prob
        temp_ans = []
        curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
        for i_ans in range(self.max_elements):
            if (not (first) and i_ans == self.max_elements - 1 - i):
                temp_ans.append(curr_prob)
            elif (first and i_ans == i):
                temp_ans.append(curr_prob)
            else:
                temp_ans.append(tf.zeros_like(curr_prob))
        temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
        answer += temp_ans
    return answer
Example #17
Source File: common_attention.py From fine-lm with MIT License

def _relative_attention_inner(x, y, z, transpose):
    """Relative position-aware dot-product attention inner calculation.

    This batches matrix multiply calculations to avoid unnecessary broadcasting.

    Args:
        x: Tensor with shape [batch_size, heads, length, length or depth].
        y: Tensor with shape [batch_size, heads, length, depth].
        z: Tensor with shape [length, length, depth].
        transpose: Whether to transpose inner matrices of y and z. Should be true if
            last dimension of x is depth, not length.

    Returns:
        A Tensor with shape [batch_size, heads, length, length or depth].
    """
    batch_size = tf.shape(x)[0]
    heads = x.get_shape().as_list()[1]
    length = tf.shape(x)[2]

    # xy_matmul is [batch_size, heads, length, length or depth]
    xy_matmul = tf.matmul(x, y, transpose_b=transpose)
    # x_t is [length, batch_size, heads, length or depth]
    x_t = tf.transpose(x, [2, 0, 1, 3])
    # x_t_r is [length, batch_size * heads, length or depth]
    x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1])
    # x_tz_matmul is [length, batch_size * heads, length or depth]
    x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)
    # x_tz_matmul_r is [length, batch_size, heads, length or depth]
    x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1])
    # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]
    x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])
    return xy_matmul + x_tz_matmul_r_t
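A shape-only sketch of _relative_attention_inner with transpose=True (the query-key case), assuming TensorFlow 2.x eager execution and the function defined above; the operands are random stand-ins for queries, keys, and relative-position embeddings.

import tensorflow as tf

batch, heads, length, depth = 2, 4, 5, 8
x = tf.random.normal([batch, heads, length, depth])  # queries
y = tf.random.normal([batch, heads, length, depth])  # keys
z = tf.random.normal([length, length, depth])        # relative position embeddings
logits = _relative_attention_inner(x, y, z, transpose=True)
print(logits.shape)  # (2, 4, 5, 5)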
Example #18
Source File: common_layers.py From fine-lm with MIT License

def running_global_pool_1d(inputs, pooling_type="MAX"):
    """Same global pool, but only for the elements up to the current element.

    Useful for outputs where the state of future elements is not known.
    Takes no mask as all elements up to the current element are assumed to exist.
    Currently only supports maximum. Equivalent to using a lower triangle bias.

    Args:
        inputs: A tensor of shape [batch_size, sequence_length, input_dims]
            containing the sequences of input vectors.
        pooling_type: Pooling type to use. Currently only supports 'MAX'.

    Returns:
        A tensor of shape [batch_size, sequence_length, input_dims] containing the
        running 'totals'.
    """
    del pooling_type
    with tf.name_scope("running_global_pool", values=[inputs]):
        scan_fct = tf.maximum
        # Permute inputs so seq_length is first.
        elems = tf.transpose(inputs, [1, 0, 2])
        # Perform scan.
        cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
        # Permute output to get back to original order.
        output = tf.transpose(cumulatives, [1, 0, 2])
    return output
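A minimal numeric sketch of the transpose-scan-transpose idiom above, assuming TensorFlow 2.x eager execution (the name_scope(values=...) signature in the original is TF 1.x, so the core is repeated inline here). The running maximum of [1, 3, 2] is [1, 3, 3].

import tensorflow as tf

x = tf.constant([[[1.0], [3.0], [2.0]]])  # [batch=1, seq_len=3, dims=1]
elems = tf.transpose(x, [1, 0, 2])        # make seq_len the leading axis for tf.scan
running_max = tf.transpose(tf.scan(tf.maximum, elems), [1, 0, 2])
print(tf.squeeze(running_max).numpy())    # [1. 3. 3.]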
Example #19
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License

def lambda_return(reward, value, length, discount, lambda_):
    """TD-lambda returns."""
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    sequence = mask * reward + discount * value * (1 - lambda_)
    discount = mask * discount * lambda_
    sequence = tf.stack([sequence, discount], 2)
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * agg,
        tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
        tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
Example #20
Source File: common_attention.py From fine-lm with MIT License

def combine_heads_2d(x):
    """Inverse of split_heads_2d.

    Args:
        x: a Tensor with shape
            [batch, num_heads, height, width, channels / num_heads]

    Returns:
        a Tensor with shape [batch, height, width, channels]
    """
    return combine_last_two_dimensions(tf.transpose(x, [0, 2, 3, 1, 4]))
Example #21
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License

def discounted_return(reward, length, discount):
    """Discounted Monte-Carlo returns."""
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur + discount * agg,
        tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
        tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
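A minimal numeric sketch of the reverse-scan idiom above, assuming TensorFlow 2.x eager execution; the helper itself uses the TF 1.x reward.shape[1].value API, and the length mask and extra tf.scan arguments are omitted here because every timestep is valid. For rewards [1, 1, 1] and discount 0.9 the returns are roughly [2.71, 1.9, 1.0].

import tensorflow as tf

reward = tf.constant([[1.0, 1.0, 1.0]])  # [batch, time]
discount = 0.9
return_ = tf.reverse(tf.transpose(tf.scan(
    lambda agg, cur: cur + discount * agg,
    tf.transpose(tf.reverse(reward, [1]), [1, 0]),
    tf.zeros_like(reward[:, -1]))), [1])
print(return_.numpy())  # approximately [[2.71 1.9  1.  ]]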
Example #22
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License

def lambda_advantage(reward, value, length, discount):
    """Generalized Advantage Estimation."""
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
    delta = reward + discount * next_value - value
    advantage = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur + discount * agg,
        tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
        tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
Example #23
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License

def lambda_return(reward, value, length, discount, lambda_):
    """TD-lambda returns."""
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    sequence = mask * reward + discount * value * (1 - lambda_)
    discount = mask * discount * lambda_
    sequence = tf.stack([sequence, discount], 2)
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * agg,
        tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
        tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
Example #24
Source File: common_attention.py From fine-lm with MIT License

def _generate_relative_positions_matrix(length, max_relative_position):
    """Generates matrix of relative positions between inputs."""
    range_vec = tf.range(length)
    range_mat = tf.reshape(tf.tile(range_vec, [length]), [length, length])
    distance_mat = range_mat - tf.transpose(range_mat)
    distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
                                            max_relative_position)
    # Shift values to be >= 0. Each integer still uniquely identifies a relative
    # position difference.
    final_mat = distance_mat_clipped + max_relative_position
    return final_mat
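A small numeric sketch, assuming TensorFlow 2.x eager execution, for length=3 and max_relative_position=2; it repeats the body inline so the intermediate matrix is visible.

import tensorflow as tf

length, max_relative_position = 3, 2
range_vec = tf.range(length)
range_mat = tf.reshape(tf.tile(range_vec, [length]), [length, length])
distance_mat = range_mat - tf.transpose(range_mat)  # relative offsets j - i
final_mat = tf.clip_by_value(
    distance_mat, -max_relative_position, max_relative_position) + max_relative_position
print(final_mat.numpy())
# [[2 3 4]
#  [1 2 3]
#  [0 1 2]]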
Example #25
Source File: utility.py From soccer-matlab with BSD 2-Clause "Simplified" License

def discounted_return(reward, length, discount):
    """Discounted Monte-Carlo returns."""
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur + discount * agg,
        tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
        tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
    return tf.check_numerics(tf.stop_gradient(return_), 'return')
Example #26
Source File: utils.py From DOTA_models with Apache License 2.0

def compute_pairwise_distances(x, y):
    """Computes the squared pairwise Euclidean distances between x and y.

    Args:
        x: a tensor of shape [num_x_samples, num_features]
        y: a tensor of shape [num_y_samples, num_features]

    Returns:
        a distance matrix of dimensions [num_x_samples, num_y_samples].

    Raises:
        ValueError: if the inputs do not match the specified dimensions.
    """
    if not len(x.get_shape()) == len(y.get_shape()) == 2:
        raise ValueError('Both inputs should be matrices.')

    if x.get_shape().as_list()[1] != y.get_shape().as_list()[1]:
        raise ValueError('The number of features should be the same.')

    norm = lambda x: tf.reduce_sum(tf.square(x), 1)

    # By making the `inner' dimensions of the two matrices equal to 1 using
    # broadcasting then we are essentially subtracting every pair of rows
    # of x and y.
    # x will be num_samples x num_features x 1,
    # and y will be 1 x num_features x num_samples (after broadcasting).
    # After the subtraction we will get a
    # num_x_samples x num_features x num_y_samples matrix.
    # The resulting dist will be of shape num_y_samples x num_x_samples
    # and thus we need to transpose it again.
    return tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))
Example #27
Source File: objective.py From DOTA_models with Apache License 2.0

def discounted_two_sided_sum(values, discount, rollout):
    """Discounted two-sided sum of time-major values."""
    roll = float(rollout)
    discount_filter = tf.reshape(
        discount ** tf.abs(tf.range(-roll + 1, roll)), [-1, 1, 1])
    expanded_values = tf.concat(
        [tf.zeros([rollout - 1, tf.shape(values)[1]]), values,
         tf.zeros([rollout - 1, tf.shape(values)[1]])], 0)

    conv_values = tf.transpose(tf.squeeze(tf.nn.conv1d(
        tf.expand_dims(tf.transpose(expanded_values), -1), discount_filter,
        stride=1, padding='VALID'), -1))
    return conv_values
Example #28
Source File: objective.py From DOTA_models with Apache License 2.0

def discounted_future_sum(values, discount, rollout):
    """Discounted future sum of time-major values."""
    discount_filter = tf.reshape(
        discount ** tf.range(float(rollout)), [-1, 1, 1])
    expanded_values = tf.concat(
        [values, tf.zeros([rollout - 1, tf.shape(values)[1]])], 0)

    conv_values = tf.transpose(tf.squeeze(tf.nn.conv1d(
        tf.expand_dims(tf.transpose(expanded_values), -1), discount_filter,
        stride=1, padding='VALID'), -1))
    return conv_values
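A minimal numeric sketch of discounted_future_sum, assuming TensorFlow 2.x eager execution and the function defined above. values is time-major [time, batch]; with all-ones values, discount 0.5, and rollout 3 the sums are 1.75, 1.5, 1.0.

import tensorflow as tf

values = tf.ones([3, 1])  # [time=3, batch=1]
print(discounted_future_sum(values, 0.5, 3).numpy())
# [[1.75]
#  [1.5 ]
#  [1.  ]]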
Example #29
Source File: box_list_ops.py From DOTA_models with Apache License 2.0

def sq_dist(boxlist1, boxlist2, scope=None):
    """Computes the pairwise squared distances between box corners.

    This op treats each box as if it were a point in a 4d Euclidean space and
    computes pairwise squared distances.

    Mathematically, we are given two matrices of box coordinates X and Y,
    where X(i,:) is the i'th row of X, containing the 4 numbers defining the
    corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
    boxlist2. We compute
    Z(i,j) = ||X(i,:) - Y(j,:)||^2
           = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),

    Args:
        boxlist1: BoxList holding N boxes
        boxlist2: BoxList holding M boxes
        scope: name scope.

    Returns:
        a tensor with shape [N, M] representing pairwise distances
    """
    with tf.name_scope(scope, 'SqDist'):
        sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)
        sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)
        innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),
                              transpose_a=False, transpose_b=True)
        return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
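A minimal sketch of the ||X||^2 + ||Y||^2 - 2 X Y' identity used above, assuming TensorFlow 2.x eager execution and plain [N, 4] / [M, 4] corner matrices in place of BoxLists (keepdims replaces the TF 1.x keep_dims argument).

import tensorflow as tf

corners1 = tf.constant([[0.0, 0.0, 1.0, 1.0]])                        # N = 1
corners2 = tf.constant([[0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 2.0, 2.0]])  # M = 2
sqnorm1 = tf.reduce_sum(tf.square(corners1), 1, keepdims=True)        # [N, 1]
sqnorm2 = tf.reduce_sum(tf.square(corners2), 1, keepdims=True)        # [M, 1]
innerprod = tf.matmul(corners1, corners2, transpose_b=True)           # [N, M]
print((sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod).numpy())    # [[0. 4.]]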
Example #30
Source File: common_attention.py From fine-lm with MIT License

def gather_blocks_2d(x, indices):
    """Gathers flattened blocks from x."""
    x_shape = common_layers.shape_list(x)
    x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
    # [length, batch, heads, dim]
    x_t = tf.transpose(x, [2, 0, 1, 3])
    x_new = tf.gather(x_t, indices)
    # returns [batch, heads, num_blocks, block_length ** 2, dim]
    return tf.transpose(x_new, [2, 3, 0, 1, 4])