Python tensorflow.gather_nd() Examples
The following are 30 code examples of tensorflow.gather_nd(), drawn from open-source projects. Each example is preceded by the source file it comes from, the project it belongs to, and that project's license, so you can trace it back to its original context.
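Before diving into the examples, a quick reminder of the semantics: tf.gather_nd(params, indices) treats each row of indices as one (full or partial) coordinate into params. A minimal sketch, with values chosen here purely for illustration:

import tensorflow as tf

params = tf.constant([[1, 2],
                      [3, 4],
                      [5, 6]])
# Each row of `indices` is one complete coordinate into `params`.
indices = tf.constant([[0, 1],
                       [2, 0]])
print(tf.gather_nd(params, indices))  # -> [2, 5]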
Example #1
Source File: misc_test.py From model-optimization with Apache License 2.0

def test_empty_input_dynamic(self):
    # Tests that the encoding works when the input shape is [0], but not
    # statically known.
    y = tf.zeros((10,))
    indices = tf.compat.v2.where(tf.abs(y) > 1e-8)
    x = tf.gather_nd(y, indices)
    x = tf.cast(x, tf.int32)  # Empty tensor.
    assert x.shape.as_list() == [None]

    stage = self.default_encoding_stage()
    encode_params, decode_params = stage.get_params()
    encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                decode_params)
    test_data = self.evaluate_test_data(
        test_utils.TestData(x, encoded_x, decoded_x))
    assert test_data.x.shape == (0,)
    assert test_data.encoded_x[stage.ENCODED_VALUES_KEY].shape == (0,)
    assert test_data.decoded_x.shape == (0,)
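The tf.where followed by tf.gather_nd combination above is a common way to keep only the elements matching a predicate: tf.where returns the coordinates of the True entries, and gather_nd collects the values at those coordinates. A standalone sketch of the pattern, with toy values rather than the test's inputs:

import tensorflow as tf

y = tf.constant([0.0, 1.5, 0.0, -2.0])
idx = tf.where(tf.not_equal(y, 0))   # coordinates of nonzero entries: [[1], [3]]
vals = tf.gather_nd(y, idx)          # -> [1.5, -2.0]
# Same result as tf.boolean_mask(y, tf.not_equal(y, 0)).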
Example #2
Source File: loss_utils.py From BERT with Apache License 2.0

def focal_loss_multi_v1(config, logits, labels):
    gamma = config.get("gamma", 2.0)
    labels = tf.cast(tf.expand_dims(labels, -1), tf.int32)
    predictions = tf.exp(tf.nn.log_softmax(logits, axis=-1))
    batch_idxs = tf.range(0, tf.shape(labels)[0])
    batch_idxs = tf.expand_dims(batch_idxs, 1)
    idxs = tf.concat([batch_idxs, labels], 1)
    y_true_pred = tf.gather_nd(predictions, idxs)
    labels = tf.cast(tf.squeeze(labels, axis=-1), tf.float32)
    losses = tf.log(y_true_pred + EPSILON) * tf.pow(1 - y_true_pred, gamma)
    return -losses, y_true_pred
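The idxs construction above, pairing a batch index with each label, is the standard trick for picking out each example's true-class probability. The same pattern in isolation, with hypothetical toy inputs:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5],
                      [0.1, 1.0]])
labels = tf.constant([0, 1])
probs = tf.nn.softmax(logits, axis=-1)
# Pair row number i with labels[i]: [[0, 0], [1, 1]].
idxs = tf.stack([tf.range(tf.shape(labels)[0]), labels], axis=1)
y_true_pred = tf.gather_nd(probs, idxs)  # p(true class) for each example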
Example #3
Source File: loss_utils.py From BERT with Apache License 2.0

def class_balanced_focal_loss_multi_v1(config, logits, labels, label_weights):
    gamma = config.get("gamma", 2.0)
    class_balanced_weights = tf.gather(label_weights, labels)
    labels = tf.cast(tf.expand_dims(labels, -1), tf.int32)
    predictions = tf.exp(tf.nn.log_softmax(logits, axis=-1))
    batch_idxs = tf.range(0, tf.shape(labels)[0])
    batch_idxs = tf.expand_dims(batch_idxs, 1)
    idxs = tf.concat([batch_idxs, labels], 1)
    y_true_pred = tf.gather_nd(predictions, idxs)
    losses = tf.log(y_true_pred + EPSILON) * tf.pow(1 - y_true_pred, gamma) * class_balanced_weights
    return -losses, predictions
Example #4
Source File: common_layers.py From fine-lm with MIT License

def argmax_with_score(logits, axis=None):
    """Argmax along with the value."""
    axis = axis or len(logits.get_shape()) - 1
    predictions = tf.argmax(logits, axis=axis)

    logits_shape = shape_list(logits)
    prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
    prefix_size = 1
    for d in prefix_shape:
        prefix_size *= d

    # Flatten to extract scores
    flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
    flat_predictions = tf.reshape(predictions, [prefix_size])
    flat_indices = tf.stack(
        [tf.range(tf.to_int64(prefix_size)),
         tf.to_int64(flat_predictions)],
        axis=1)
    flat_scores = tf.gather_nd(flat_logits, flat_indices)

    # Unflatten
    scores = tf.reshape(flat_scores, prefix_shape)
    return predictions, scores
Example #5
Source File: categorical_calibration_layer.py From lattice with Apache License 2.0

def call(self, inputs):
    """Standard Keras call() method."""
    if inputs.dtype not in [tf.uint8, tf.int32, tf.int64]:
        inputs = tf.cast(inputs, dtype=tf.int32)

    if self.default_input_value is not None:
        default_input_value_tensor = tf.constant(
            int(self.default_input_value),
            dtype=inputs.dtype,
            name=DEFAULT_INPUT_VALUE_NAME)
        replacement = tf.zeros_like(inputs) + (self.num_buckets - 1)
        inputs = tf.where(
            tf.equal(inputs, default_input_value_tensor), replacement, inputs)

    # We can't use tf.gather_nd(self.kernel, inputs) as it doesn't support
    # constraints (constraint functions are not supported for IndexedSlices).
    # Instead we use matrix multiplication by one-hot encoding of the index.
    if self.units == 1:
        # This can be slightly faster as it uses matmul.
        return tf.matmul(
            tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=self.num_buckets),
            self.kernel)
    return tf.reduce_sum(
        tf.one_hot(inputs, axis=1, depth=self.num_buckets) * self.kernel,
        axis=1)
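The comment in call() is worth unpacking: for integer indices, a one-hot matmul computes the same lookup as gather_nd while keeping the kernel in a form that Keras constraint functions can handle. A small sketch of the equivalence, using a toy kernel and inputs rather than anything from the lattice library:

import tensorflow as tf

kernel = tf.constant([[0.1], [0.5], [0.9]])  # [num_buckets, units=1]
inputs = tf.constant([[2], [0]])             # bucket ids, shape [batch, 1]
by_gather = tf.gather_nd(kernel, inputs)
by_matmul = tf.matmul(tf.one_hot(tf.squeeze(inputs, axis=-1), depth=3), kernel)
# by_gather == by_matmul == [[0.9], [0.1]]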
Example #6
Source File: select_dim_value.py From post--memorization-in-rnns with MIT License

def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]
        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])
        # then index as ([1,2,3,...,size], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t
Example #7
Source File: expert_utils.py From fine-lm with MIT License

def remove(self, x):
    """Remove padding from the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_origin,...]

    Returns:
      a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
        x_shape = x.get_shape().as_list()
        x = tf.gather_nd(
            x,
            indices=self.nonpad_ids,
        )
        if not tf.contrib.eager.in_eager_mode():
            # This is a hack, but for some reason gather_nd returns a tensor
            # of undefined shape, so the shape is set up manually.
            x.set_shape([None] + x_shape[1:])
    return x
Example #8
Source File: keras_words_subtoken_metrics.py From code2vec with MIT License

def _get_prediction_from_topk(self, topk_predicted_words):
    # apply given filter
    masks = []
    if self.predicted_words_filters is not None:
        masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
    if masks:
        # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
        legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
    else:
        legal_predicted_target_words_mask = tf.cast(
            tf.ones_like(topk_predicted_words), dtype=tf.bool)

    # the first legal predicted word is our prediction
    first_legal_predicted_target_word_mask = common.tf_get_first_true(
        legal_predicted_target_words_mask)
    first_legal_predicted_target_word_idx = tf.where(
        first_legal_predicted_target_word_mask)
    first_legal_predicted_word_string = tf.gather_nd(
        topk_predicted_words, first_legal_predicted_target_word_idx)

    prediction = tf.reshape(first_legal_predicted_word_string, [-1])
    return prediction
Example #9
Source File: keras_word_prediction_layer.py From code2vec with MIT License

def call(self, y_pred, **kwargs):
    y_pred.shape.assert_has_rank(2)
    top_k_pred_indices = tf.cast(tf.nn.top_k(y_pred, k=self.top_k).indices,
                                 dtype=self.index_to_word_table.key_dtype)
    predicted_target_words_strings = self.index_to_word_table.lookup(top_k_pred_indices)

    # apply given filter
    masks = []
    if self.predicted_words_filters is not None:
        masks = [fltr(top_k_pred_indices, predicted_target_words_strings)
                 for fltr in self.predicted_words_filters]
    if masks:
        # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
        legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
    else:
        legal_predicted_target_words_mask = tf.cast(
            tf.ones_like(top_k_pred_indices), dtype=tf.bool)

    # the first legal predicted word is our prediction
    first_legal_predicted_target_word_mask = common.tf_get_first_true(
        legal_predicted_target_words_mask)
    first_legal_predicted_target_word_idx = tf.where(
        first_legal_predicted_target_word_mask)
    first_legal_predicted_word_string = tf.gather_nd(
        predicted_target_words_strings, first_legal_predicted_target_word_idx)

    prediction = tf.reshape(first_legal_predicted_word_string, [-1])
    return prediction
Example #10
Source File: model.py From dataiku-contrib with Apache License 2.0

def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example #11
Source File: layers.py From PADME with MIT License

def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    if self.indices is None:
        if len(inputs) != 2:
            raise ValueError("Must have two parents")
        indices = inputs[1]
    if self.indices is not None:
        if len(inputs) != 1:
            raise ValueError("Must have one parent")
        indices = self.indices
    parent_tensor = inputs[0]
    out_tensor = tf.gather_nd(parent_tensor, indices)
    if set_tensors:
        self.out_tensor = out_tensor
    return out_tensor

# TODO: I didn't change this class.
Example #12
Source File: evaluate_model_utils.py From lanenet-lane-detection with Apache License 2.0

def calculate_model_fp(input_tensor, label_tensor):
    """
    calculate fp figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    false_pred = tf.cast(tf.shape(pix_cls_ret)[0], tf.int64) - tf.count_nonzero(
        tf.gather_nd(label_tensor, idx)
    )

    return tf.divide(false_pred, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64))
Example #13
Source File: expert_utils.py From BERT with Apache License 2.0

def remove(self, x):
    """Remove padding from the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_origin,...]

    Returns:
      a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
        x_shape = x.get_shape().as_list()
        x = tf.gather_nd(
            x,
            indices=self.nonpad_ids,
        )
        if not tf.executing_eagerly():
            # This is a hack, but for some reason gather_nd returns a tensor
            # of undefined shape, so the shape is set up manually.
            x.set_shape([None] + x_shape[1:])
    return x
Example #14
Source File: layers.py From Pixel2MeshPlusPlus with BSD 3-Clause "New" or "Revised" License

def bi_linear_sample(self, img_feat, n, x, y):
    x1 = tf.floor(x)
    x2 = tf.ceil(x)
    y1 = tf.floor(y)
    y2 = tf.ceil(y)
    # Gather the four neighboring feature vectors around each (x, y);
    # each index row is (batch n, row, column).
    Q11 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y1, tf.int32)], 1))
    Q12 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y2, tf.int32)], 1))
    Q21 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y1, tf.int32)], 1))
    Q22 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y2, tf.int32)], 1))

    # Standard bilinear interpolation weights, e.g. (x2 - x)(y2 - y) for Q11.
    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y2, y))
    Q11 = tf.multiply(tf.expand_dims(weights, 1), Q11)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y2, y))
    Q21 = tf.multiply(tf.expand_dims(weights, 1), Q21)
    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y, y1))
    Q12 = tf.multiply(tf.expand_dims(weights, 1), Q12)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y, y1))
    Q22 = tf.multiply(tf.expand_dims(weights, 1), Q22)
    outputs = tf.add_n([Q11, Q21, Q12, Q22])
    return outputs
Example #15
Source File: model_utils.py From Youtube-8M-WILLOW with Apache License 2.0

def SampleRandomFrames(model_input, num_frames, num_samples):
    """Samples a random set of frames of size num_samples.

    Args:
      model_input: A tensor of size batch_size x max_frames x feature_size
      num_frames: A tensor of size batch_size x 1
      num_samples: A scalar

    Returns:
      `model_input`: A tensor of size batch_size x num_samples x feature_size
    """
    batch_size = tf.shape(model_input)[0]
    frame_index = tf.cast(
        tf.multiply(
            tf.random_uniform([batch_size, num_samples]),
            tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])),
        tf.int32)
    batch_index = tf.tile(
        tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
    index = tf.stack([batch_index, frame_index], 2)
    return tf.gather_nd(model_input, index)
Example #16
Source File: core.py From lm-human-preferences with MIT License

def take_top_p_logits(logits, p):
    """Nucleus sampling"""
    batch, sequence, _ = logits.shape.as_list()
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch)[:, tf.newaxis],
        tf.range(0, sequence)[tf.newaxis, :],
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    )
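Here gather_nd fetches, for each (batch, sequence) position, the smallest logit still inside the top-p nucleus, which then becomes the cutoff for tf.where. A hand-checkable sketch of the same cutoff idea for a single distribution, with made-up logits and p = 0.9:

import tensorflow as tf

p = 0.9
logits = tf.constant([2.0, 1.0, 0.5, -1.0])
sorted_logits = tf.sort(logits, direction='DESCENDING')
cum_probs = tf.cumsum(tf.nn.softmax(sorted_logits))
# Index of the smallest logit still inside the nucleus.
cutoff = tf.maximum(tf.reduce_sum(tf.cast(cum_probs <= p, tf.int32)) - 1, 0)
min_value = sorted_logits[cutoff]
filtered = tf.where(logits < min_value, tf.ones_like(logits) * -1e10, logits)
# filtered == [2.0, 1.0, -1e10, -1e10]: only the top-p tokens survive.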
Example #17
Source File: core.py From lm-human-preferences with MIT License

def index_each(a, ix):
    """Do a batched indexing operation: index row i of a by ix[i]

    In the simple case (a is >=2D and ix is 1D), returns
    [row[i] for row, i in zip(a, ix)].

    If ix has more dimensions, multiple lookups will be done at each batch
    index. For instance, if ix is 2D, returns
    [[row[i] for i in ix_row] for row, ix_row in zip(a, ix)].

    Always indexes into dimension 1 of a.
    """
    a = tf.convert_to_tensor(a, name='a')
    ix = tf.convert_to_tensor(ix, name='ix', dtype=tf.int32)
    with tf.name_scope('index_each', values=[a, ix]) as scope:
        a.shape[:1].assert_is_compatible_with(ix.shape[:1])
        i0 = tf.range(tf.shape(a)[0], dtype=ix.dtype)
        if ix.shape.rank > 1:
            i0 = tf.tile(
                tf.reshape(i0, (-1,) + (1,) * (ix.shape.rank - 1)),
                tf.concat([[1], tf.shape(ix)[1:]], axis=0))
        return tf.gather_nd(a, tf.stack([i0, ix], axis=-1), name=scope)
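index_each is written against TF1 (tf.name_scope with a values argument), but the underlying pattern, pairing a row index with a per-row column index, works unchanged in TF2. A minimal sketch with made-up values:

import tensorflow as tf

a = tf.constant([[10., 20.],
                 [30., 40.],
                 [50., 60.]])
ix = tf.constant([1, 0, 1])
rows = tf.range(tf.shape(a)[0])
# [row[i] for row, i in zip(a, ix)]
out = tf.gather_nd(a, tf.stack([rows, ix], axis=1))  # -> [20., 30., 60.]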
Example #18
Source File: model.py From PanopticSegmentation with MIT License

def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example #19
Source File: LookupConvolution2d.py From tf-lcnn with GNU General Public License v3.0

def extract_dense_weights(sess):
    for key in dense_layers.keys():
        layer = dense_layers[key]

        # sparse kernel
        dense_kernel = layer.kernel
        dense_kernel_shape = dense_kernel.get_shape().as_list()
        # dense_kernel = tf.reshape(dense_kernel, [dense_kernel_shape[0] * dense_kernel_shape[1] * dense_kernel_shape[2],
        #                                          dense_kernel_shape[3]])
        # dense_kernel = tf.transpose(dense_kernel)
        idx = tf.where(tf.not_equal(dense_kernel, 0))
        sparse_kernel = tf.SparseTensor(idx, tf.gather_nd(dense_kernel, idx),
                                        dense_kernel.get_shape())

        if layer.bias is not None:
            dk, k, b = sess.run([dense_kernel, sparse_kernel, layer.bias])
        else:
            dk, k = sess.run([dense_kernel, sparse_kernel])
            b = None
        dense_weights['%s/%s' % (key, 'kernel_dense')] = dk
        dense_weights['%s/%s' % (key, 'kernel')] = k
        dense_weights['%s/%s' % (key, 'kernel_shape')] = dense_kernel_shape
        dense_weights['%s/%s' % (key, 'bias')] = b
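The same where/gather_nd pairing is how a dense tensor becomes a tf.SparseTensor, as extract_dense_weights does for the kernel above. A standalone sketch with a toy matrix:

import tensorflow as tf

dense = tf.constant([[0., 3.],
                     [4., 0.]])
idx = tf.where(tf.not_equal(dense, 0))         # coordinates of nonzero entries
sparse = tf.SparseTensor(
    indices=idx,
    values=tf.gather_nd(dense, idx),           # the nonzero values themselves
    dense_shape=tf.shape(dense, out_type=tf.int64))
# tf.sparse.to_dense(sparse) recovers `dense`.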
Example #20
Source File: transformer_utils.py From Counterfactual-StoryRW with MIT License

def remove(self, x):
    """Remove padding from the given tensor.

    Args:
        x: A Tensor of shape [dim_origin,...]

    Returns:
        A tensor of shape [dim_compressed,...] with
        dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
        x_shape = x.get_shape().as_list()
        x = tf.gather_nd(
            x,
            indices=self.nonpad_ids,
        )
        #if not context.in_eager_mode():
        # This is a hack, but for some reason gather_nd returns a tensor
        # of undefined shape, so the shape is set up manually.
        x.set_shape([None] + x_shape[1:])
    return x
Example #21
Source File: evaluate_model_utils.py From lanenet-lane-detection with Apache License 2.0

def calculate_model_fn(input_tensor, label_tensor):
    """
    calculate fn figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(label_tensor, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    label_cls_ret = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    mis_pred = tf.cast(tf.shape(label_cls_ret)[0], tf.int64) - tf.count_nonzero(pix_cls_ret)

    return tf.divide(mis_pred, tf.cast(tf.shape(label_cls_ret)[0], tf.int64))
Example #22
Source File: evaluate_model_utils.py From lanenet-lane-detection with Apache License 2.0

def calculate_model_precision(input_tensor, label_tensor):
    """
    calculate accuracy acc = correct_nums / ground_truth_nums
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(label_tensor, idx)
    accuracy = tf.count_nonzero(pix_cls_ret)
    accuracy = tf.divide(
        accuracy,
        tf.cast(tf.shape(tf.gather_nd(label_tensor,
                                      tf.where(tf.equal(label_tensor, 1))))[0],
                tf.int64))

    return accuracy
Example #23
Source File: tensor.py From dgl with Apache License 2.0

def take(data, indices, dim):
    return tf.gather_nd(data, indices, dim)
Example #24
Source File: dilated_pooling.py From onnx-tensorflow with Apache License 2.0

def _calc_input_ind(self, output_ind, kernel, dilation, stride):
    """
    This function maps an index from the output of _remove_dilations
    to an index in the original input, along a single axis. It is used
    to generate the correct indexes of the values to be extracted by
    gather_nd.

    Args:
        output_ind: vector with indices from the output to be mapped
        kernel:     kernel size along the axis
        dilation:   dilations along the axis
        stride:     strides along the axis

    Return:
        input_ind: calculated indices

    The formula is:
        input_ind = (output_ind // kernel) * stride +
                    (output_ind % kernel) * dilation

    Example:
      If we have the following 2D input to _remove_dilations:
          [[  0,  1,  2,  3],
           [  4,  5,  6,  7],
           [  8,  9, 10, 11],
           [ 12, 13, 14, 15]]
      and Kernel = [2, 2], Dilations = [2, 2], Strides = [1, 1],
      the output of _remove_dilations will have shape [4, 4] and
      _calc_input_ind will be called twice, for axis 0 (along height)
      and axis 1 (along width), with
          output_ind = [0, 1, 2, 3]
      which will result in:
          input_ind = [0, 2, 1, 3]
    """
    return (output_ind // kernel) * (stride - kernel * dilation) + \
        output_ind * dilation
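The return expression is an algebraic rearrangement of the formula in the docstring: since output_ind % kernel == output_ind - (output_ind // kernel) * kernel, the docstring form (output_ind // kernel) * stride + (output_ind % kernel) * dilation expands to (output_ind // kernel) * (stride - kernel * dilation) + output_ind * dilation. A quick plain-Python check of the two forms, using the docstring's kernel/dilation/stride values:

kernel, dilation, stride = 2, 2, 1
for output_ind in range(8):
    docstring_form = (output_ind // kernel) * stride + (output_ind % kernel) * dilation
    code_form = (output_ind // kernel) * (stride - kernel * dilation) + output_ind * dilation
    assert docstring_form == code_form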
Example #25
Source File: model.py From PanopticSegmentation with MIT License

def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
Example #26
Source File: nuelus_sampling_utils.py From BERT with Apache License 2.0

def reorder(ref, ref_indices):
    def prepare_fd(fd_indices, sd_dims):
        fd_indices = tf.expand_dims(fd_indices, 1)
        fd_indices = tf.tile(fd_indices, [1, sd_dims])
        return tf.cast(fd_indices, tf.int32)

    fd_indices_range = tf.range(0, limit=tf.shape(ref)[0])
    sd_dims = tf.shape(ref_indices)[1]
    pp = prepare_fd(fd_indices_range, sd_dims)
    indices = tf.stack((pp, ref_indices), axis=2)
    updates_ref = tf.gather_nd(ref, indices)
    return updates_ref
Example #27
Source File: model.py From PanopticSegmentation with MIT License

def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example #28
Source File: token_generator_hmm.py From BERT with Apache License 2.0

def static_span_mask(batch_size, seq_len, hmm_tran_prob):
    state_seq = []
    tran_size = bert_utils.get_shape_list(hmm_tran_prob, expected_rank=[2])
    valid_init_state_mask = tf.expand_dims(
        tf.cast(tf.not_equal(hmm_tran_prob, 0)[:, 0], tf.float32), axis=0)
    init_state_prob = tf.random_uniform(
        [batch_size, tran_size[0]], minval=0.0, maxval=10.0, dtype=tf.float32)
    init_state_prob *= valid_init_state_mask
    cur_state = tf.multinomial(
        tf.log(init_state_prob) + 1e-10,
        num_samples=1, output_dtype=tf.int32)  # batch x 1
    # cur_state = tf.cast(start_index*tf.ones((batch_size, 1)), dtype=tf.int32)

    def hmm_recurrence(i, cur_state):
        current_prob = tf.gather_nd(hmm_tran_prob, cur_state)
        next_state = tf.multinomial(
            tf.log(current_prob + 1e-10),
            num_samples=1, output_dtype=tf.int32)
        return i + 1, next_state

    for i in range(seq_len):
        _, state = hmm_recurrence(i, cur_state)
        state_seq.append(tf.squeeze(state))

    state = tf.stack(state_seq, axis=1)
    span_mask = tf.cast(tf.not_equal(state, 0), tf.int32)
    return state, span_mask
Example #29
Source File: model_utils.py From Youtube-8M-WILLOW with Apache License 2.0

def SampleRandomSequence(model_input, num_frames, num_samples):
    """Samples a random sequence of frames of size num_samples.

    Args:
      model_input: A tensor of size batch_size x max_frames x feature_size
      num_frames: A tensor of size batch_size x 1
      num_samples: A scalar

    Returns:
      `model_input`: A tensor of size batch_size x num_samples x feature_size
    """
    batch_size = tf.shape(model_input)[0]
    frame_index_offset = tf.tile(
        tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1])
    max_start_frame_index = tf.maximum(num_frames - num_samples, 0)
    start_frame_index = tf.cast(
        tf.multiply(
            tf.random_uniform([batch_size, 1]),
            tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32)
    frame_index = tf.minimum(start_frame_index + frame_index_offset,
                             tf.cast(num_frames - 1, tf.int32))
    batch_index = tf.tile(
        tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
    index = tf.stack([batch_index, frame_index], 2)
    return tf.gather_nd(model_input, index)
Example #30
Source File: model.py From deeping-flow with MIT License

def _gather_index(props, tgt, prev):
    tgt = tf.reshape(tgt, [-1, 1])
    index = tf.concat((prev, tgt), -1)
    return tf.gather_nd(props, index)