Python tensorflow.boolean_mask() Examples
The following are 30 code examples of tensorflow.boolean_mask().
You can go to the original project or source file by following the link above each example.
You may also want to check out all available functions and classes of the tensorflow module, or try the search function.
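Before diving into the project examples, here is a minimal sketch of what tf.boolean_mask() does on its own (TF 2.x eager execution assumed; this snippet is illustrative and not taken from any of the projects below):

import tensorflow as tf

# 1-D tensor with a 1-D mask: keep elements where the mask is True.
x = tf.constant([1, 2, 3, 4])
print(tf.boolean_mask(x, [True, False, True, False]).numpy())   # [1 3]

# 2-D tensor with a 1-D mask: selects whole rows along axis 0.
m = tf.constant([[1, 2], [3, 4], [5, 6]])
print(tf.boolean_mask(m, [True, False, True]).numpy())          # [[1 2] [5 6]]

When the mask has the same rank as the tensor, the selected elements are returned flattened; the project examples below use both forms.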
Example #1
Source File: test_tiny_yolo.py From object-detection with MIT License | 21 votes |
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    # Compute box scores
    box_scores = box_confidence * box_class_probs

    # Find the box_classes thanks to the max box_scores, keep track of the corresponding score
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)

    # Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
    # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
    filtering_mask = box_class_scores >= threshold

    # Apply the mask to scores, boxes and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)

    return scores, boxes, classes
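A quick usage sketch for the function above. The tensor shapes (19x19 grid, 5 anchors per cell, 80 classes) are assumptions for illustration, not values from the original project; it assumes TF 2.x eager execution with the Keras backend imported as K:

import tensorflow as tf
from tensorflow import keras

K = keras.backend

# Illustrative inputs with assumed shapes.
box_confidence = tf.random.uniform((19, 19, 5, 1))
boxes = tf.random.uniform((19, 19, 5, 4))
box_class_probs = tf.random.uniform((19, 19, 5, 80))

scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.6)
print(scores.shape, boxes.shape, classes.shape)  # (N,), (N, 4), (N,) for the N surviving boxes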
Example #2
Source File: models.py From tf2-yolo3 with Apache License 2.0 | 8 votes |
def yolo_nms(outputs, anchors, masks, num_classes, iou_threshold=0.6, score_threshold=0.15):
    boxes, confs, classes = [], [], []

    for o in outputs:
        boxes.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        confs.append(tf.reshape(o[1], (tf.shape(o[0])[0], -1, tf.shape(o[1])[-1])))
        classes.append(tf.reshape(o[2], (tf.shape(o[0])[0], -1, tf.shape(o[2])[-1])))

    boxes = tf.concat(boxes, axis=1)
    confs = tf.concat(confs, axis=1)
    class_probs = tf.concat(classes, axis=1)

    box_scores = confs * class_probs

    mask = box_scores >= score_threshold
    mask = tf.reduce_any(mask, axis=-1)

    class_boxes = tf.boolean_mask(boxes, mask)
    class_boxes = tf.reshape(class_boxes, (tf.shape(boxes)[0], -1, 4))
    class_box_scores = tf.boolean_mask(box_scores, mask)
    class_box_scores = tf.reshape(class_box_scores, (tf.shape(boxes)[0], -1, num_classes))

    class_boxes, class_box_scores = tf.py_function(
        func=batched_nms,
        inp=[class_boxes, class_box_scores, num_classes, iou_threshold],
        Tout=[tf.float32, tf.float32])

    classes = tf.argmax(class_box_scores, axis=-1)

    return class_boxes, class_box_scores, classes
Example #3
Source File: model.py From DOTA_models with Apache License 2.0 | 6 votes |
def char_predictions(self, chars_logit):
    """Returns confidence scores (softmax values) for predicted characters.

    Args:
      chars_logit: chars logits, a tensor with shape
        [batch_size x seq_length x num_char_classes]

    Returns:
      A tuple (ids, log_prob, scores), where:
        ids - predicted characters, an int32 tensor with shape
          [batch_size x seq_length];
        log_prob - a log probability of all characters, a float tensor with
          shape [batch_size, seq_length, num_char_classes];
        scores - corresponding confidence scores for characters, a float
          tensor with shape [batch_size x seq_length].
    """
    log_prob = utils.logits_to_log_prob(chars_logit)
    ids = tf.to_int32(tf.argmax(log_prob, dimension=2), name='predicted_chars')
    mask = tf.cast(
        slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
    all_scores = tf.nn.softmax(chars_logit)
    selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
    scores = tf.reshape(selected_scores, shape=(-1, self._params.seq_length))
    return ids, log_prob, scores
Example #4
Source File: ner_model.py From robotreviewer with GNU General Public License v3.0 | 6 votes |
def add_loss_op(self):
    """Defines the loss"""
    if self.config.use_crf:
        log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
            self.logits, self.labels, self.sequence_lengths)
        self.trans_params = trans_params  # need to evaluate it for decoding
        self.loss = tf.reduce_mean(-log_likelihood)
    else:
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.labels)
        mask = tf.sequence_mask(self.sequence_lengths)
        losses = tf.boolean_mask(losses, mask)
        self.loss = tf.reduce_mean(losses)

    # for tensorboard
    tf.summary.scalar("loss", self.loss)
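A self-contained sketch of the non-CRF branch above (TF 2.x, toy values assumed) showing how tf.sequence_mask plus tf.boolean_mask averages the loss over real timesteps only, ignoring padded positions:

import tensorflow as tf

# Toy batch: 2 sequences padded to length 4, with real lengths 3 and 2.
logits = tf.random.normal((2, 4, 5))                 # [batch, time, num_tags]
labels = tf.constant([[1, 2, 0, 0], [3, 1, 0, 0]])
sequence_lengths = tf.constant([3, 2])

losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)  # [2, 4]
mask = tf.sequence_mask(sequence_lengths, maxlen=tf.shape(labels)[1])                  # [2, 4], bool
loss = tf.reduce_mean(tf.boolean_mask(losses, mask))  # mean over the 5 unpadded positions only
print(float(loss))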
Example #5
Source File: loss.py From mobile-segmentation with Apache License 2.0 | 6 votes |
def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = tf.reshape(scores, (-1,))
    labels = tf.reshape(labels, (-1,))
    if ignore is None:
        return scores, labels
    valid = tf.not_equal(labels, ignore)
    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
    return vscores, vlabels

# --------------------------- MULTICLASS LOSSES ---------------------------
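A short usage check for flatten_binary_scores (assumes the function above and TF 2.x eager mode; using 255 as the ignore label is an arbitrary choice for illustration):

import tensorflow as tf

scores = tf.constant([[0.9, 0.1], [0.4, 0.8]])
labels = tf.constant([[1, 255], [0, 1]])   # 255 marks pixels to ignore
vscores, vlabels = flatten_binary_scores(scores, labels, ignore=255)
print(vscores.numpy(), vlabels.numpy())    # [0.9 0.4 0.8] [1 0 1]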
Example #6
Source File: loss.py From mobile-segmentation with Apache License 2.0 | 6 votes |
def flatten_probas(probas, labels, ignore=None, order='BHWC'):
    """
    Flattens predictions in the batch
    """
    if len(probas.shape) == 3:
        probas, order = tf.expand_dims(probas, 3), 'BHWC'
    if order == 'BCHW':
        probas = tf.transpose(probas, (0, 2, 3, 1), name="BCHW_to_BHWC")
        order = 'BHWC'
    if order != 'BHWC':
        raise NotImplementedError('Order {} unknown'.format(order))
    C = probas.shape[3]
    probas = tf.reshape(probas, (-1, C))
    labels = tf.reshape(labels, (-1,))
    if ignore is None:
        return probas, labels
    valid = tf.not_equal(labels, ignore)
    vprobas = tf.boolean_mask(probas, valid, name='valid_probas')
    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
    return vprobas, vlabels
Example #7
Source File: faster_rcnn_meta_arch.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def _get_refined_encodings_for_postitive_class(
        self, refined_box_encodings, flat_cls_targets_with_background,
        batch_size):
    # We only predict refined location encodings for the non background
    # classes, but we now pad it to make it compatible with the class
    # predictions
    refined_box_encodings_with_background = tf.pad(refined_box_encodings,
                                                   [[0, 0], [1, 0], [0, 0]])
    refined_box_encodings_masked_by_class_targets = (
        box_list_ops.boolean_mask(
            box_list.BoxList(
                tf.reshape(refined_box_encodings_with_background,
                           [-1, self._box_coder.code_size])),
            tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]),
            use_static_shapes=self._use_static_shapes,
            indicator_sum=batch_size * self.max_num_proposals
            if self._use_static_shapes else None).get())
    return tf.reshape(
        refined_box_encodings_masked_by_class_targets,
        [batch_size, self.max_num_proposals, self._box_coder.code_size])
Example #8
Source File: anchor.py From keras-ctpn with Apache License 2.0 | 6 votes |
def filter_out_of_bound_boxes(boxes, feature_shape, stride):
    """
    Filter out anchors that fall outside the image boundary.
    :param boxes: [n, (y1, x1, y2, x2)]
    :param feature_shape: feature map height and width, [h, w]
    :param stride: network stride
    :return:
    """
    # The original image size is the feature map size times the stride.
    h, w = feature_shape[0], feature_shape[1]
    h = tf.cast(h * stride, tf.float32)
    w = tf.cast(w * stride, tf.float32)

    valid_boxes_tag = tf.logical_and(
        tf.logical_and(tf.logical_and(boxes[:, 0] >= 0, boxes[:, 1] >= 0),
                       boxes[:, 2] <= h),
        boxes[:, 3] <= w)
    boxes = tf.boolean_mask(boxes, valid_boxes_tag)
    valid_boxes_indices = tf.where(valid_boxes_tag)[:, 0]

    return boxes, valid_boxes_indices
Example #9
Source File: augmentation.py From ffn with Apache License 2.0 | 6 votes |
def reflection(data, decision):
    """Conditionally reflects the data in XYZ.

    Args:
      data: input tensor, shape: [..], z, y, x, c
      decision: boolean tensor, shape 3, indicating on which spatial dimensions
        to apply the reflection (x, y, z)

    Returns:
      TF op to conditionally apply reflection.
    """
    with tf.name_scope('augment_reflection'):
        rank = data.get_shape().ndims
        spatial_dims = tf.constant([rank - 2, rank - 3, rank - 4])
        selected_dims = tf.boolean_mask(spatial_dims, decision)
        return tf.reverse(data, selected_dims)
Example #10
Source File: ssd_augmenter.py From lambda-deep-learning-demo with Apache License 2.0 | 6 votes |
def bboxes_filter_overlap(labels, bboxes, threshold=0.5, assign_negative=False, scope=None):
    """Filter out bounding boxes based on (relative) overlap with the reference
    box [0, 0, 1, 1]. Completely remove bounding boxes, or assign negative
    labels to the ones outside (useful for later processing...).

    Return:
      labels, bboxes: Filtered (or newly assigned) elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        scores = bboxes_intersection(tf.constant([0, 0, 1, 1], bboxes.dtype),
                                     bboxes)
        mask = scores > threshold
        mask.set_shape([None])
        if assign_negative:
            labels = tf.where(mask, labels, -labels)
            # bboxes = tf.where(mask, bboxes, bboxes)
        else:
            labels = tf.boolean_mask(labels, mask)
            bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
Example #11
Source File: encoder.py From atec-nlp with MIT License | 6 votes |
def _last_relevant(outputs, sequence_length):
    """Deprecated"""
    batch_size = tf.shape(outputs)[0]
    max_length = outputs.get_shape()[1]
    output_size = outputs.get_shape()[2]
    index = tf.range(0, batch_size) * max_length + (sequence_length - 1)
    flat = tf.reshape(outputs, [-1, output_size])
    last_timesteps = tf.gather(flat, index)  # very slow

    # mask = tf.sign(index)
    # last_timesteps = tf.boolean_mask(flat, mask)

    # # Creating a vector of 0s and 1s that will specify what timesteps to choose.
    # partitions = tf.reduce_sum(tf.one_hot(index, tf.shape(flat)[0], dtype='int32'), 0)
    # # Selecting the elements we want to choose.
    # _, last_timesteps = tf.dynamic_partition(flat, partitions, 2)  # (batch_size, n_dim)
    # https://stackoverflow.com/questions/35892412/tensorflow-dense-gradient-explanation
    return last_timesteps
Example #12
Source File: compress.py From onnx-tensorflow with Apache License 2.0 | 6 votes |
def _common(cls, node, **kwargs):
    attrs = copy.deepcopy(node.attrs)
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    condition = tensor_dict[node.inputs[1]]
    x = tf.reshape(x, [-1]) if node.attrs.get("axis") is None else x

    if condition.shape.is_fully_defined():
        condition_shape = condition.shape[0]
        indices = tf.constant(list(range(condition_shape)), dtype=tf.int64)
    else:
        condition_shape = tf.shape(condition, out_type=tf.int64)[0]
        indices = tf.range(condition_shape, dtype=tf.int64)
    not_zero = tf.not_equal(condition, tf.zeros_like(condition))
    attrs['indices'] = tf.boolean_mask(indices, not_zero)
    return [
        cls.make_tensor_from_onnx_node(node, inputs=[x], attrs=attrs, **kwargs)
    ]
Example #13
Source File: bboxes.py From seglink with GNU General Public License v3.0 | 6 votes |
def bboxes_filter_overlap(labels, bboxes, xs, ys, threshold, scope=None, assign_negative=False):
    """Filter out bounding boxes based on (relative) overlap with the reference
    box [0, 0, 1, 1]. Completely remove bounding boxes, or assign negative
    labels to the ones outside (useful for later processing...).

    Return:
      labels, bboxes: Filtered (or newly assigned) elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        scores = bboxes_intersection(tf.constant([0, 0, 1, 1], bboxes.dtype),
                                     bboxes)
        mask = scores > threshold
        if assign_negative:
            labels = tf.where(mask, labels, -labels)
        else:
            labels = tf.boolean_mask(labels, mask)
            bboxes = tf.boolean_mask(bboxes, mask)
            scores = bboxes_intersection(tf.constant([0, 0, 1, 1], bboxes.dtype),
                                         bboxes)
            xs = tf.boolean_mask(xs, mask)
            ys = tf.boolean_mask(ys, mask)
        return labels, bboxes, xs, ys
Example #14
Source File: ops.py From spektral with MIT License | 6 votes |
def repeat(x, repeats):
    """
    Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D tensors).
    :param x: rank 1 Tensor;
    :param repeats: rank 1 Tensor with same shape as x, the number of repetitions for each element;
    :return: rank 1 Tensor, of shape `(sum(repeats), )`.
    """
    x = tf.expand_dims(x, 1)
    max_repeats = tf.reduce_max(repeats)
    tile_repeats = [1, max_repeats]
    arr_tiled = tf.tile(x, tile_repeats)
    mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1))
    result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1])
    return result
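A quick check of the repeat helper in TF 2.x eager mode (assumes the function above and import tensorflow as tf); each element is tiled to the maximum repeat count and the surplus copies are dropped with tf.boolean_mask:

import tensorflow as tf

x = tf.constant([10, 20, 30])
repeats = tf.constant([2, 1, 3])
print(repeat(x, repeats).numpy())  # [10 10 20 30 30 30], matching np.repeat(x, repeats)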
Example #15
Source File: corruption.py From ConMask with MIT License | 6 votes |
def corrupt_single_relationship(triple: tf.Tensor, all_triples: tf.Tensor, max_range: int, name=None):
    """Corrupt the relationship by sampling from [0, max_range].

    :param triple:
    :param all_triples:
    :param max_range:
    :param name:
    :return: corrupted 1-d [h, r, t] triple
    """
    with tf.name_scope(name, 'corrupt_single_relation', [triple, all_triples]):
        h, r, t = tf.unstack(triple, name='unstack_triple')
        head_mask = tf.equal(all_triples[:, 0], h, name='head_mask')
        head_matched_triples = tf.boolean_mask(all_triples[:, 1:], head_mask,
                                               name='head_matched_triples')
        tail_mask = tf.equal(head_matched_triples[:, 1], t, name='tail_mask')
        true_rels = tf.boolean_mask(head_matched_triples[:, 0], tail_mask)
        corrupted_rel = tf.reshape(single_negative_sampling(true_rels, max_range), ())
        return tf.stack([h, corrupted_rel, t], name='rel_corrupted_triple')
Example #16
Source File: detector.py From blitznet with MIT License | 5 votes |
def build_detector(self):
    img_size = self.config['image_size']
    self.image_ph = tf.placeholder(shape=[None, None, 3],
                                   dtype=tf.float32, name='img_ph')
    self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')

    img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
                                   (img_size, img_size))
    self.net.create_trunk(img)

    if args.detect:
        self.net.create_multibox_head(self.loader.num_classes)
        confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
        location = tf.squeeze(self.net.outputs['location'])
        self.nms(location, confidence, self.bboxer.tiling)

    if args.segment:
        self.net.create_segmentation_head(self.loader.num_classes)
        self.segmentation = self.net.outputs['segmentation']
        seg_shape = tf.shape(self.image_ph)[:2]
        self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)

        self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
        self.segmentation = tf.reshape(self.segmentation, seg_shape)
        self.segmentation.set_shape([None, None])

        if not self.no_gt:
            easy_mask = self.seg_ph <= self.loader.num_classes
            predictions = tf.boolean_mask(self.segmentation, easy_mask)
            labels = tf.boolean_mask(self.seg_ph, easy_mask)
            self.mean_iou, self.iou_update = mean_iou(predictions, labels,
                                                      self.loader.num_classes)
        else:
            self.mean_iou = tf.constant(0)
            self.iou_update = tf.constant(0)
Example #17
Source File: auto_reset.py From ape-x with Apache License 2.0 | 5 votes |
def step(self, action, indices=None, name=None):
    rew, done = self.env.step(action=action, indices=indices, name=name)
    if indices is None:
        indices = np.arange(self.batch_size, dtype=np.int32)
    done_idxs = tf.boolean_mask(indices, done)
    with tf.control_dependencies([self.reset(done_idxs, max_frames=self.max_frames)]):
        return tf.identity(rew), tf.identity(done)
Example #18
Source File: model.py From EasyPR-python with Apache License 2.0 | 5 votes |
def trim_zeros_graph(boxes):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.

    TODO: use this function to reduce code duplication
    """
    area = tf.boolean_mask(boxes, tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool))
Example #19
Source File: utils_tf.py From blitznet with MIT License | 5 votes |
def filter_small_gt(gt_bboxes, gt_cats, min_size):
    mask = tf.logical_and(gt_bboxes[:, 2] >= min_size,
                          gt_bboxes[:, 3] >= min_size)
    return tf.boolean_mask(gt_bboxes, mask), tf.boolean_mask(gt_cats, mask)
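A toy call of filter_small_gt (TF 2.x eager mode, function above assumed). The function only checks columns 2 and 3 of each box as sizes, so the exact box layout used here is an assumption for illustration:

import tensorflow as tf

gt_bboxes = tf.constant([[0.1, 0.1, 0.30, 0.40],
                         [0.2, 0.2, 0.01, 0.50],
                         [0.3, 0.3, 0.20, 0.02]])
gt_cats = tf.constant([1, 2, 3])

boxes, cats = filter_small_gt(gt_bboxes, gt_cats, min_size=0.05)
print(cats.numpy())  # [1]: only the first box is at least 0.05 in both size columns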
Example #20
Source File: loss.py From hart with GNU General Public License v3.0 | 5 votes |
def _mask(expr, mask):
    assert mask.dtype == tf.bool, '`mask`.dtype has to be tf.bool'
    mask_rank = tf.rank(mask)
    sample_shape = tf.shape(expr)[mask_rank:]
    flat_shape = tf.concat(([-1], sample_shape), 0)
    flat_expr = tf.reshape(expr, flat_shape)
    flat_mask = tf.reshape(mask, (-1,))
    return tf.boolean_mask(flat_expr, flat_mask)
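A quick eager-mode check of the _mask helper with toy shapes (assumes the function above and import tensorflow as tf): a rank-2 mask over a rank-3 tensor keeps whole feature vectors.

import tensorflow as tf

expr = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))      # [batch, time, features]
mask = tf.constant([[True, False, True], [False, True, False]])   # one flag per (batch, time) entry
print(_mask(expr, mask).shape)  # (3, 4): the three selected feature vectors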
Example #21
Source File: model_utils.py From nucleus7 with Mozilla Public License 2.0 | 5 votes |
def select_inputs_by_sample_mask(
        sample_mask: tf.Tensor,
        keys_to_exclude_from_sample_mask: Optional[List[str]] = None,
        **inputs) -> Dict[str, tf.Tensor]:
    """
    Select inputs by masking out samples with sample_mask == 0

    Parameters
    ----------
    sample_mask
        tensor of shape [batch_size], where 1 indicates that the sample should
        be kept as is and 0 that it should be removed
    keys_to_exclude_from_sample_mask
        list of keys that will not be masked using sample_mask
    **inputs
        inputs to mask

    Returns
    -------
    masked_inputs
        inputs masked sample-wise
    """
    inputs_flatten = nest_utils.flatten_nested_struct(inputs)
    inputs_masked_flatten = {}
    keys_to_exclude = keys_to_exclude_from_sample_mask or []
    for each_key, each_value in inputs_flatten.items():
        if each_key in keys_to_exclude:
            inputs_masked_flatten[each_key] = each_value
        else:
            inputs_masked_flatten[each_key] = tf.boolean_mask(
                each_value, sample_mask)
    inputs_masked = nest_utils.unflatten_dict_to_nested(
        inputs_masked_flatten)
    return inputs_masked
Example #22
Source File: loss_utils.py From BERT with Apache License 2.0 | 5 votes |
def spread_loss(config, labels, activations, margin, **kargs):
    activations_shape = activations.get_shape().as_list()

    mask_t = tf.equal(labels, 1)
    mask_i = tf.equal(labels, 0)
    activations_t = tf.reshape(
        tf.boolean_mask(activations, mask_t),
        [activations_shape[0], 1])
    activations_i = tf.reshape(
        tf.boolean_mask(activations, mask_i),
        [activations_shape[0], activations_shape[1] - 1])

    gap_mit = tf.reduce_sum(tf.square(tf.nn.relu(margin - (activations_t - activations_i))))
    return gap_mit
Example #23
Source File: keras_yolo.py From object-detection with MIT License | 5 votes |
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)
    return boxes, scores, classes
Example #24
Source File: model.py From object-detection with MIT License | 5 votes |
def yolo_filter_boxes(boxes, box_scores, box_class_probs, threshold=.6):
    # Find the box_classes thanks to the max box_scores, keep track of the corresponding score
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)

    # Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
    # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
    filtering_mask = box_class_scores >= threshold  # (3549, 3)

    # Apply the mask to scores, boxes and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)

    return scores, boxes, classes
Example #25
Source File: loss.py From mobile-segmentation with Apache License 2.0 | 5 votes |
def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
    """
    C = probas.shape[1]
    losses = []
    present = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        # foreground for class c
        fg = tf.cast(tf.equal(labels, c), probas.dtype)
        if classes == 'present':
            present.append(tf.reduce_sum(fg) > 0)
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = tf.abs(fg - class_pred)
        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0],
                                          name="descending_sort_{}".format(c))
        fg_sorted = tf.gather(fg, perm)
        grad = lovasz_grad(fg_sorted)
        losses.append(
            tf.tensordot(errors_sorted, tf.stop_gradient(grad), 1,
                         name="loss_class_{}".format(c))
        )
    if len(class_to_sum) == 1:  # short-circuit mean when only one class
        return losses[0]
    losses_tensor = tf.stack(losses)
    if classes == 'present':
        present = tf.stack(present)
        losses_tensor = tf.boolean_mask(losses_tensor, present)
    loss = tf.reduce_mean(losses_tensor)
    return loss
Example #26
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def boolean_mask(boxlist, indicator, fields=None, scope=None):
    """Select boxes from BoxList according to indicator and return new BoxList.

    `boolean_mask` returns the subset of boxes that are marked as "True" by the
    indicator tensor. By default, `boolean_mask` returns boxes corresponding to
    the input index list, as well as all additional fields stored in the boxlist
    (indexing into the first dimension). However one can optionally only draw
    from a subset of fields.

    Args:
      boxlist: BoxList holding N boxes
      indicator: a rank-1 boolean tensor
      fields: (optional) list of fields to also gather from. If None (default),
        all fields are gathered from. Pass an empty fields list to only gather
        the box coordinates.
      scope: name scope.

    Returns:
      subboxlist: a BoxList corresponding to the subset of the input BoxList
        specified by indicator
    Raises:
      ValueError: if `indicator` is not a rank-1 boolean tensor.
    """
    with tf.name_scope(scope, 'BooleanMask'):
        if indicator.shape.ndims != 1:
            raise ValueError('indicator should have rank 1')
        if indicator.dtype != tf.bool:
            raise ValueError('indicator should be a boolean tensor')
        subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
        if fields is None:
            fields = boxlist.get_extra_fields()
        for field in fields:
            if not boxlist.has_field(field):
                raise ValueError('boxlist must contain all specified fields')
            subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
            subboxlist.add_field(field, subfieldlist)
        return subboxlist
Example #27
Source File: faster_rcnn_meta_arch.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def _sample_box_classifier_minibatch(self,
                                     proposal_boxlist,
                                     groundtruth_boxlist,
                                     groundtruth_classes_with_background):
    """Samples a mini-batch of proposals to be sent to the box classifier.

    Helper function for self._postprocess_rpn.

    Args:
      proposal_boxlist: A BoxList containing K proposal boxes in absolute
        coordinates.
      groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
        absolute coordinates.
      groundtruth_classes_with_background: A tensor with shape
        `[N, self.num_classes + 1]` representing groundtruth classes. The
        classes are assumed to be k-hot encoded, and include background as the
        zero-th class.

    Returns:
      a BoxList containing sampled proposals.
    """
    (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
        proposal_boxlist, groundtruth_boxlist,
        groundtruth_classes_with_background)
    # Selects all boxes as candidates if none of them is selected according
    # to cls_weights. This could happen as boxes within certain IOU ranges
    # are ignored. If triggered, the selected boxes will still be ignored
    # during loss computation.
    cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))
    positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
    sampled_indices = self._second_stage_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        self._second_stage_batch_size,
        positive_indicator)
    return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
Example #28
Source File: coref_model.py From gap with MIT License | 5 votes |
def flatten_emb_by_sentence(self, emb, text_len_mask):
    num_sentences = tf.shape(emb)[0]
    max_sentence_length = tf.shape(emb)[1]

    emb_rank = len(emb.get_shape())
    if emb_rank == 2:
        flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
    elif emb_rank == 3:
        flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length,
                                         util.shape(emb, 2)])
    else:
        raise ValueError("Unsupported rank: {}".format(emb_rank))
    return tf.boolean_mask(flattened_emb,
                           tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
Example #29
Source File: faster_rcnn_meta_arch.py From DOTA_models with Apache License 2.0 | 5 votes |
def _sample_box_classifier_minibatch(self,
                                     proposal_boxlist,
                                     groundtruth_boxlist,
                                     groundtruth_classes_with_background):
    """Samples a mini-batch of proposals to be sent to the box classifier.

    Helper function for self._postprocess_rpn.

    Args:
      proposal_boxlist: A BoxList containing K proposal boxes in absolute
        coordinates.
      groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
        absolute coordinates.
      groundtruth_classes_with_background: A tensor with shape
        `[N, self.num_classes + 1]` representing groundtruth classes. The
        classes are assumed to be k-hot encoded, and include background as the
        zero-th class.

    Returns:
      a BoxList containing sampled proposals.
    """
    (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
        proposal_boxlist, groundtruth_boxlist,
        groundtruth_classes_with_background)
    # Selects all boxes as candidates if none of them is selected according
    # to cls_weights. This could happen as boxes within certain IOU ranges
    # are ignored. If triggered, the selected boxes will still be ignored
    # during loss computation.
    cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))
    positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
    sampled_indices = self._second_stage_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        self._second_stage_batch_size,
        positive_indicator)
    return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
Example #30
Source File: faster_rcnn_meta_arch.py From Person-Detection-and-Tracking with MIT License | 5 votes |
def _sample_box_classifier_minibatch(self,
                                     proposal_boxlist,
                                     groundtruth_boxlist,
                                     groundtruth_classes_with_background):
    """Samples a mini-batch of proposals to be sent to the box classifier.

    Helper function for self._postprocess_rpn.

    Args:
      proposal_boxlist: A BoxList containing K proposal boxes in absolute
        coordinates.
      groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
        absolute coordinates.
      groundtruth_classes_with_background: A tensor with shape
        `[N, self.num_classes + 1]` representing groundtruth classes. The
        classes are assumed to be k-hot encoded, and include background as the
        zero-th class.

    Returns:
      a BoxList containing sampled proposals.
    """
    (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
        proposal_boxlist, groundtruth_boxlist,
        groundtruth_classes_with_background)
    # Selects all boxes as candidates if none of them is selected according
    # to cls_weights. This could happen as boxes within certain IOU ranges
    # are ignored. If triggered, the selected boxes will still be ignored
    # during loss computation.
    cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))
    positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
    sampled_indices = self._second_stage_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        self._second_stage_batch_size,
        positive_indicator)
    return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)