Python tensorflow.bool() Examples
The following are 30 code examples of tensorflow.bool, the TensorFlow boolean dtype (tf.bool). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
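Before the examples, here is a minimal standalone sketch of the tf.bool patterns that recur throughout this page: casting to and from booleans, masking, and element-wise selection. It assumes TensorFlow 2.x with eager execution; the individual ops also exist in TF 1.x graphs.

import tensorflow as tf

# Build a boolean tensor directly or by casting a numeric one.
flags = tf.constant([True, False, True], dtype=tf.bool)
nonzero = tf.cast(tf.constant([0, 3, 0, 7]), tf.bool)       # [False, True, False, True]

# Boolean tensors drive masking and conditional selection.
values = tf.constant([10, 20, 30, 40])
kept = tf.boolean_mask(values, nonzero)                      # [20, 40]
chosen = tf.where(nonzero, values, tf.zeros_like(values))    # [0, 20, 0, 40]

print(flags.dtype, kept.numpy(), chosen.numpy())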
Example #1
Source File: model.py From PathCon with MIT License | 7 votes |
def _get_neighbors_and_masks(self, relations, entity_pairs, train_edges):
    edges_list = [relations]
    masks = []
    train_edges = tf.expand_dims(train_edges, -1)  # [batch_size, 1]

    for i in range(self.context_hops):
        if i == 0:
            neighbor_entities = entity_pairs
        else:
            neighbor_entities = tf.reshape(
                tf.gather(self.edge2entities, edges_list[-1]), [self.batch_size, -1])
        neighbor_edges = tf.reshape(
            tf.gather(self.entity2edges, neighbor_entities), [self.batch_size, -1])
        edges_list.append(neighbor_edges)

        mask = neighbor_edges - train_edges  # [batch_size, -1]
        mask = tf.cast(tf.cast(mask, tf.bool), tf.float64)  # [batch_size, -1]
        masks.append(mask)
    return edges_list, masks
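The double cast through tf.bool is the key trick here: subtracting the training edge id makes exactly that edge zero, and the bool-then-float cast turns every non-zero entry into 1.0, so the current training edge is masked out. A toy illustration with made-up values (TensorFlow 2.x eager):

import tensorflow as tf

neighbor_edges = tf.constant([[3, 5, 7], [2, 2, 9]])
train_edges = tf.constant([[5], [2]])                  # [batch_size, 1]

diff = neighbor_edges - train_edges                    # 0 where the edge equals the train edge
mask = tf.cast(tf.cast(diff, tf.bool), tf.float64)
print(mask.numpy())   # [[1. 0. 1.]
                      #  [0. 0. 1.]]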
Example #2
Source File: ssd_meta_arch.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def _minibatch_subsample_fn(self, inputs):
    """Randomly samples anchors for one image.

    Args:
        inputs: a list of 2 inputs. First one is a tensor of shape
            [num_anchors, num_classes] indicating targets assigned to each
            anchor. Second one is a tensor of shape [num_anchors] indicating
            the class weight of each anchor.

    Returns:
        batch_sampled_indicator: bool tensor of shape [num_anchors] indicating
            whether the anchor should be selected for loss computation.
    """
    cls_targets, cls_weights = inputs
    if self._add_background_class:
        # Set background_class bits to 0 so that the positives_indicator
        # computation would not consider background class.
        background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
        regular_class = tf.slice(cls_targets, [0, 1], [-1, -1])
        cls_targets = tf.concat([background_class, regular_class], 1)
    positives_indicator = tf.reduce_sum(cls_targets, axis=1)
    return self._random_example_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        batch_size=None,
        labels=tf.cast(positives_indicator, tf.bool))
Example #3
Source File: model.py From DOTA_models with Apache License 2.0 | 6 votes |
def char_predictions(self, chars_logit):
    """Returns confidence scores (softmax values) for predicted characters.

    Args:
        chars_logit: chars logits, a tensor with shape
            [batch_size x seq_length x num_char_classes].

    Returns:
        A tuple (ids, log_prob, scores), where:
            ids - predicted characters, a int32 tensor with shape
                [batch_size x seq_length];
            log_prob - a log probability of all characters, a float tensor with
                shape [batch_size, seq_length, num_char_classes];
            scores - corresponding confidence scores for characters, a float
                tensor with shape [batch_size x seq_length].
    """
    log_prob = utils.logits_to_log_prob(chars_logit)
    ids = tf.to_int32(tf.argmax(log_prob, dimension=2), name='predicted_chars')
    mask = tf.cast(
        slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
    all_scores = tf.nn.softmax(chars_logit)
    selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
    scores = tf.reshape(selected_scores, shape=(-1, self._params.seq_length))
    return ids, log_prob, scores
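The mask here is a one-hot encoding of the argmax ids cast to tf.bool, so tf.boolean_mask keeps exactly one softmax score per character position. A reduced sketch of that step with toy shapes (TensorFlow 2.x eager; tf.one_hot stands in for slim.one_hot_encoding):

import tensorflow as tf

logits = tf.constant([[[2.0, 0.5, 0.1],
                       [0.2, 3.0, 0.3]]])              # [batch=1, seq=2, classes=3]
ids = tf.argmax(logits, axis=2)                        # [[0, 1]]
mask = tf.cast(tf.one_hot(ids, depth=3), tf.bool)
scores = tf.boolean_mask(tf.nn.softmax(logits), mask)  # one score per position
scores = tf.reshape(scores, (-1, 2))                   # [batch, seq_length]
print(scores.numpy())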
Example #4
Source File: tf_example_decoder.py From DOTA_models with Apache License 2.0 | 6 votes |
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to
    [num_instances, height, width] and cast to boolean type to save memory.

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    return tf.cast(tf.reshape(masks, to_shape), tf.bool)
Example #5
Source File: composite_optimizer.py From DOTA_models with Apache License 2.0 | 6 votes |
def __init__(self, optimizer1, optimizer2, switch, use_locking=False, name='Composite'):
    """Construct a new Composite optimizer.

    Args:
        optimizer1: A tf.python.training.optimizer.Optimizer object.
        optimizer2: A tf.python.training.optimizer.Optimizer object.
        switch: A tf.bool Tensor, selecting whether to use the first or the
            second optimizer.
        use_locking: Bool. If True, use locks to prevent concurrent updates to
            variables.
        name: Optional name prefix for the operations created when applying
            gradients. Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch
Example #6
Source File: faster_rcnn_meta_arch.py From DOTA_models with Apache License 2.0 | 6 votes |
def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals):
    """Creates indicator matrix of non-pad elements of padded batch proposals.

    Args:
        num_proposals: Tensor of type tf.int32 with shape [batch_size].
        max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
        A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
    """
    batch_size = tf.size(num_proposals)
    tiled_num_proposals = tf.tile(
        tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
    tiled_proposal_index = tf.tile(
        tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
    return tf.greater(tiled_num_proposals, tiled_proposal_index)
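The tile-and-compare pattern above builds a per-proposal validity mask; it is equivalent to tf.sequence_mask, as the following toy check suggests (assumed values, TensorFlow 2.x eager):

import tensorflow as tf

num_proposals = tf.constant([2, 0, 3])                 # valid proposals per image
max_num_proposals = 4

tiled_num = tf.tile(tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_idx = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0), [3, 1])
indicator = tf.greater(tiled_num, tiled_idx)           # dtype tf.bool

same = tf.sequence_mask(num_proposals, maxlen=max_num_proposals)
print(indicator.numpy())
print(bool(tf.reduce_all(tf.equal(indicator, same))))  # True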
Example #7
Source File: loop.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, logdir, step=None, log=None, report=None, reset=None):
    """Execute operations in a loop and coordinate logging and checkpoints.

    The step, log, report, and reset arguments will get created if not
    provided. Reset is used to indicate switching to a new phase, so that the
    model can start a new computation in case its computation is split over
    multiple training steps.

    Args:
        logdir: Will contain checkpoints and summaries for each phase.
        step: Variable of the global step (optional).
        log: Tensor indicating to the model to compute summary tensors.
        report: Tensor indicating to the loop to report the current mean score.
        reset: Tensor indicating to the model to start a new computation.
    """
    self._logdir = logdir
    self._step = (
        tf.Variable(0, False, name='global_step') if step is None else step)
    self._log = tf.placeholder(tf.bool) if log is None else log
    self._report = tf.placeholder(tf.bool) if report is None else report
    self._reset = tf.placeholder(tf.bool) if reset is None else reset
    self._phases = []
Example #8
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, batch_env):
    """Batch of environments inside the TensorFlow graph.

    Args:
        batch_env: Batch environment.
    """
    self._batch_env = batch_env
    observ_shape = self._parse_shape(self._batch_env.observation_space)
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    action_shape = self._parse_shape(self._batch_env.action_space)
    action_dtype = self._parse_dtype(self._batch_env.action_space)
    with tf.variable_scope('env_temporary'):
        self._observ = tf.Variable(
            tf.zeros((len(self._batch_env),) + observ_shape, observ_dtype),
            name='observ', trainable=False)
        self._action = tf.Variable(
            tf.zeros((len(self._batch_env),) + action_shape, action_dtype),
            name='action', trainable=False)
        self._reward = tf.Variable(
            tf.zeros((len(self._batch_env),), tf.float32),
            name='reward', trainable=False)
        self._done = tf.Variable(
            tf.cast(tf.ones((len(self._batch_env),)), tf.bool),
            name='done', trainable=False)
Example #9
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
        action: Tensor holding the batch of actions to apply.

    Returns:
        Operation.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.check_numerics(action, 'action')
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        observ, reward, done = tf.py_func(
            lambda a: self._batch_env.step(a)[:3], [action],
            [observ_dtype, tf.float32, tf.bool], name='step')
        observ = tf.check_numerics(observ, 'observ')
        reward = tf.check_numerics(reward, 'reward')
        return tf.group(
            self._observ.assign(observ),
            self._action.assign(action),
            self._reward.assign(reward),
            self._done.assign(done))
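The tf.bool entry in the py_func output list declares the dtype of the done flags returned from Python. A minimal standalone version of the same idea using tf.numpy_function (the TF 2.x counterpart of tf.py_func), with a made-up step_py stand-in for batch_env.step:

import numpy as np
import tensorflow as tf

def step_py(action):
    # Stand-in for batch_env.step: return observations, rewards, done flags.
    obs = np.zeros((action.shape[0], 3), dtype=np.float32)
    reward = np.ones((action.shape[0],), dtype=np.float32)
    done = np.array([False] * action.shape[0])
    return obs, reward, done

action = tf.zeros((2, 4))
obs, reward, done = tf.numpy_function(
    step_py, [action], [tf.float32, tf.float32, tf.bool], name='step')
print(done.dtype)   # <dtype: 'bool'>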
Example #10
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def reset(self, indices=None):
    """Reset the batch of environments.

    Args:
        indices: The batch indices of the environments to reset; defaults to all.

    Returns:
        Batch tensor of the new observations.
    """
    if indices is None:
        indices = tf.range(len(self._batch_env))
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ = tf.py_func(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.zeros_like(indices, tf.float32)
    done = tf.zeros_like(indices, tf.bool)
    with tf.control_dependencies([
            tf.scatter_update(self._observ, indices, observ),
            tf.scatter_update(self._reward, indices, reward),
            tf.scatter_update(self._done, indices, done)]):
        return tf.identity(observ)
Example #11
Source File: loop.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, logdir, step=None, log=None, report=None, reset=None):
    """Execute operations in a loop and coordinate logging and checkpoints.

    The step, log, report, and reset arguments will get created if not
    provided. Reset is used to indicate switching to a new phase, so that the
    model can start a new computation in case its computation is split over
    multiple training steps.

    Args:
        logdir: Will contain checkpoints and summaries for each phase.
        step: Variable of the global step (optional).
        log: Tensor indicating to the model to compute summary tensors.
        report: Tensor indicating to the loop to report the current mean score.
        reset: Tensor indicating to the model to start a new computation.
    """
    self._logdir = logdir
    self._step = (
        tf.Variable(0, False, name='global_step') if step is None else step)
    self._log = tf.placeholder(tf.bool) if log is None else log
    self._report = tf.placeholder(tf.bool) if report is None else report
    self._reset = tf.placeholder(tf.bool) if reset is None else reset
    self._phases = []
Example #12
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, batch_env):
    """Batch of environments inside the TensorFlow graph.

    Args:
        batch_env: Batch environment.
    """
    self._batch_env = batch_env
    observ_shape = self._parse_shape(self._batch_env.observation_space)
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    action_shape = self._parse_shape(self._batch_env.action_space)
    action_dtype = self._parse_dtype(self._batch_env.action_space)
    with tf.variable_scope('env_temporary'):
        self._observ = tf.Variable(
            tf.zeros((len(self._batch_env),) + observ_shape, observ_dtype),
            name='observ', trainable=False)
        self._action = tf.Variable(
            tf.zeros((len(self._batch_env),) + action_shape, action_dtype),
            name='action', trainable=False)
        self._reward = tf.Variable(
            tf.zeros((len(self._batch_env),), tf.float32),
            name='reward', trainable=False)
        self._done = tf.Variable(
            tf.cast(tf.ones((len(self._batch_env),)), tf.bool),
            name='done', trainable=False)
Example #13
Source File: in_graph_batch_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
        action: Tensor holding the batch of actions to apply.

    Returns:
        Operation.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.check_numerics(action, 'action')
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        observ, reward, done = tf.py_func(
            lambda a: self._batch_env.step(a)[:3], [action],
            [observ_dtype, tf.float32, tf.bool], name='step')
        observ = tf.check_numerics(observ, 'observ')
        reward = tf.check_numerics(reward, 'reward')
        return tf.group(
            self._observ.assign(observ),
            self._action.assign(action),
            self._reward.assign(reward),
            self._done.assign(done))
Example #14
Source File: py_func_batch_env.py From fine-lm with MIT License | 6 votes |
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
        action: Tensor holding the batch of actions to apply.

    Returns:
        Operation.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.check_numerics(action, 'action')
        observ_dtype = utils.parse_dtype(self._batch_env.observation_space)
        observ, reward, done = tf.py_func(
            lambda a: self._batch_env.step(a)[:3], [action],
            [observ_dtype, tf.float32, tf.bool], name='step')
        observ = tf.check_numerics(observ, 'observ')
        reward = tf.check_numerics(reward, 'reward')
        reward.set_shape((len(self),))
        done.set_shape((len(self),))
        with tf.control_dependencies([self._observ.assign(observ)]):
            return tf.identity(reward), tf.identity(done)
Example #15
Source File: metrics.py From fine-lm with MIT License | 6 votes |
def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero):
    """Precision of set predictions.

    Args:
        predictions: A Tensor of scores of shape [batch, nlabels].
        labels: A Tensor of int32s giving true set elements, of shape
            [batch, seq_length].
        weights_fn: A function to weight the elements.

    Returns:
        hits: A Tensor of shape [batch, nlabels].
        weights: A Tensor of shape [batch, nlabels].
    """
    with tf.variable_scope("set_precision", values=[predictions, labels]):
        labels = tf.squeeze(labels, [2, 3])
        weights = weights_fn(labels)
        labels = tf.one_hot(labels, predictions.shape[-1])
        labels = tf.reduce_max(labels, axis=1)
        labels = tf.cast(labels, tf.bool)
        return tf.to_float(tf.equal(labels, predictions)), weights
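The last three lines turn the integer label sets into a per-class boolean membership indicator: one-hot encode, collapse over the sequence axis, cast to tf.bool. A toy version (assumed sizes, TensorFlow 2.x eager):

import tensorflow as tf

labels = tf.constant([[1, 3, 3]])                      # true elements, [batch, seq_length]
nlabels = 5

one_hot = tf.one_hot(labels, nlabels)                  # [batch, seq_length, nlabels]
membership = tf.cast(tf.reduce_max(one_hot, axis=1), tf.bool)
print(membership.numpy())                              # [[False  True False  True False]]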
Example #16
Source File: problem.py From fine-lm with MIT License | 6 votes |
def pad_batch(features, batch_multiple):
    """Pad batch dim of features to nearest multiple of batch_multiple."""
    feature = list(features.items())[0][1]
    batch_size = tf.shape(feature)[0]
    mod = batch_size % batch_multiple
    has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
    batch_padding = batch_multiple * has_mod - mod

    padded_features = {}
    for k, feature in features.items():
        rank = len(feature.shape)
        paddings = []
        for _ in range(rank):
            paddings.append([0, 0])
        paddings[0][1] = batch_padding
        padded_feature = tf.pad(feature, paddings)
        padded_features[k] = padded_feature
    return padded_features
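The has_mod trick casts the remainder through tf.bool so the padding amount is zero when the batch size is already a multiple. A quick numeric check (toy numbers, TensorFlow 2.x eager):

import tensorflow as tf

def batch_padding(batch_size, batch_multiple):
    mod = batch_size % batch_multiple
    has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)  # 0 if already a multiple, else 1
    return batch_multiple * has_mod - mod

print(int(batch_padding(tf.constant(10), 8)))          # 6  -> pad 10 up to 16
print(int(batch_padding(tf.constant(16), 8)))          # 0  -> already a multiple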
Example #17
Source File: tf_example_decoder.py From object_detector_app with MIT License | 6 votes |
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to
    [num_instances, height, width] and cast to boolean type to save memory.

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    return tf.cast(tf.reshape(masks, to_shape), tf.bool)
Example #18
Source File: faster_rcnn_meta_arch.py From object_detector_app with MIT License | 6 votes |
def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals):
    """Creates indicator matrix of non-pad elements of padded batch proposals.

    Args:
        num_proposals: Tensor of type tf.int32 with shape [batch_size].
        max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
        A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
    """
    batch_size = tf.size(num_proposals)
    tiled_num_proposals = tf.tile(
        tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
    tiled_proposal_index = tf.tile(
        tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
    return tf.greater(tiled_num_proposals, tiled_proposal_index)
Example #19
Source File: box_list_ops_test.py From object_detector_app with MIT License | 6 votes |
def test_boolean_mask_with_field(self):
    corners = tf.constant(
        [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
    indicator = tf.constant([True, False, True, False, True], tf.bool)
    weights = tf.constant([[.1], [.3], [.5], [.7], [.9]], tf.float32)
    expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
    expected_weights = [[.1], [.5], [.9]]

    boxes = box_list.BoxList(corners)
    boxes.add_field('weights', weights)
    subset = box_list_ops.boolean_mask(boxes, indicator, ['weights'])
    with self.test_session() as sess:
        subset_output, weights_output = sess.run(
            [subset.get(), subset.get_field('weights')])
        self.assertAllClose(subset_output, expected_subset)
        self.assertAllClose(weights_output, expected_weights)
Example #20
Source File: in_graph_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, env):
    """Put an OpenAI Gym environment into the TensorFlow graph.

    Args:
        env: OpenAI Gym environment.
    """
    self._env = env
    observ_shape = self._parse_shape(self._env.observation_space)
    observ_dtype = self._parse_dtype(self._env.observation_space)
    action_shape = self._parse_shape(self._env.action_space)
    action_dtype = self._parse_dtype(self._env.action_space)
    with tf.name_scope('environment'):
        self._observ = tf.Variable(
            tf.zeros(observ_shape, observ_dtype), name='observ', trainable=False)
        self._action = tf.Variable(
            tf.zeros(action_shape, action_dtype), name='action', trainable=False)
        self._reward = tf.Variable(
            0.0, dtype=tf.float32, name='reward', trainable=False)
        self._done = tf.Variable(
            True, dtype=tf.bool, name='done', trainable=False)
        self._step = tf.Variable(
            0, dtype=tf.int32, name='step', trainable=False)
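The done flag above is simply a non-trainable tf.bool variable that the graph flips as episodes finish. A minimal standalone version (TensorFlow 2.x eager):

import tensorflow as tf

done = tf.Variable(True, dtype=tf.bool, trainable=False, name='done')
done.assign(False)          # episode running
done.assign(True)           # episode finished
print(done.numpy())         # True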
Example #21
Source File: test_model.py From models with MIT License | 6 votes |
def network_surgery():
    tf.reset_default_graph()
    inputs = tf.placeholder(tf.float32,
                            shape=(None, 131072, 4),
                            name='inputs')
    targets = tf.placeholder(tf.float32, shape=(None, 1024, 4229),
                             name='targets')
    targets_na = tf.placeholder(tf.bool, shape=(None, 1024),
                                name="targets_na")
    preds_adhoc = tf.placeholder(tf.float32, shape=(None, 960, 4229),
                                 name="Placeholder_15")

    saver = tf.train.import_meta_graph("model_files/model.tf.meta",
                                       input_map={'Placeholder_15:0': preds_adhoc,
                                                  'Placeholder:0': targets_na,
                                                  'inputs:0': inputs,
                                                  'targets:0': targets})

    ops = tf.get_default_graph().get_operations()

    out = tf.train.export_meta_graph(filename='model_files/model.tf-modified.meta',
                                     as_text=True)
    ops[:15]
Example #22
Source File: inputs_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_keypoints(self):
    input_tensor_dict = {
        fields.InputDataFields.groundtruth_keypoints:
            tf.placeholder(tf.float32, [None, 16, 4]),
        fields.InputDataFields.groundtruth_keypoint_visibilities:
            tf.placeholder(tf.bool, [None, 16]),
    }
    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
        tensor_dict=input_tensor_dict,
        max_num_boxes=3,
        num_classes=3,
        spatial_image_shape=[5, 6])

    self.assertAllEqual(
        padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
        .shape.as_list(), [3, 16, 4])
    self.assertAllEqual(
        padded_tensor_dict[
            fields.InputDataFields.groundtruth_keypoint_visibilities]
        .shape.as_list(), [3, 16])
Example #23
Source File: focal_loss.py From tf2-yolo3 with Apache License 2.0 | 6 votes |
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
    q = 1 - p

    # For numerical stability (so we don't inadvertently take the log of 0)
    p = tf.math.maximum(p, _EPSILON)
    q = tf.math.maximum(q, _EPSILON)

    # Loss for the positive examples
    pos_loss = -(q**gamma) * tf.math.log(p)
    if pos_weight is not None:
        pos_loss *= pos_weight

    # Loss for the negative examples
    neg_loss = -(p**gamma) * tf.math.log(q)

    # Combine loss terms
    if label_smoothing is None:
        labels = tf.dtypes.cast(labels, dtype=tf.bool)
        loss = tf.where(labels, pos_loss, neg_loss)
    else:
        labels = _process_labels(labels=labels,
                                 label_smoothing=label_smoothing,
                                 dtype=p.dtype)
        loss = labels * pos_loss + (1 - labels) * neg_loss

    return loss
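When no label smoothing is used, the labels are cast to tf.bool and tf.where picks the positive or negative loss term element-wise. A stripped-down illustration (toy values, TensorFlow 2.x eager):

import tensorflow as tf

labels = tf.constant([1.0, 0.0, 1.0])
pos_loss = tf.constant([0.2, 0.9, 0.1])
neg_loss = tf.constant([0.7, 0.3, 0.8])

loss = tf.where(tf.cast(labels, tf.bool), pos_loss, neg_loss)
print(loss.numpy())                                    # [0.2 0.3 0.1]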
Example #24
Source File: run_summarization.py From TransferRL with MIT License | 6 votes |
def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):
    with variable_scope.variable_scope("ScheduledEmbedding"):
        # Return -1s where we do not sample, and sample_ids elsewhere
        select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)
        select_sample = select_sampler.sample(sample_shape=batch_size)
        sample_ids = array_ops.where(
            select_sample,
            tf.range(batch_size),
            gen_array_ops.fill([batch_size], -1))
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), tf.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), tf.int32)
        _estimate = array_ops.gather_nd(estimate, where_sampling)
        _true = array_ops.gather_nd(true, where_not_sampling)

        base_shape = array_ops.shape(true)
        result1 = array_ops.scatter_nd(indices=where_sampling,
                                       updates=_estimate, shape=base_shape)
        result2 = array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=_true, shape=base_shape)
        result = result1 + result2
        return result
Example #25
Source File: mlp_policy.py From HardRLWithYoutube with MIT License | 5 votes |
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
    assert isinstance(ob_space, gym.spaces.Box)

    self.pdtype = pdtype = make_pdtype(ac_space)
    sequence_length = None

    ob = U.get_placeholder(name="ob", dtype=tf.float32,
                           shape=[sequence_length] + list(ob_space.shape))

    with tf.variable_scope("obfilter"):
        self.ob_rms = RunningMeanStd(shape=ob_space.shape)

    obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
    last_out = obz
    for i in range(num_hid_layers):
        last_out = tf.nn.tanh(dense(last_out, hid_size, "vffc%i" % (i + 1),
                                    weight_init=U.normc_initializer(1.0)))
    self.vpred = dense(last_out, 1, "vffinal",
                       weight_init=U.normc_initializer(1.0))[:, 0]

    last_out = obz
    for i in range(num_hid_layers):
        last_out = tf.nn.tanh(dense(last_out, hid_size, "polfc%i" % (i + 1),
                                    weight_init=U.normc_initializer(1.0)))

    if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
        mean = dense(last_out, pdtype.param_shape()[0] // 2, "polfinal",
                     U.normc_initializer(0.01))
        logstd = tf.get_variable(name="logstd",
                                 shape=[1, pdtype.param_shape()[0] // 2],
                                 initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
    else:
        pdparam = dense(last_out, pdtype.param_shape()[0], "polfinal",
                        U.normc_initializer(0.01))

    self.pd = pdtype.pdfromflat(pdparam)

    self.state_in = []
    self.state_out = []

    # change for BC
    stochastic = U.get_placeholder(name="stochastic", dtype=tf.bool, shape=())
    ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
    self.ac = ac
    self._act = U.function([stochastic, ob], [ac, self.vpred])
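The stochastic placeholder is a scalar tf.bool fed at call time, and U.switch selects between sampling and the distribution mode based on it. A reduced sketch of the same pattern, using tf.where for the selection (an illustration under that assumption, not baselines' exact implementation; TensorFlow 2.x eager, toy Gaussian in place of self.pd):

import tensorflow as tf

mean = tf.constant([0.5, -0.2])
std = tf.constant([0.1, 0.1])

def act(stochastic):
    stochastic = tf.convert_to_tensor(stochastic, dtype=tf.bool)
    sampled = mean + std * tf.random.normal(tf.shape(mean))
    return tf.where(stochastic, sampled, mean)   # sample if True, else the mode

print(act(True).numpy())    # noisy action
print(act(False).numpy())   # deterministic action [0.5, -0.2]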
Example #26
Source File: faster_rcnn_meta_arch.py From object_detector_app with MIT License | 5 votes |
def _sample_box_classifier_minibatch(self, proposal_boxlist, groundtruth_boxlist,
                                     groundtruth_classes_with_background):
    """Samples a mini-batch of proposals to be sent to the box classifier.

    Helper function for self._postprocess_rpn.

    Args:
        proposal_boxlist: A BoxList containing K proposal boxes in absolute
            coordinates.
        groundtruth_boxlist: A BoxList containing N groundtruth object boxes in
            absolute coordinates.
        groundtruth_classes_with_background: A tensor with shape
            `[N, self.num_classes + 1]` representing groundtruth classes. The
            classes are assumed to be k-hot encoded, and include background as
            the zero-th class.

    Returns:
        A BoxList containing the sampled proposals.
    """
    (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
        proposal_boxlist, groundtruth_boxlist,
        groundtruth_classes_with_background)
    # Selects all boxes as candidates if none of them is selected according
    # to cls_weights. This could happen as boxes within certain IOU ranges
    # are ignored. If triggered, the selected boxes will still be ignored
    # during loss computation.
    cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))
    positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
    sampled_indices = self._second_stage_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        self._second_stage_batch_size,
        positive_indicator)
    return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
Example #27
Source File: minibatch_sampler_test.py From object_detector_app with MIT License | 5 votes |
def test_subsample_when_more_true_elements_than_num_samples_no_shape(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.placeholder(tf.bool)
    feed_dict = {indicator: np_indicator}

    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 3)
    with self.test_session() as sess:
        samples_out = sess.run(samples, feed_dict=feed_dict)
        self.assertTrue(np.sum(samples_out), 3)
        self.assertAllEqual(samples_out,
                            np.logical_and(samples_out, np_indicator))
Example #28
Source File: minibatch_sampler_test.py From object_detector_app with MIT License | 5 votes |
def test_subsample_indicator_when_indicator_all_false(self):
    indicator_empty = tf.zeros([0], dtype=tf.bool)
    samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator_empty, 4)
    with self.test_session() as sess:
        samples_empty_out = sess.run(samples_empty)
        self.assertEqual(0, samples_empty_out.size)
Example #29
Source File: matcher_test.py From object_detector_app with MIT License | 5 votes |
def test_get_correct_matched_column_indicator(self):
    match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
    match = matcher.Match(match_results)
    expected_column_indicator = [True, True, False, True, False, True, False]

    matched_column_indicator = match.matched_column_indicator()
    self.assertEquals(matched_column_indicator.dtype, tf.bool)
    with self.test_session() as sess:
        matched_column_indicator = sess.run(matched_column_indicator)
        self.assertAllEqual(matched_column_indicator, expected_column_indicator)
Example #30
Source File: box_list_ops_test.py From object_detector_app with MIT License | 5 votes |
def test_boolean_mask(self):
    corners = tf.constant(
        [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
    indicator = tf.constant([True, False, True, False, True], tf.bool)
    expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]

    boxes = box_list.BoxList(corners)
    subset = box_list_ops.boolean_mask(boxes, indicator)
    with self.test_session() as sess:
        subset_output = sess.run(subset.get())
        self.assertAllClose(subset_output, expected_subset)