Python tensorflow.greater_equal() Examples
The following are 30 code examples of tensorflow.greater_equal(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
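Before the examples, a minimal self-contained sketch of what tf.greater_equal does: an element-wise x >= y comparison that returns a boolean tensor and broadcasts like other binary ops. This snippet is illustrative only and assumes TensorFlow 2.x eager execution; most examples below target the 1.x graph API.

import tensorflow as tf

x = tf.constant([1, 4, 9])
y = tf.constant([2, 4, 8])

# Element-wise comparison: [False, True, True]
mask = tf.greater_equal(x, y)

# A scalar broadcasts against the tensor: [False, True, True]
thresholded = tf.greater_equal(x, 4)

print(mask.numpy(), thresholded.numpy())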
Example #1
Source File: box_list_ops.py From object_detector_app with MIT License | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #2
Source File: preprocessor_test.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 6 votes |
def testRandomPixelValueScale(self):
  preprocessing_options = []
  preprocessing_options.append((preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  }))
  preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
  images = self.createTestImages()
  tensor_dict = {fields.InputDataFields.image: images}
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images_min = tf.to_float(images) * 0.9 / 255.0
  images_max = tf.to_float(images) * 1.1 / 255.0
  images = tensor_dict[fields.InputDataFields.image]
  values_greater = tf.greater_equal(images, images_min)
  values_less = tf.less_equal(images, images_max)
  values_true = tf.fill([1, 4, 4, 3], True)
  with self.test_session() as sess:
    (values_greater_, values_less_, values_true_) = sess.run(
        [values_greater, values_less, values_true])
    self.assertAllClose(values_greater_, values_true_)
    self.assertAllClose(values_less_, values_true_)
Example #3
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #4
Source File: box_list_ops.py From DOTA_models with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #5
Source File: data_provider.py From DOTA_models with Apache License 2.0 | 6 votes |
def central_crop(image, crop_size):
  """Returns a central crop for the specified size of an image.

  Args:
    image: A tensor with shape [height, width, channels]
    crop_size: A tuple (crop_width, crop_height)

  Returns:
    A tensor of shape [crop_height, crop_width, channels].
  """
  with tf.variable_scope('CentralCrop'):
    target_width, target_height = crop_size
    image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]
    assert_op1 = tf.Assert(
        tf.greater_equal(image_height, target_height),
        ['image_height < target_height', image_height, target_height])
    assert_op2 = tf.Assert(
        tf.greater_equal(image_width, target_width),
        ['image_width < target_width', image_width, target_width])
    with tf.control_dependencies([assert_op1, assert_op2]):
      offset_width = (image_width - target_width) / 2
      offset_height = (image_height - target_height) / 2
      return tf.image.crop_to_bounding_box(image, offset_height, offset_width,
                                           target_height, target_width)
Example #6
Source File: preprocessor_test.py From object_detector_app with MIT License | 6 votes |
def testRandomPixelValueScale(self):
  preprocessing_options = []
  preprocessing_options.append((preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  }))
  preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
  images = self.createTestImages()
  tensor_dict = {fields.InputDataFields.image: images}
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images_min = tf.to_float(images) * 0.9 / 255.0
  images_max = tf.to_float(images) * 1.1 / 255.0
  images = tensor_dict[fields.InputDataFields.image]
  values_greater = tf.greater_equal(images, images_min)
  values_less = tf.less_equal(images, images_max)
  values_true = tf.fill([1, 4, 4, 3], True)
  with self.test_session() as sess:
    (values_greater_, values_less_, values_true_) = sess.run(
        [values_greater, values_less, values_true])
    self.assertAllClose(values_greater_, values_true_)
    self.assertAllClose(values_less_, values_true_)
Example #7
Source File: box_list_ops.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #8
Source File: shape_utils.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
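Note that a returned tf.Assert op only executes if something depends on it; in the TF 1.x graph API callers typically wire it in with tf.control_dependencies. A minimal usage sketch under that assumption (the tensor names are hypothetical, not from the source):

# Hypothetical usage: make a downstream op depend on the assertion
# so it actually runs when the graph is evaluated.
assert_op = assert_box_normalized(boxes)
with tf.control_dependencies([assert_op]):
  boxes = tf.identity(boxes)  # evaluating this triggers the check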
Example #9
Source File: box_list_ops.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    height, width = height_width(boxlist)
    is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                              tf.greater_equal(height, min_side))
    return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #10
Source File: utils.py From FastMaskRCNN with Apache License 2.0 | 6 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Example #11
Source File: pyramid_network.py From FastMaskRCNN with Apache License 2.0 | 6 votes |
def _filter_negative_samples(labels, tensors):
    """Keeps only samples with non-negative labels.

    Params:
    -----
    labels: of shape (N,)
    tensors: a list of tensors, each of shape (N, .., ..) where the first
      axis is the sample number

    Returns:
    -----
    tensors: filtered tensors
    """
    # return tensors
    keeps = tf.where(tf.greater_equal(labels, 0))
    keeps = tf.reshape(keeps, [-1])

    filtered = []
    for t in tensors:
        tf.assert_equal(tf.shape(t)[0], tf.shape(labels)[0])
        f = tf.gather(t, keeps)
        filtered.append(f)

    return filtered
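The tf.where / tf.reshape / tf.gather chain above is a common idiom for selecting the rows of a batch that satisfy a condition. A standalone sketch with illustrative values (runs eagerly in TensorFlow 2.x):

import tensorflow as tf

labels = tf.constant([1, -1, 0, 2])           # -1 marks ignored samples
scores = tf.constant([0.9, 0.1, 0.4, 0.7])

# Indices of non-negative labels: [0, 2, 3]
keeps = tf.reshape(tf.where(tf.greater_equal(labels, 0)), [-1])
kept_scores = tf.gather(scores, keeps)        # [0.9, 0.4, 0.7]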
Example #12
Source File: preprocessor_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def testRandomPixelValueScale(self):
  preprocessing_options = []
  preprocessing_options.append((preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  }))
  preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
  images = self.createTestImages()
  tensor_dict = {fields.InputDataFields.image: images}
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images_min = tf.to_float(images) * 0.9 / 255.0
  images_max = tf.to_float(images) * 1.1 / 255.0
  images = tensor_dict[fields.InputDataFields.image]
  values_greater = tf.greater_equal(images, images_min)
  values_less = tf.less_equal(images, images_max)
  values_true = tf.fill([1, 4, 4, 3], True)
  with self.test_session() as sess:
    (values_greater_, values_less_, values_true_) = sess.run(
        [values_greater, values_less, values_true])
    self.assertAllClose(values_greater_, values_true_)
    self.assertAllClose(values_less_, values_true_)
Example #13
Source File: jumpNormalAlgorithms.py From decompose with MIT License | 6 votes |
def mode(cls, parameters: Dict[str, Tensor]) -> Tensor:
    mu = parameters["mu"]
    tau = parameters["tau"]
    nu = parameters["nu"]
    beta = parameters["beta"]
    lam = 1./beta
    mode = tf.zeros_like(mu) * tf.zeros_like(mu)
    mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                   tf.less(mu+lam/tau, nu)),
                    mu+lam/tau, mode)
    mode = tf.where(tf.logical_and(tf.greater(nu, mu),
                                   tf.greater_equal(mu+lam/tau, nu)),
                    nu, mode)
    mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                   tf.greater(mu-lam/tau, nu)),
                    mu-lam/tau, mode)
    mode = tf.where(tf.logical_and(tf.less_equal(nu, mu),
                                   tf.less_equal(mu-lam/tau, nu)),
                    nu, mode)
    return(mode)
Example #14
Source File: losses.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 6 votes |
def smooth_l1_loss_rpn(bbox_pred, bbox_targets, label, sigma=1.0):
    '''
    :param bbox_pred: [-1, 4]
    :param bbox_targets: [-1, 4]
    :param label: [-1]
    :param sigma:
    :return:
    '''
    value = _smooth_l1_loss_base(bbox_pred, bbox_targets, sigma=sigma)
    value = tf.reduce_sum(value, axis=1)  # sum over axis 1
    # rpn_select = tf.reshape(tf.where(tf.greater_equal(label, 0)), [-1])
    rpn_select = tf.where(tf.greater(label, 0))
    # rpn_select = tf.stop_gradient(rpn_select)
    selected_value = tf.gather(value, rpn_select)
    non_ignored_mask = tf.stop_gradient(
        1.0 - tf.to_float(tf.equal(label, -1)))  # non-ignored anchors are 1.0, ignored (-1) are 0.0

    bbox_loss = tf.reduce_sum(selected_value) / tf.maximum(1.0, tf.reduce_sum(non_ignored_mask))
    return bbox_loss
Example #15
Source File: boxes_utils.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 6 votes |
def filter_outside_boxes(boxes, img_h, img_w):
    '''
    :param boxes: boxes with format [xmin, ymin, xmax, ymax]
    :param img_h: height of image
    :param img_w: width of image
    :return: indices of boxes that are inside the image boundary
    '''
    with tf.name_scope('filter_outside_boxes'):
        xmin, ymin, xmax, ymax = tf.unstack(boxes, axis=1)
        xmin_index = tf.greater_equal(xmin, 0)
        ymin_index = tf.greater_equal(ymin, 0)
        xmax_index = tf.less_equal(xmax, tf.cast(img_w, tf.float32))
        ymax_index = tf.less_equal(ymax, tf.cast(img_h, tf.float32))

        indices = tf.transpose(tf.stack([xmin_index, ymin_index, xmax_index, ymax_index]))
        indices = tf.cast(indices, dtype=tf.int32)
        indices = tf.reduce_sum(indices, axis=1)
        indices = tf.where(tf.equal(indices, 4))
        # indices = tf.equal(indices, 4)
        return tf.reshape(indices, [-1])
Example #16
Source File: adapter.py From Waymo_Kitti_Adapter with MIT License | 6 votes |
def show_range_image(self, range_image, layout_index_start=1):
    """Shows range image.

    Args:
        range_image: the range image data from a given lidar of type MatrixFloat.
        layout_index_start: layout offset
    """
    range_image_tensor = tf.convert_to_tensor(range_image.data)
    range_image_tensor = tf.reshape(range_image_tensor, range_image.shape.dims)
    lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
    range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
                                  tf.ones_like(range_image_tensor) * 1e10)
    range_image_range = range_image_tensor[..., 0]
    range_image_intensity = range_image_tensor[..., 1]
    range_image_elongation = range_image_tensor[..., 2]
    self.plot_range_image_helper(range_image_range.numpy(), 'range',
                                 [8, 1, layout_index_start], vmax=75, cmap='gray')
    self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
                                 [8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
    self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
                                 [8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
Example #17
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or
      fully in the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding
      boxes in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))),
        [-1])
    return gather(boxlist, valid_indices), valid_indices
Example #18
Source File: box_list_ops.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0 | 5 votes |
def prune_non_overlapping_boxes(
    boxlist1, boxlist2, min_overlap=0.0, scope=None):
  """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2. If it does not, we remove it.

  Args:
    boxlist1: BoxList holding N boxes.
    boxlist2: BoxList holding M boxes.
    min_overlap: Minimum required overlap between boxes, to count them as
      overlapping.
    scope: name scope.

  Returns:
    new_boxlist1: A pruned boxlist with size [N', 4].
    keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
      first input BoxList `boxlist1`.
  """
  with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
    ioa_ = ioa(boxlist2, boxlist1)  # [M, N] tensor
    ioa_ = tf.reduce_max(ioa_, reduction_indices=[0])  # [N] tensor
    keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
    keep_inds = tf.squeeze(tf.where(keep_bool), squeeze_dims=[1])
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1, keep_inds
Example #19
Source File: input_fn.py From imitation-learning with MIT License | 5 votes |
def filter_valid_intentions(tf_example):
    """Return True if high-level command is in {2, 3, 4, 5}.

    Args:
        tf_example: Dict[str, tf.Tensor]

    Returns:
        tf.Tensor (type=bool)
    """
    high_level_command = tf_example[ilc.TGT_HIGH_LVL_CMD]
    return tf.logical_and(
        tf.greater_equal(high_level_command, 2),
        tf.less_equal(high_level_command, 5))
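A predicate like this is normally handed to tf.data.Dataset.filter. The sketch below shows the pattern with a made-up feature key rather than the project's ilc.TGT_HIGH_LVL_CMD constant:

import tensorflow as tf

# Hypothetical stand-in for the project's parsed examples.
dataset = tf.data.Dataset.from_tensor_slices(
    {'high_level_command': [1, 2, 5, 6]})

def keep_valid(example):
    cmd = example['high_level_command']
    # Keep commands in the closed range [2, 5].
    return tf.logical_and(tf.greater_equal(cmd, 2), tf.less_equal(cmd, 5))

dataset = dataset.filter(keep_valid)  # yields the examples with commands 2 and 5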
Example #20
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.01,
                            scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box
  coordinate value is larger than maximum_normalized_coordinate (in which case
  coordinates are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.01.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input boxes is correct.
    if check_range:
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          ['maximum box coordinate value is larger '
           'than %f: ' % maximum_normalized_coordinate, box_maximum])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, height, width)
Example #21
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def select_random_box(boxlist,
                      default_box=None,
                      seed=None,
                      scope=None):
  """Selects a random bounding box from a `BoxList`.

  Args:
    boxlist: A BoxList.
    default_box: A [1, 4] float32 tensor. If no boxes are present in
      `boxlist`, this default box will be returned. If None, will use a
      default box of [[-1., -1., -1., -1.]].
    seed: Random seed.
    scope: Name scope.

  Returns:
    bbox: A [1, 4] tensor with a random bounding box.
    valid: A bool tensor indicating whether a valid bounding box is returned
      (True) or whether the default box is returned (False).
  """
  with tf.name_scope(scope, 'SelectRandomBox'):
    bboxes = boxlist.get()
    combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
    number_of_boxes = combined_shape[0]
    default_box = default_box or tf.constant([[-1., -1., -1., -1.]])

    def select_box():
      random_index = tf.random_uniform([],
                                       maxval=number_of_boxes,
                                       dtype=tf.int32,
                                       seed=seed)
      return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)

    return tf.cond(
        tf.greater_equal(number_of_boxes, 1),
        true_fn=select_box,
        false_fn=lambda: (default_box, tf.constant(False)))
Example #22
Source File: pyramid_network.py From FastMaskRCNN with Apache License 2.0 | 5 votes |
def _get_valid_sample_fraction(labels, p=0):
    """return fraction of non-negative examples, the ignored examples have
    been marked as negative"""
    num_valid = tf.reduce_sum(tf.cast(tf.greater_equal(labels, p), tf.float32))
    num_example = tf.cast(tf.size(labels), tf.float32)
    frac = tf.cond(tf.greater(num_example, 0),
                   lambda: num_valid / num_example,
                   lambda: tf.cast(0, tf.float32))
    frac_ = tf.cond(tf.greater(num_valid, 0),
                    lambda: num_example / num_valid,
                    lambda: tf.cast(0, tf.float32))
    return frac, frac_
Example #23
Source File: vgg_preprocessing.py From tf-pose with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #24
Source File: kfac.py From stable-baselines with MIT License | 5 votes |
def apply_stats(self, stats_updates):
    """
    compute stats and update/apply the new stats to the running average

    :param stats_updates: ([TensorFlow Tensor]) The stats updates
    :return: (function) update stats operation
    """

    def _update_accum_stats():
        if self._full_stats_init:
            return tf.cond(tf.greater(self.sgd_step, self._cold_iter),
                           lambda: tf.group(*self._apply_stats(
                               stats_updates, accumulate=True,
                               accumulate_coeff=1. / self._stats_accum_iter)),
                           tf.no_op)
        else:
            return tf.group(*self._apply_stats(
                stats_updates, accumulate=True,
                accumulate_coeff=1. / self._stats_accum_iter))

    def _update_running_avg_stats(stats_updates):
        return tf.group(*self._apply_stats(stats_updates))

    if self._async_stats:
        # asynchronous stats update
        update_stats = self._apply_stats(stats_updates)

        queue = tf.FIFOQueue(1, [item.dtype for item in update_stats],
                             shapes=[item.get_shape() for item in update_stats])
        enqueue_op = queue.enqueue(update_stats)

        def dequeue_stats_op():
            return queue.dequeue()

        self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
        update_stats_op = tf.cond(
            tf.equal(queue.size(), tf.convert_to_tensor(0)),
            tf.no_op,
            lambda: tf.group(*[dequeue_stats_op(), ]))
    else:
        # synchronous stats update
        update_stats_op = tf.cond(
            tf.greater_equal(self.stats_step, self._stats_accum_iter),
            lambda: _update_running_avg_stats(stats_updates),
            _update_accum_stats)
    self._update_stats_op = update_stats_op
    return update_stats_op
Example #25
Source File: vgg_preprocessing.py From lambda-deep-learning-demo with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  config:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #26
Source File: data.py From zoo with Apache License 2.0 | 5 votes |
def _at_least_x_are_equal(a, b, x):
  """At least `x` of `a` and `b` `Tensors` are equal."""
  match = tf.equal(a, b)
  match = tf.cast(match, tf.int32)
  return tf.greater_equal(tf.reduce_sum(match), x)
Example #27
Source File: vgg_preprocessing.py From DOTA_models with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #28
Source File: build_whole_network.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 5 votes |
def add_roi_batch_img_smry(self, img, rois, labels):
    positive_roi_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])
    negative_roi_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])

    pos_roi = tf.gather(rois, positive_roi_indices)
    neg_roi = tf.gather(rois, negative_roi_indices)

    pos_in_img = show_box_in_tensor.draw_box_with_color(img, pos_roi,
                                                        tf.shape(pos_roi)[0])
    neg_in_img = show_box_in_tensor.draw_box_with_color(img, neg_roi,
                                                        tf.shape(neg_roi)[0])

    tf.summary.image('pos_rois', pos_in_img)
    tf.summary.image('neg_rois', neg_in_img)
Example #29
Source File: build_whole_network.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 5 votes |
def add_anchor_img_smry(self, img, anchors, labels):
    positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])
    negative_anchor_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])

    positive_anchor = tf.gather(anchors, positive_anchor_indices)
    negative_anchor = tf.gather(anchors, negative_anchor_indices)

    pos_in_img = show_box_in_tensor.draw_box_with_color(img, positive_anchor,
                                                        tf.shape(positive_anchor)[0])
    # Use the negative anchors' own count here (the flattened original passed
    # the positive-anchor count to both calls).
    neg_in_img = show_box_in_tensor.draw_box_with_color(img, negative_anchor,
                                                        tf.shape(negative_anchor)[0])

    tf.summary.image('positive_anchor', pos_in_img)
    tf.summary.image('negative_anchors', neg_in_img)
Example #30
Source File: resnet_preprocessing.py From cloudml-samples with Apache License 2.0 | 5 votes |
def _at_least_x_are_equal(a, b, x):
  """At least `x` of `a` and `b` `Tensors` are equal."""
  match = tf.equal(a, b)
  match = tf.cast(match, tf.int32)
  return tf.greater_equal(tf.reduce_sum(match), x)
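In the ResNet preprocessing pipelines this helper comes from, it typically detects whether a distorted-crop attempt silently fell back to the whole image: if all three entries of the result's shape still equal the original shape, the crop did nothing and the caller retries or center-crops instead. A hedged sketch of that pattern (the shapes are illustrative, not from the source):

import tensorflow as tf

def _at_least_x_are_equal(a, b, x):
  """At least `x` of `a` and `b` `Tensors` are equal."""
  match = tf.equal(a, b)
  match = tf.cast(match, tf.int32)
  return tf.greater_equal(tf.reduce_sum(match), x)

original_shape = tf.constant([224, 224, 3])
result_shape = tf.constant([224, 224, 3])  # the crop returned the full image
# All three entries equal => the crop did nothing, so the caller retries.
bad_crop = _at_least_x_are_equal(original_shape, result_shape, 3)  # True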