Python tensorflow.Assert() Examples
The following are 30 code examples of tensorflow.Assert(), collected from open-source projects.
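tf.Assert(condition, data) returns an op that checks condition at run time and prints data if the check fails. Because the op only executes when something depends on it, most of the examples below pair it with tf.control_dependencies and tf.identity. A minimal sketch of that pattern (hypothetical tensors, TF1-style graph mode):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])  # hypothetical input
# The assert op checks the condition at run time and prints `x` on failure.
assert_op = tf.Assert(tf.reduce_all(x >= 0.), [x])
# Without a dependency the assert would be dead graph; tie it to `x`.
with tf.control_dependencies([assert_op]):
  x = tf.identity(x)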
Example #1
Source File: utils_pg.py From rl_algorithms with MIT License

def gauss_KL(mu1, logstd1, mu2, logstd2):
  """Returns KL divergence between two multivariate Gaussians, component-wise.

  It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
  It is not necessary to know the number of actions because reduce_sum will
  sum over this to get the `d` constant offset. The part consisting of the
  trace in the formula is blended with the mean difference squared due to the
  common "denominator" of var2_na. This formula generalizes for an arbitrary
  number of actions. I think mu2 and logstd2 should represent the policy
  before the update.

  Returns the KL divergence for each of the n components in the minibatch,
  then we do a reduce_mean outside this.
  """
  var1_na = tf.exp(2.*logstd1)
  var2_na = tf.exp(2.*logstd2)
  tmp_matrix = 2.*(logstd2 - logstd1) + (var1_na + tf.square(mu1-mu2))/var2_na - 1
  kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1])  # Don't forget the 1/2 !!
  assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n])
  with tf.control_dependencies([assert_op]):
    kl_n = tf.identity(kl_n)
  return kl_n
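A hypothetical sanity check (not part of the repo): the KL divergence of a Gaussian with itself is zero, so gauss_KL should return (numerically) zeros and the non-negativity assert should pass.

import numpy as np

mu = tf.constant(np.random.randn(4, 2), dtype=tf.float32)
logstd = tf.constant(np.random.randn(4, 2), dtype=tf.float32)
kl_n = gauss_KL(mu, logstd, mu, logstd)  # shape (4,)
with tf.Session() as sess:
  print(sess.run(kl_n))  # approximately [0. 0. 0. 0.]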
Example #2
Source File: utils.py From FastMaskRCNN with Apache License 2.0

from tensorflow.python.ops import control_flow_ops  # needed for with_dependencies below

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)
  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
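A hypothetical call (not part of the repo): crop a 224x224 patch from an image whose static shape is unknown; the rank and size assertions fire at session run time if violated.

image = tf.placeholder(tf.float32, shape=[None, None, 3])
patch = _crop(image, offset_height=0, offset_width=0,
              crop_height=224, crop_width=224)  # -> [224, 224, 3]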
Example #3
Source File: projection.py From tf_smpl with MIT License

def batch_orth_proj_idrot(X, camera, name=None):
  """
  X is N x num_points x 3
  camera is N x 3
  same as applying orth_proj_idrot to each N
  """
  with tf.variable_scope(name, "batch_orth_proj_idrot", [X, camera]):
    # TODO check X dim size.
    # tf.Assert(X.shape[2] == 3, [X])
    camera = tf.reshape(camera, [-1, 1, 3], name="cam_adj_shape")

    X_trans = X[:, :, :2] + camera[:, :, 1:]

    shape = tf.shape(X_trans)
    return tf.reshape(
        camera[:, :, 0] * tf.reshape(X_trans, [shape[0], -1]), shape)
Example #4
Source File: shape_utils.py From vehicle_counting_tensorflow with MIT License

def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
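Because the function returns the assert op rather than wiring it into the graph, a hypothetical caller (not part of the repo) attaches it as a control dependency:

boxes = tf.placeholder(tf.float32, shape=[None, 4])
with tf.control_dependencies([assert_box_normalized(boxes)]):
  boxes = tf.identity(boxes)  # fails at run time if boxes exceed [0, 1.1]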
Example #5
Source File: sequence_insert.py From onnx-tensorflow with Apache License 2.0

def version_11(cls, node, **kwargs):
  tensor_dict = kwargs["tensor_dict"]
  input_sequence = tensor_dict[node.inputs[0]]
  input_tensor = tensor_dict[node.inputs[1]]
  position = tensor_dict[node.inputs[2]] if len(
      node.inputs) > 2 else tf.shape(input_sequence.to_sparse())[0]

  # check whether position is in-bounds and assert if not
  result = cls.chk_pos_in_bounds(input_sequence, position)
  assert_pos = tf.Assert(tf.equal(result, True), [result])

  with tf.control_dependencies([assert_pos]):
    input_tensor = tf.expand_dims(input_tensor, 0)
    if input_sequence.shape[0] == 0:
      output_seq = tf.RaggedTensor.from_tensor(input_tensor)
    else:
      s1 = input_sequence[:position]
      s2 = input_sequence[position:]
      output_seq = tf.concat([s1, input_tensor, s2], axis=0)
    return [output_seq]
Example #6
Source File: model_distillation_adv_adaptation.py From BERT with Apache License 2.0

def diff_loss(shared_feat, task_feat):
  '''Orthogonality Constraints from https://github.com/tensorflow/models,
  in directory research/domain_adaptation
  '''
  task_feat -= tf.reduce_mean(task_feat, 0)
  shared_feat -= tf.reduce_mean(shared_feat, 0)

  task_feat = tf.nn.l2_normalize(task_feat, 1)
  shared_feat = tf.nn.l2_normalize(shared_feat, 1)

  correlation_matrix = tf.matmul(
      task_feat, shared_feat, transpose_a=True)

  cost = tf.reduce_mean(tf.square(correlation_matrix))
  cost = tf.where(cost > 0, cost, 0, name='value')

  assert_op = tf.Assert(tf.is_finite(cost), [cost])
  with tf.control_dependencies([assert_op]):
    loss_diff = tf.identity(cost)
  return loss_diff
Example #7
Source File: losses.py From DOTA_models with Apache License 2.0

def mmd_loss(source_samples, target_samples, weight, scope=None):
  """Adds a similarity loss term, the MMD between two representations.

  This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
  different Gaussian kernels.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the MMD loss.
    scope: optional name scope for summary tags.

  Returns:
    a scalar tensor representing the MMD loss value.
  """
  sigmas = [
      1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
      1e3, 1e4, 1e5, 1e6
  ]
  gaussian_kernel = partial(
      utils.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))

  loss_value = maximum_mean_discrepancy(
      source_samples, target_samples, kernel=gaussian_kernel)
  loss_value = tf.maximum(1e-4, loss_value) * weight
  assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
  with tf.control_dependencies([assert_op]):
    tag = 'MMD Loss'
    if scope:
      tag = scope + tag
    tf.summary.scalar(tag, loss_value)
    tf.losses.add_loss(loss_value)

  return loss_value
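Here `partial` comes from functools, and utils.gaussian_kernel_matrix is defined elsewhere in the repo and not shown. A plausible sketch of a multi-bandwidth RBF kernel matrix, offered as an assumption rather than the repo's exact code:

def gaussian_kernel_matrix(x, y, sigmas):
  # Sum of RBF kernels over several bandwidths; x is [n, d], y is [m, d].
  beta = 1. / (2. * tf.expand_dims(sigmas, 1))                    # [num_sigmas, 1]
  sq_dist = tf.reduce_sum(
      tf.square(tf.expand_dims(x, 1) - tf.expand_dims(y, 0)), 2)  # [n, m]
  s = tf.matmul(beta, tf.reshape(sq_dist, (1, -1)))               # [num_sigmas, n*m]
  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(sq_dist))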
Example #8
Source File: dsn.py From DOTA_models with Apache License 2.0

def add_reconstruction_loss(recon_loss_name, images, recons, weight, domain):
  """Adds a reconstruction loss.

  Args:
    recon_loss_name: The name of the reconstruction loss.
    images: A `Tensor` of size [batch_size, height, width, 3].
    recons: A `Tensor` whose size matches `images`.
    weight: A scalar coefficient for the loss.
    domain: The name of the domain being reconstructed.

  Raises:
    ValueError: If `recon_loss_name` is not recognized.
  """
  if recon_loss_name == 'sum_of_pairwise_squares':
    loss_fn = tf.contrib.losses.mean_pairwise_squared_error
  elif recon_loss_name == 'sum_of_squares':
    loss_fn = tf.contrib.losses.mean_squared_error
  else:
    raise ValueError('recon_loss_name value [%s] not recognized.' %
                     recon_loss_name)

  loss = loss_fn(recons, images, weight)

  assert_op = tf.Assert(tf.is_finite(loss), [loss])
  with tf.control_dependencies([assert_op]):
    tf.summary.scalar('losses/%s Recon Loss' % domain, loss)
Example #9
Source File: data_provider.py From DOTA_models with Apache License 2.0

def central_crop(image, crop_size):
  """Returns a central crop for the specified size of an image.

  Args:
    image: A tensor with shape [height, width, channels]
    crop_size: A tuple (crop_width, crop_height)

  Returns:
    A tensor of shape [crop_height, crop_width, channels].
  """
  with tf.variable_scope('CentralCrop'):
    target_width, target_height = crop_size
    image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]
    assert_op1 = tf.Assert(
        tf.greater_equal(image_height, target_height),
        ['image_height < target_height', image_height, target_height])
    assert_op2 = tf.Assert(
        tf.greater_equal(image_width, target_width),
        ['image_width < target_width', image_width, target_width])
    with tf.control_dependencies([assert_op1, assert_op2]):
      # Floor division keeps the offsets integral, as crop_to_bounding_box
      # requires (plain `/` would produce floats under Python 3).
      offset_width = (image_width - target_width) // 2
      offset_height = (image_height - target_height) // 2
      return tf.image.crop_to_bounding_box(image, offset_height, offset_width,
                                           target_height, target_width)
Example #10
Source File: component.py From DOTA_models with Apache License 2.0

def build_structured_training(self, state, network_states):
  """Builds a beam search based training loop for this component.

  The default implementation builds a dummy graph and raises a
  TensorFlow runtime exception to indicate that structured training
  is not implemented.

  Args:
    state: MasterState from the 'AdvanceMaster' op that advances the
      underlying master to this component.
    network_states: dictionary of component NetworkState objects.

  Returns:
    (handle, cost, correct, total) -- These are TF ops corresponding to
    the final handle after unrolling, the total cost, and the total number
    of actions. Since the number of correctly predicted actions is not
    applicable in the structured training setting, a dummy value should be
    returned.
  """
  del network_states  # Unused.

  with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
    handle = tf.identity(state.handle)
  cost = tf.constant(0.)
  correct, total = tf.constant(0), tf.constant(0)
  return handle, cost, correct, total
Example #11
Source File: gaussian_actor.py From tf2rl with MIT License

def _squash_correction(self, logp_pis, actions):
  # assert_op = tf.Assert(tf.less_equal(tf.reduce_max(actions), 1.), [actions])
  # To avoid evil machine precision error, strictly clip 1-pi**2 to [0,1] range.
  # with tf.control_dependencies([assert_op]):
  diff = tf.reduce_sum(
      tf.math.log(1. - actions ** 2 + self.EPS), axis=1)
  return logp_pis - diff
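The subtraction is the tanh change-of-variables correction used by squashed Gaussian policies: the log-density of the pre-squash sample minus the log-determinant of the tanh Jacobian. A hypothetical NumPy sketch (not from the repo) of the identity it relies on:

import numpy as np

u = np.random.randn(3, 2)   # pre-squash Gaussian samples
actions = np.tanh(u)
# d tanh(u)/du = 1 - tanh(u)^2, so the per-sample log|det J| is:
log_det_jac = np.sum(np.log(1. - actions ** 2), axis=1)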
Example #12
Source File: faster_rcnn_inception_v2_feature_extractor.py From Person-Detection-and-Tracking with MIT License

def _extract_proposal_features(self, preprocessed_inputs, scope):
  """Extracts first stage RPN features.

  Args:
    preprocessed_inputs: A [batch, height, width, channels] float32 tensor
      representing a batch of images.
    scope: A scope name.

  Returns:
    rpn_feature_map: A tensor with shape [batch, height, width, depth]
    activations: A dictionary mapping feature extractor tensor names to
      tensors

  Raises:
    InvalidArgumentError: If the spatial size of `preprocessed_inputs`
      (height or width) is less than 33.
    ValueError: If the created network is missing the required activation.
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  with tf.control_dependencies([shape_assert]):
    with tf.variable_scope('InceptionV2',
                           reuse=self._reuse_weights) as scope:
      with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
                                 batch_norm_scale=True,
                                 train_batch_norm=self._train_batch_norm):
        _, activations = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_4e',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
  return activations['Mixed_4e'], activations
Example #13
Source File: shape_utils.py From ros_people_object_detection_tensorflow with Apache License 2.0

def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
             image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with a Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d'
        % (min_dim, image_height, image_width))
  return image_tensor
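A hypothetical illustration (not from the repo) of the two code paths: a fully static shape is validated at graph construction, while an unknown shape defers to a run-time tf.Assert.

static_img = tf.zeros([1, 64, 64, 3])
checked = check_min_image_dim(33, static_img)    # validated immediately

dynamic_img = tf.placeholder(tf.float32, [1, None, None, 3])
checked = check_min_image_dim(33, dynamic_img)   # tf.Assert added to the graph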
Example #14
Source File: faster_rcnn_mobilenet_v1_feature_extractor.py From ros_people_object_detection_tensorflow with Apache License 2.0

def _extract_proposal_features(self, preprocessed_inputs, scope):
  """Extracts first stage RPN features.

  Args:
    preprocessed_inputs: A [batch, height, width, channels] float32 tensor
      representing a batch of images.
    scope: A scope name.

  Returns:
    rpn_feature_map: A tensor with shape [batch, height, width, depth]
    activations: A dictionary mapping feature extractor tensor names to
      tensors

  Raises:
    InvalidArgumentError: If the spatial size of `preprocessed_inputs`
      (height or width) is less than 33.
    ValueError: If the created network is missing the required activation.
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  with tf.control_dependencies([shape_assert]):
    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
                                 batch_norm_scale=True,
                                 train_batch_norm=self._train_batch_norm):
        _, activations = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
  return activations['Conv2d_13_pointwise'], activations
Example #15
Source File: faster_rcnn_inception_v2_feature_extractor.py From ros_people_object_detection_tensorflow with Apache License 2.0

def _extract_proposal_features(self, preprocessed_inputs, scope):
  """Extracts first stage RPN features.

  Args:
    preprocessed_inputs: A [batch, height, width, channels] float32 tensor
      representing a batch of images.
    scope: A scope name.

  Returns:
    rpn_feature_map: A tensor with shape [batch, height, width, depth]
    activations: A dictionary mapping feature extractor tensor names to
      tensors

  Raises:
    InvalidArgumentError: If the spatial size of `preprocessed_inputs`
      (height or width) is less than 33.
    ValueError: If the created network is missing the required activation.
  """
  preprocessed_inputs.get_shape().assert_has_rank(4)
  shape_assert = tf.Assert(
      tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                     tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
      ['image size must at least be 33 in both height and width.'])

  with tf.control_dependencies([shape_assert]):
    with tf.variable_scope('InceptionV2',
                           reuse=self._reuse_weights) as scope:
      with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
                                 batch_norm_scale=True,
                                 train_batch_norm=self._train_batch_norm):
        _, activations = inception_v2.inception_v2_base(
            preprocessed_inputs,
            final_endpoint='Mixed_4e',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
  return activations['Mixed_4e'], activations
Example #16
Source File: box_list_ops.py From Person-Detection-and-Tracking with MIT License

def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
  """Converts absolute box coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(images)[1],
                                                     tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates
  are already normalized). The value 1.01 is to deal with small rounding
  errors.

  Args:
    boxlist: BoxList with coordinates in terms of pixel-locations.
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with normalized coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, 1 / height, 1 / width)
Example #17
Source File: box_list_ops.py From Person-Detection-and-Tracking with MIT License

def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.1,
                            scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box
  coordinate value is larger than maximum_normalized_coordinate (in which
  case coordinates are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input boxes is correct.
    if check_range:
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          ['maximum box coordinate value is larger '
           'than %f: ' % maximum_normalized_coordinate, box_maximum])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, height, width)
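A hypothetical conversion (not from the repo); BoxList comes from the same object_detection codebase and stores boxes as [ymin, xmin, ymax, xmax].

boxlist = box_list.BoxList(tf.constant([[0.1, 0.2, 0.5, 0.9]]))
abs_boxlist = to_absolute_coordinates(boxlist, height=480, width=640)
# coordinates are now in pixel units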
Example #18
Source File: yellowfin.py From BERT with Apache License 2.0

def _get_cubic_root(self):
  """Get the cubic root."""
  # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
  # where x = sqrt(mu).
  # We substitute x, which is sqrt(mu), with x = y + 1.
  # It gives y^3 + py = q
  # where p = (D^2 h_min^2)/(2*C) and q = -p.
  # We use the Vieta's substitution to compute the root.
  # There is only one real solution y (which is in [0, 1]).
  # http://mathworld.wolfram.com/VietasSubstitution.html
  assert_array = [
      tf.Assert(
          tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._grad_var)),
          [self._grad_var,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._grad_var)),
          [self._grad_var,])
  ]
  with tf.control_dependencies(assert_array):
    p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / w
    x = y + 1
  return x
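A hypothetical NumPy cross-check (not from the repo) that the closed form above really solves y^3 + p*y = q with q = -p:

import numpy as np

p = 0.37  # any positive value stands in for (D^2 h_min^2) / (2 C)
w3 = (-np.sqrt(p ** 2 + 4.0 / 27.0 * p ** 3) - p) / 2.0
w = np.sign(w3) * np.abs(w3) ** (1.0 / 3.0)
y = w - p / (3.0 * w)
print(np.isclose(y ** 3 + p * y + p, 0.0))  # True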
Example #19
Source File: imagenet.py From BERT with Apache License 2.0

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: `Tensor` image of shape [height, width, channels].
    offset_height: `Tensor` indicating the height offset.
    offset_width: `Tensor` indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ["Rank of image must be equal to 3."])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ["Crop size greater than the image size."])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #20
Source File: keypoint_ops.py From Person-Detection-and-Tracking with MIT License

def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
  """Converts normalized keypoint coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum keypoint
  coordinate value is larger than 1.01 (in which case coordinates are already
  absolute).

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2]
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with absolute
    coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input keypoints is correct.
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)
Example #21
Source File: vgg_preprocessing.py From morph-net with Apache License 2.0

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #22
Source File: box_list_ops.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0

def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.1,
                            scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box
  coordinate value is larger than maximum_normalized_coordinate (in which
  case coordinates are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input boxes is correct.
    if check_range:
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          ['maximum box coordinate value is larger '
           'than %f: ' % maximum_normalized_coordinate, box_maximum])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, height, width)
Example #23
Source File: box_list_ops.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0

def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
  """Converts absolute box coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(images)[1],
                                                     tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates
  are already normalized). The value 1.01 is to deal with small rounding
  errors.

  Args:
    boxlist: BoxList with coordinates in terms of pixel-locations.
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with normalized coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, 1 / height, 1 / width)
Example #24
Source File: shape_utils.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0

def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
             image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with a Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d'
        % (min_dim, image_height, image_width))
  return image_tensor
Example #25
Source File: vgg_preprocessing.py From DOTA_models with Apache License 2.0

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #26
Source File: crop.py From FastMaskRCNN with Apache License 2.0

def crop_(images, boxes, batch_inds, ih, iw, stride=1,
          pooled_height=7, pooled_width=7, scope='ROIAlign'):
  """Cropping areas of features into fixed size

  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds:

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    # boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2])  # to (x, y)
    xs = boxes[:, 0]
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
      return [tf.image.crop_and_resize(images, boxes, batch_inds,
                                       [pooled_height, pooled_width],
                                       method='bilinear',
                                       name='Crop')] + [boxes]
Example #27
Source File: crop.py From FastMaskRCNN with Apache License 2.0

def crop(images, boxes, batch_inds, stride=1,
         pooled_height=7, pooled_width=7, scope='ROIAlign'):
  """Cropping areas of features into fixed size

  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds:

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    # boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2])  # to (x, y)
    xs = boxes[:, 0]
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
      return tf.image.crop_and_resize(images, boxes, batch_inds,
                                      [pooled_height, pooled_width],
                                      method='bilinear',
                                      name='Crop')
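A hypothetical use of crop (not from the repo): pool a fixed 7x7 window of features for each region of interest, with every ROI taken from the first image in the batch.

feats = tf.placeholder(tf.float32, [1, None, None, 256])
rois = tf.placeholder(tf.float32, [None, 4])          # [x1, y1, x2, y2] in pixels
inds = tf.zeros([tf.shape(rois)[0]], dtype=tf.int32)  # all boxes from image 0
pooled = crop(feats, rois, inds)                      # -> [num_rois, 7, 7, 256]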
Example #28
Source File: vgg_preprocessing.py From tf-pose with Apache License 2.0

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #29
Source File: vgg_preprocessing.py From models with Apache License 2.0

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(input=image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), dtype=tf.int32)

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example #30
Source File: keypoint_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0

def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
  """Converts normalized keypoint coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum keypoint
  coordinate value is larger than 1.01 (in which case coordinates are already
  absolute).

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2]
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with absolute
    coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input keypoints is correct.
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)