Python tensorflow.logical_and() Examples
The following are 30 code examples of tensorflow.logical_and(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
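Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tf.logical_and() does on its own: an element-wise boolean AND that broadcasts like other element-wise ops. It is written against the TF 1.x-style API that most of the examples below assume; in TF 2.x eager mode you can simply print the tensors instead of running a session.

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])
both = tf.logical_and(a, b)              # [True, False, False, False]

# A common pattern: combine comparison results into a single boolean mask.
x = tf.constant([1, 5, 9])
in_range = tf.logical_and(x > 2, x < 8)  # [False, True, False]

with tf.Session() as sess:               # TF 1.x graph mode
    print(sess.run(both))
    print(sess.run(in_range))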
Example #1
Source File: hmc.py From zhusuan with MIT License | 7 votes |
def _leapfrog(self, q, p, step_size, get_gradient, mass):
    def loop_cond(i, q, p):
        return i < self.n_leapfrogs + 1

    def loop_body(i, q, p):
        step_size1 = tf.cond(i > 0,
                             lambda: step_size,
                             lambda: tf.constant(0.0, dtype=tf.float32))
        step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                            tf.less(0, i)),
                             lambda: step_size,
                             lambda: step_size / 2)
        q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                   lambda q: get_gradient(q), mass)
        return [i + 1, q, p]

    i = tf.constant(0)
    _, q, p = tf.while_loop(loop_cond, loop_body, [i, q, p],
                            back_prop=False, parallel_iterations=1)
    return q, p
Example #2
Source File: keras_words_subtoken_metrics.py From code2vec with MIT License | 6 votes |
def _get_prediction_from_topk(self, topk_predicted_words):
    # apply given filter
    masks = []
    if self.predicted_words_filters is not None:
        masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
    if masks:
        # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
        legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
    else:
        legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)

    # the first legal predicted word is our prediction
    first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
    first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
    first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words,
                                                     first_legal_predicted_target_word_idx)

    prediction = tf.reshape(first_legal_predicted_word_string, [-1])
    return prediction
Example #3
Source File: box_list_ops.py From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #4
Source File: coref_model.py From gap with MIT License | 6 votes |
def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index,
                     text_len, speaker_ids, genre, is_training, gold_starts, gold_ends,
                     cluster_ids):
    max_training_sentences = self.config["max_training_sentences"]
    num_sentences = context_word_emb.shape[0]
    assert num_sentences > max_training_sentences

    sentence_offset = random.randint(0, num_sentences - max_training_sentences)
    word_offset = text_len[:sentence_offset].sum()
    num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
    tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]
    context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
    lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
    char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
    text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]

    speaker_ids = speaker_ids[word_offset: word_offset + num_words]
    gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
    gold_starts = gold_starts[gold_spans] - word_offset
    gold_ends = gold_ends[gold_spans] - word_offset
    cluster_ids = cluster_ids[gold_spans]

    return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, \
        speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
Example #5
Source File: cvNormal2dLikelihood.py From decompose with MIT License | 6 votes |
def init(self, data: Tensor) -> None:
    tau = self.__tauInit
    dtype = self.__dtype
    properties = self.__properties
    noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                  properties=properties)
    self.__noiseDistribution = noiseDistribution
    observedMask = tf.logical_not(tf.is_nan(data))
    trainMask = tf.logical_not(self.cv.mask(X=data))
    trainMask = tf.get_variable("trainMask",
                                dtype=trainMask.dtype,
                                initializer=trainMask)
    trainMask = tf.logical_and(trainMask, observedMask)
    testMask = tf.logical_and(observedMask,
                              tf.logical_not(trainMask))
    self.__observedMask = observedMask
    self.__trainMask = trainMask
    self.__testMask = testMask
Example #6
Source File: shape_utils.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
    """Asserts the input box tensor is normalized.

    Args:
        boxes: a tensor of shape [N, 4] where N is the number of boxes.
        maximum_normalized_coordinate: Maximum coordinate value to be considered
            as normalized, default to 1.1.

    Returns:
        a tf.Assert op which fails when the input box tensor is not normalized.

    Raises:
        ValueError: When the input box tensor is not normalized.
    """
    box_minimum = tf.reduce_min(boxes)
    box_maximum = tf.reduce_max(boxes)
    return tf.Assert(
        tf.logical_and(
            tf.less_equal(box_maximum, maximum_normalized_coordinate),
            tf.greater_equal(box_minimum, 0)),
        [boxes])
Example #7
Source File: box_list_ops.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #8
Source File: utils.py From FastMaskRCNN with Apache License 2.0 | 6 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    cropped_shape = control_flow_ops.with_dependencies(
        [rank_assertion],
        tf.stack([crop_height, crop_width, original_shape[2]]))

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    image = control_flow_ops.with_dependencies(
        [size_assertion],
        tf.slice(image, offsets, cropped_shape))
    return tf.reshape(image, cropped_shape)
Example #9
Source File: cvNormalNdLikelihood.py From decompose with MIT License | 6 votes |
def init(self, data: Tensor) -> None:
    tau = self.__tauInit
    dtype = self.__dtype
    properties = self.__properties
    noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                  properties=properties)
    self.__noiseDistribution = noiseDistribution
    observedMask = tf.logical_not(tf.is_nan(data))
    trainMask = tf.logical_not(self.cv.mask(X=data))
    trainMask = tf.get_variable("trainMask",
                                dtype=trainMask.dtype,
                                initializer=trainMask)
    trainMask = tf.logical_and(trainMask, observedMask)
    testMask = tf.logical_and(observedMask,
                              tf.logical_not(trainMask))
    self.__observedMask = observedMask
    self.__trainMask = trainMask
    self.__testMask = testMask
Example #10
Source File: box_list_ops.py From object_detector_app with MIT License | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #11
Source File: box_list_ops.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #12
Source File: postU.py From decompose with MIT License | 6 votes |
def updateK(self, k, prepVars, U):
    f = self.__f
    UfShape = U[f].get_shape()

    lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
    postfk = lhUfk * self.prior[k].cond()
    Ufk = postfk.draw()
    Ufk = tf.expand_dims(Ufk, 0)

    normUfk = tf.norm(Ufk)
    notNanNorm = tf.logical_not(tf.is_nan(normUfk))
    finiteNorm = tf.is_finite(normUfk)
    positiveNorm = normUfk > 0.
    isValid = tf.logical_and(notNanNorm,
                             tf.logical_and(finiteNorm, positiveNorm))
    Uf = tf.cond(isValid,
                 lambda: self.updateUf(U[f], Ufk, k),
                 lambda: U[f])

    # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
    Uf.set_shape(UfShape)
    U[f] = Uf
    return(U)
Example #13
Source File: util.py From R-Net with MIT License | 6 votes |
def get_batch_dataset(record_file, parser, config):
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]

        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id

        def reduce_func(key, elements):
            return elements.batch(config.batch_size)

        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset
Example #14
Source File: path_context_reader.py From code2vec with MIT License | 6 votes |
def _filter_input_rows(self, *row_parts) -> tf.bool:
    row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

    # assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
    #            {row_parts.path_source_token_indices, row_parts.path_indices,
    #             row_parts.path_target_token_indices, row_parts.context_valid_mask})

    # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
    any_word_valid_mask_per_context_part = [
        tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                     self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
        tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                     self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
        tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                     self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
    any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

    if self.estimator_action.is_evaluate:
        cond = any_contexts_is_valid  # scalar
    else:  # training
        word_is_valid = tf.greater(
            row_parts.target_index,
            self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
        cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

    return cond  # scalar
Example #15
Source File: gather_and_scatter_mixin.py From onnx-tensorflow with Apache License 2.0 | 6 votes |
def chk_idx_out_of_bounds_along_axis(cls, data, axis, indices):
    """Check indices out of bounds for ScatterElement.

    In the TensorFlow GPU version, if an out-of-bound index is found, the
    index is ignored for ScatterND/TensorScatterNDUpdate. But the ONNX spec
    states that it is an error if any index values are out of bounds.
    Therefore the converter needs to run this function to verify that all
    the indices are in bounds along the axis before sending it to
    TensorFlow. If an out-of-bound index is detected, the caller of this
    function needs to throw an InvalidArgumentError exception.
    """
    data_shape = tf.cast(tf_shape(data), indices.dtype)
    limit = data_shape[axis]
    cond1 = tf.greater_equal(indices, tf.negative(limit))
    cond2 = tf.less(indices, limit)
    return tf.logical_and(cond1, cond2)
Example #16
Source File: triplet_loss_utils.py From BERT with Apache License 2.0 | 6 votes |
def _get_anchor_positive_triplet_mask(labels):
    """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.

    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]

    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Check that i and j are distinct
    indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)

    # Check if labels[i] == labels[j]
    # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
    labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))

    # Combine the two masks
    mask = tf.logical_and(indices_not_equal, labels_equal)

    return mask
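As a quick usage sketch (not part of the original project), running the same mask construction on a toy label vector makes the broadcasting step concrete:

import tensorflow as tf

labels = tf.constant([0, 1, 0, 2])  # toy batch of 4 labels
indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
mask = tf.logical_and(tf.logical_not(indices_equal), labels_equal)
# mask[a, p] is True only where a != p and labels[a] == labels[p];
# here only mask[0, 2] and mask[2, 0] are True, since items 0 and 2 share class 0.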
Example #17
Source File: box_list_ops.py From DOTA_models with Apache License 2.0 | 6 votes |
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
Example #18
Source File: keras_word_prediction_layer.py From code2vec with MIT License | 6 votes |
def call(self, y_pred, **kwargs):
    y_pred.shape.assert_has_rank(2)
    top_k_pred_indices = tf.cast(tf.nn.top_k(y_pred, k=self.top_k).indices,
                                 dtype=self.index_to_word_table.key_dtype)
    predicted_target_words_strings = self.index_to_word_table.lookup(top_k_pred_indices)

    # apply given filter
    masks = []
    if self.predicted_words_filters is not None:
        masks = [fltr(top_k_pred_indices, predicted_target_words_strings) for fltr in self.predicted_words_filters]
    if masks:
        # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
        legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
    else:
        legal_predicted_target_words_mask = tf.cast(tf.ones_like(top_k_pred_indices), dtype=tf.bool)

    # the first legal predicted word is our prediction
    first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
    first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
    first_legal_predicted_word_string = tf.gather_nd(predicted_target_words_strings,
                                                     first_legal_predicted_target_word_idx)

    prediction = tf.reshape(first_legal_predicted_word_string, [-1])
    return prediction
Example #19
Source File: hmc.py From zhusuan with MIT License | 6 votes |
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
Example #20
Source File: anchor.py From keras-ctpn with Apache License 2.0 | 6 votes |
def filter_out_of_bound_boxes(boxes, feature_shape, stride):
    """Filter out anchors that lie outside the image bounds.

    :param boxes: [n, (y1, x1, y2, x2)]
    :param feature_shape: height and width of the feature map, [h, w]
    :param stride: network stride
    :return: the kept boxes and their indices
    """
    # The original image size is the feature map size times the stride.
    h, w = feature_shape[0], feature_shape[1]
    h = tf.cast(h * stride, tf.float32)
    w = tf.cast(w * stride, tf.float32)

    valid_boxes_tag = tf.logical_and(tf.logical_and(tf.logical_and(boxes[:, 0] >= 0,
                                                                   boxes[:, 1] >= 0),
                                                    boxes[:, 2] <= h),
                                     boxes[:, 3] <= w)
    boxes = tf.boolean_mask(boxes, valid_boxes_tag)
    valid_boxes_indices = tf.where(valid_boxes_tag)[:, 0]

    return boxes, valid_boxes_indices
Example #21
Source File: tensor.py From spleeter with MIT License | 6 votes |
def check_tensor_shape(tensor_tf, target_shape):
    """Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result
Example #22
Source File: data_reader.py From BERT with Apache License 2.0 | 5 votes |
def example_valid_size(example, min_length, max_length):
    length = example_length(example)
    return tf.logical_and(
        length >= min_length,
        length <= max_length,
    )
Example #23
Source File: imagenet.py From BERT with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
        image: `Tensor` image of shape [height, width, channels].
        offset_height: `Tensor` indicating the height offset.
        offset_width: `Tensor` indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        the cropped (and resized) image.

    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ["Rank of image must be equal to 3."])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ["Crop size greater than the image size."])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
Example #24
Source File: common_layers.py From BERT with Apache License 2.0 | 5 votes |
def weights_concatenated(labels):
    """Assign weight 1.0 to the "target" part of the concatenated labels.

    The labels look like:
        source English I love you . ID1 target French Je t'aime . ID1 source
        English the cat ID1 target French le chat ID1 source English ...

    We want to assign weight 1.0 to all words in the target text (including the
    ID1 end symbol), but not to the source text or the boilerplate. In the above
    example, the target words that get positive weight are:
        Je t'aime . ID1 le chat ID1

    Args:
        labels: a Tensor

    Returns:
        a Tensor
    """
    eos_mask = tf.to_int32(tf.equal(labels, 1))
    sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
    in_target = tf.equal(tf.mod(sentence_num, 2), 1)
    # first two tokens of each sentence are boilerplate.
    sentence_num_plus_one = sentence_num + 1
    shifted = tf.pad(sentence_num_plus_one,
                     [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
    nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
    ret = to_float(tf.logical_and(nonboilerplate, in_target))
    return ret
Example #25
Source File: utils.py From zhusuan with MIT License | 5 votes |
def __and__(self, other):
    return tf.logical_and(self, other)
Example #26
Source File: input_fn.py From imitation-learning with MIT License | 5 votes |
def filter_valid_intentions(tf_example):
    """Return True if high-level command is in {2, 3, 4, 5}.

    Args:
        tf_example: Dict[str, tf.Tensor]

    Returns:
        tf.Tensor (type=bool)
    """
    high_level_command = tf_example[ilc.TGT_HIGH_LVL_CMD]
    return tf.logical_and(
        tf.greater_equal(high_level_command, 2),
        tf.less_equal(high_level_command, 5))
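A predicate like this is typically handed to tf.data.Dataset.filter, which expects a function returning a scalar boolean tensor. Below is a minimal, hypothetical sketch of that pattern (the feature key is an assumed stand-in for ilc.TGT_HIGH_LVL_CMD; it is not from the original repository):

import tensorflow as tf

def keep_valid(example):
    # Keep examples whose high-level command is in {2, 3, 4, 5}.
    cmd = example["high_level_command"]
    return tf.logical_and(tf.greater_equal(cmd, 2), tf.less_equal(cmd, 5))

dataset = tf.data.Dataset.from_tensor_slices({"high_level_command": [0, 2, 5, 7]})
dataset = dataset.filter(keep_valid)  # only the examples with commands 2 and 5 survive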
Example #27
Source File: vgg_preprocessing.py From tf-pose with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        the cropped (and resized) image.

    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
Example #28
Source File: vgg_preprocessing.py From lambda-deep-learning-demo with Apache License 2.0 | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    config:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        the cropped (and resized) image.

    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
Example #29
Source File: utils.py From zhusuan with MIT License | 5 votes |
def __rand__(self, other):
    return tf.logical_and(other, self)
Example #30
Source File: vgg_preprocessing.py From ctw-baseline with MIT License | 5 votes |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        the cropped (and resized) image.

    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)