Python tensorflow.assert_non_negative() Examples
The following are 9 code examples of tensorflow.assert_non_negative(), drawn from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out all other available functions and classes of the tensorflow module.
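Before the examples, here is a minimal sketch of the common usage pattern, assuming the TensorFlow 1.x graph-mode API (the constant values are illustrative): tf.assert_non_negative returns an assertion op that only executes when something downstream depends on it, so it is usually attached via tf.control_dependencies.

import tensorflow as tf  # TensorFlow 1.x graph-mode API assumed

# The assertion op is a no-op unless an op that depends on it is evaluated,
# so wrap the downstream computation in tf.control_dependencies.
values = tf.constant([0.0, 1.5, 2.0], name='values')
assert_op = tf.assert_non_negative(values, message='values must be >= 0')
with tf.control_dependencies([assert_op]):
    checked = tf.identity(values, name='checked_values')

with tf.Session() as sess:
    # Prints [0.  1.5 2. ]; a negative entry would raise InvalidArgumentError.
    print(sess.run(checked))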
Example #1
Source File: tensorcheck.py from in-silico-labeling (Apache License 2.0)
def bounds_unlabeled(lower: float,
                     upper: float,
                     tensor: tf.Tensor,
                     name: Optional[str] = None) -> tf.Tensor:
    """Checks the tensor elements fall in the given bounds.

    Args:
      lower: The lower bound.
      upper: The upper bound.
      tensor: The input tensor.
      name: Optional op name.

    Returns:
      The input tensor.
    """
    with tf.name_scope(name, 'check_bounds', [tensor]) as scope:
        if FLAGS.tensorcheck_enable_checks:
            lower_bound_op = tf.assert_non_negative(
                tensor - lower, name='lower_bound')
            upper_bound_op = tf.assert_non_positive(
                tensor - upper, name='upper_bound')
            with tf.control_dependencies([lower_bound_op, upper_bound_op]):
                tensor = tf.identity(tensor, name=scope)
        return tensor
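A hypothetical call site for the function above (the placeholder shape and the [0, 1] range are illustrative; FLAGS.tensorcheck_enable_checks is assumed to be defined elsewhere in the project):

# Hypothetical usage: assert that normalized pixel values stay in [0, 1].
# Evaluating `checked_image` also runs the bound checks when enabled.
image = tf.placeholder(tf.float32, shape=[None, None, 3], name='image')
checked_image = bounds_unlabeled(0.0, 1.0, image, name='image_bounds')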
Example #2
Source File: check_ops_test.py from deep_image_model (Apache License 2.0)
def test_raises_when_negative(self):
    with self.test_session():
        zoe = tf.constant([-1, -2], name="zoe")
        with tf.control_dependencies([tf.assert_non_negative(zoe)]):
            out = tf.identity(zoe)
        with self.assertRaisesOpError("zoe"):
            out.eval()
Example #3
Source File: check_ops_test.py from deep_image_model (Apache License 2.0)
def test_doesnt_raise_when_zero_and_positive(self):
    with self.test_session():
        lucas = tf.constant([0, 2], name="lucas")
        with tf.control_dependencies([tf.assert_non_negative(lucas)]):
            out = tf.identity(lucas)
        out.eval()
Example #4
Source File: check_ops_test.py from deep_image_model (Apache License 2.0)
def test_empty_tensor_doesnt_raise(self):
    # A tensor is non-negative when it satisfies:
    #   For every element x_i in x, x_i >= 0
    # and an empty tensor has no elements, so this is trivially satisfied.
    # This is standard set theory.
    with self.test_session():
        empty = tf.constant([], name="empty")
        with tf.control_dependencies([tf.assert_non_negative(empty)]):
            out = tf.identity(empty)
        out.eval()
Example #5
Source File: embed.py from triplet-reid (MIT License)
def five_crops(image, crop_size):
    """ Returns the central and four corner crops of `crop_size` from `image`. """
    image_size = tf.shape(image)[:2]
    crop_margin = tf.subtract(image_size, crop_size)
    assert_size = tf.assert_non_negative(
        crop_margin, message='Crop size must be smaller or equal to the image size.')
    with tf.control_dependencies([assert_size]):
        top_left = tf.floor_div(crop_margin, 2)
        bottom_right = tf.add(top_left, crop_size)
        center = image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
        top_left = image[:-crop_margin[0], :-crop_margin[1]]
        top_right = image[:-crop_margin[0], crop_margin[1]:]
        bottom_left = image[crop_margin[0]:, :-crop_margin[1]]
        bottom_right = image[crop_margin[0]:, crop_margin[1]:]
        return center, top_left, top_right, bottom_left, bottom_right
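A hypothetical usage sketch for five_crops, assuming a decoded uint8 image tensor and a 224x224 crop (both the file path and the crop size are illustrative):

# Hypothetical usage: take the center and four corner 224x224 crops.
image_bytes = tf.read_file('example.jpg')              # illustrative path
image = tf.image.decode_jpeg(image_bytes, channels=3)
crop_size = tf.constant([224, 224], dtype=tf.int32)
center, tl, tr, bl, br = five_crops(image, crop_size)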
Example #6
Source File: dataset_utils.py from TwinGAN (Apache License 2.0)
def __init__(self, preprocess_fn=None):
    # Create a single Session to run all image coding calls.
    # All images must either be in the same format, or have their encoding
    # method recorded.

    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    self._decode_png = tf.image.decode_png(self._png_data)
    self._png_to_jpeg = tf.image.encode_jpeg(self._decode_png, format='rgb', quality=100)

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    # Decodes image data in any supported format and re-encodes it as JPEG.
    self._decode_image_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_image(self._decode_image_data, channels=3)
    self._image_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Encodes a raw uint8 array as JPEG.
    self._array_image = tf.placeholder(shape=[None, None, None], dtype=tf.uint8)
    self._encode_array_to_jpeg = tf.image.encode_jpeg(self._array_image, format='rgb', quality=100)

    if preprocess_fn:
        image.set_shape([None, None, 3])
        self._decode_preprocessed_image = preprocess_fn(image)
        assert self._decode_preprocessed_image.dtype == tf.uint8
        # The preprocessed image must be a valid uint8 image: every pixel in [0, 255].
        le_255 = tf.assert_less_equal(self._decode_preprocessed_image, tf.constant(255, tf.uint8))
        ge_0 = tf.assert_non_negative(self._decode_preprocessed_image)
        with tf.control_dependencies([le_255, ge_0]):
            format = 'grayscale' if self._decode_preprocessed_image.shape[-1] == 1 else 'rgb'
            self._image_to_preprocessed_jpeg = tf.image.encode_jpeg(
                self._decode_preprocessed_image, format=format, quality=100)
            self._image_preprocessed_shape = tf.shape(self._decode_preprocessed_image)
    else:
        self._image_to_preprocessed_jpeg = None
        self._image_preprocessed_shape = None
Example #7
Source File: embed.py from vehicle-triplet-reid (MIT License)
def five_crops(image, crop_size):
    """ Returns the central and four corner crops of `crop_size` from `image`. """
    image_size = tf.shape(image)[:2]
    crop_margin = tf.subtract(image_size, crop_size)
    assert_size = tf.assert_non_negative(
        crop_margin, message='Crop size must be smaller or equal to the image size.')
    with tf.control_dependencies([assert_size]):
        top_left = tf.floor_div(crop_margin, 2)
        bottom_right = tf.add(top_left, crop_size)
        center = image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
        top_left = image[:-crop_margin[0], :-crop_margin[1]]
        top_right = image[:-crop_margin[0], crop_margin[1]:]
        bottom_left = image[crop_margin[0]:, :-crop_margin[1]]
        bottom_right = image[crop_margin[0]:, crop_margin[1]:]
        return center, top_left, top_right, bottom_left, bottom_right
Example #8
Source File: vae_test.py from vae-seq (Apache License 2.0)
def _test_assertions(inf_tensors, gen_tensors, eval_tensors):
    """Returns in-graph assertions for testing."""
    observed, latents, divs, log_probs, elbo = inf_tensors
    generated, sampled_latents = gen_tensors
    eval_log_probs, = eval_tensors

    # For RNN, we return None from infer_latents as an optimization.
    if latents is None:
        latents = sampled_latents

    def _same_batch_and_sequence_size_asserts(t1, name1, t2, name2):
        return [
            tf.assert_equal(
                util.batch_size_from_nested_tensors(t1),
                util.batch_size_from_nested_tensors(t2),
                message="Batch: " + name1 + " vs " + name2),
            tf.assert_equal(
                util.sequence_size_from_nested_tensors(t1),
                util.sequence_size_from_nested_tensors(t2),
                message="Steps: " + name1 + " vs " + name2),
        ]

    def _same_shapes(nested1, nested2):
        return snt.nest.flatten(snt.nest.map(
            lambda t1, t2: tf.assert_equal(
                tf.shape(t1), tf.shape(t2),
                message="Shapes: " + t1.name + " vs " + t2.name),
            nested1, nested2))

    def _all_same_batch_and_sequence_sizes(nested):
        batch_size = util.batch_size_from_nested_tensors(nested)
        sequence_size = util.sequence_size_from_nested_tensors(nested)
        return [
            tf.assert_equal(tf.shape(tensor)[0], batch_size,
                            message="Batch: " + tensor.name)
            for tensor in snt.nest.flatten(nested)
        ] + [
            tf.assert_equal(tf.shape(tensor)[1], sequence_size,
                            message="Steps: " + tensor.name)
            for tensor in snt.nest.flatten(nested)
        ]

    assertions = [
        tf.assert_non_negative(divs),
        tf.assert_non_positive(log_probs),
    ] + _same_shapes(
        (log_probs, log_probs, observed, latents),
        (divs, eval_log_probs, generated, sampled_latents)
    ) + _all_same_batch_and_sequence_sizes(
        (observed, latents, divs)
    ) + _all_same_batch_and_sequence_sizes(
        (generated, sampled_latents)
    )

    vars_ = tf.trainable_variables()
    grads = tf.gradients(-elbo, vars_)
    for (var, grad) in zip(vars_, grads):
        assertions.append(tf.check_numerics(grad, "Gradient for " + var.name))
    return assertions
Example #9
Source File: summary.py from Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)
def op(name, images, max_outputs=3, display_name=None, description=None,
       collections=None):
    """Create an image summary op for use in a TensorFlow graph.

    Arguments:
      name: A unique name for the generated summary node.
      images: A `Tensor` representing pixel data with shape `[k, w, h, c]`,
        where `k` is the number of images, `w` and `h` are the width and
        height of the images, and `c` is the number of channels, which
        should be 1, 3, or 4. Any of the dimensions may be statically
        unknown (i.e., `None`).
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs`
        many images will be used and the rest silently discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collections keys. The new summary
        op is added to these collections. Defaults to
        `[tf.GraphKeys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack([tf.as_string(image_shape[1], name='width'),
                               tf.as_string(image_shape[2], name='height')],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)