Python tensorflow.assert_less_equal() Examples

The following are 29 code examples of tensorflow.assert_less_equal(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
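All of the examples use the TF 1.x graph API, where tf.assert_less_equal() returns an op that has no effect unless something depends on it, usually via tf.control_dependencies. Here is a minimal sketch of that pattern; the tensor names and bounds are illustrative, and in TF 2.x the same check lives at tf.debugging.assert_less_equal:

import tensorflow as tf  # TF 1.x graph API, as in all examples below

values = tf.placeholder(tf.float32, shape=[None])
# The assert is a separate op; attach it as a control dependency so it
# actually runs before the tensors that rely on the checked values.
check = tf.assert_less_equal(values, 1.0, message='values must be <= 1')
with tf.control_dependencies([check]):
    checked_values = tf.identity(values)

with tf.Session() as sess:
    sess.run(checked_values, feed_dict={values: [0.2, 0.9]})  # passes
    # Feeding [0.2, 1.5] instead would raise tf.errors.InvalidArgumentError
    # with the message above.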
Example #1
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    replace_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, episodes):
        replace_op = tf.scatter_update(buffer_, rows, elements)
        replace_ops.append(replace_op)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length) 
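Note how the two checks above are deliberately ordered: assert_max_length is created inside the control_dependencies block of assert_capacity, so the capacity check runs first, and the scatter updates depend on both. A stripped-down sketch of that chaining, with placeholder values in place of the memory's buffers:

import tensorflow as tf

rows = tf.constant([0, 1])
length = tf.constant([5, 7])
check_rows = tf.assert_less(rows, 10, message='capacity exceeded')
with tf.control_dependencies([check_rows]):
    # Created inside the block, so it also carries check_rows as a dependency.
    check_len = tf.assert_less_equal(length, 8, message='max length exceeded')
with tf.control_dependencies([check_len]):
    result = tf.identity(length)  # stands in for the scatter updates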
Example #2
Source File: GAN.py    From VideoSuperResolution with MIT License
def _preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  images = tf.cast(images, tf.float32)

  # tfgan_eval.preprocess_image function takes values in [0, 255]
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
    fn=_TFGAN.preprocess_image,
    elems=images,
    back_prop=False)

  return preprocessed_images 
Example #3
Source File: memory.py    From batch-ppo with Apache License 2.0
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      replace_ops = tools.nested.map(
          lambda var, val: tf.scatter_update(var, rows, val),
          self._buffers, episodes, flatten=True)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length) 
Example #4
Source File: gmm_ops.py    From deep_image_model with Apache License 2.0
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
random_seed: Seed for the PRNG used to sample the initial cluster rows.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = tf.add_n([tf.shape(inp)[0] for inp in data])
  with tf.control_dependencies([tf.assert_less_equal(num_clusters, num_data)]):
    indices = tf.random_uniform([num_clusters],
                                minval=0,
                                maxval=tf.cast(num_data, tf.int64),
                                seed=random_seed,
                                dtype=tf.int64)
  indices = tf.cast(indices, tf.int32) % num_data
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init 
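The same op also guards scalar relationships; here it ensures there are at least num_clusters rows of data before sampling indices. A self-contained sketch of that guard with made-up constants (the embedding lookup from the example is omitted):

import tensorflow as tf

num_clusters = tf.constant(4)
num_data = tf.constant(100)
check = tf.assert_less_equal(num_clusters, num_data,
                             message='fewer data points than clusters')
with tf.control_dependencies([check]):
    indices = tf.random_uniform([4], minval=0,
                                maxval=tf.cast(num_data, tf.int64),
                                dtype=tf.int64)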
Example #5
Source File: clustering_ops.py    From deep_image_model with Apache License 2.0
def _init_clusters_random(self):
    """Does random initialization of clusters.

    Returns:
      Tensor of randomly initialized clusters.
    """
    num_data = tf.add_n([tf.shape(inp)[0] for inp in self._inputs])
    # Note that for mini-batch k-means, we should ensure that the batch size of
    # data used during initialization is sufficiently large to avoid duplicated
    # clusters.
    with tf.control_dependencies(
        [tf.assert_less_equal(self._num_clusters, num_data)]):
      indices = tf.random_uniform(tf.reshape(self._num_clusters, [-1]),
                                  minval=0,
                                  maxval=tf.cast(num_data, tf.int64),
                                  seed=self._random_seed,
                                  dtype=tf.int64)
      clusters_init = embedding_lookup(self._inputs, indices,
                                       partition_strategy='div')
      return clusters_init 
Example #6
Source File: data_reader.py    From kfac with Apache License 2.0
def __call__(self, batch_size):
    """Reads `batch_size` data.

    Args:
      batch_size: Tensor of type `int32`, batch size of the data to be
        retrieved from the dataset. `batch_size` should be less than or
        equal to `max_batch_size`.

    Returns:
Read data, an iterable of tensors with batch size equal to `batch_size`.
    """
    check_size = tf.assert_less_equal(
        batch_size,
        tf.convert_to_tensor(self._max_batch_size, dtype=tf.int32),
        message='Data set read failure, Batch size greater than max allowed.'
    )
    with tf.control_dependencies([check_size]):
      return _slice_data(self._dataset, batch_size) 
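Because the bound is wrapped in tf.convert_to_tensor, the comparison works whether the maximum was stored as a Python int or as a scalar tensor. A standalone version of the same check; the names here are placeholders, not the kfac API:

import tensorflow as tf

max_batch_size = 128
batch_size = tf.placeholder(tf.int32, shape=[])
check_size = tf.assert_less_equal(
    batch_size,
    tf.convert_to_tensor(max_batch_size, dtype=tf.int32),
    message='batch_size greater than max allowed')
with tf.control_dependencies([check_size]):
    safe_batch_size = tf.identity(batch_size)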
Example #7
Source File: data_reader_alt.py    From kfac with Apache License 2.0
def __call__(self, batch_size):
    """Reads `batch_size` data.

    Args:
      batch_size: Tensor of type `int32`. Batch size of the data to be
        retrieved from the dataset. `batch_size` should be less than or
        equal to the number of examples in the dataset.

    Returns:
       Read data, a list of Tensors with batch size equal to `batch_size`.
    """
    check_size = tf.assert_less_equal(
        batch_size,
        tf.convert_to_tensor(self._num_examples, dtype=tf.int32),
        message='Data set read failure, batch_size > num_examples.'
    )
    with tf.control_dependencies([check_size]):
      self._indices = tf.random.shuffle(
          tf.range(self._num_examples, dtype=tf.int32))
      return _extract_data(self._dataset, self._indices[:batch_size]) 
Example #8
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _project_perturbation(perturbation, epsilon, input_image):
    """Project `perturbation` onto L-infinity ball of radius `epsilon`."""
    # Ensure inputs are in the correct range
    with tf.control_dependencies([
        tf.assert_less_equal(input_image, 1.0),
        tf.assert_greater_equal(input_image, 0.0)
    ]):
        clipped_perturbation = tf.clip_by_value(
            perturbation, -epsilon, epsilon)
        new_image = tf.clip_by_value(
            input_image + clipped_perturbation, 0., 1.)
        return new_image - input_image 
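In a projected-gradient-style attack loop, a projection like this is applied after every step. A hedged sketch of a single step using the function above; the step direction here is a random stand-in for a real loss gradient:

import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[1, 28, 28, 1])  # values in [0, 1]
epsilon = 0.3
perturbation = tf.zeros_like(image)
step = 0.01 * tf.sign(tf.random_normal(tf.shape(image)))  # stand-in gradient
perturbation = _project_perturbation(perturbation + step, epsilon, image)
adv_image = image + perturbation  # within epsilon of image, clipped to [0, 1]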
Example #9
Source File: preprocessing.py    From multilabel-image-classification-tensorflow with MIT License
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
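A quick check of the arithmetic: 0.0 maps to (0.0 - 0.5) * 2.0 = -1.0, 0.5 to 0.0, and 1.0 to 1.0. A runnable sketch using the function defined above:

import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])
scaled = scale_to_inception_range(image)

with tf.Session() as sess:
    out = sess.run(scaled, feed_dict={image: [[[0.0, 0.5, 1.0]]]})
    # out == [[[-1., 0., 1.]]]; feeding values outside [0, 1] would instead
    # raise tf.errors.InvalidArgumentError from the range asserts.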
Example #10
Source File: utils_tf.py    From cleverhans with MIT License
def assert_less_equal(*args, **kwargs):
  """
  Wrapper for tf.assert_less_equal
  Overrides tf.device so that the assert always goes on CPU.
  The unwrapped version raises an exception if used with tf.device("/GPU:x").
  """
  with tf.device("/CPU:0"):
    return tf.assert_less_equal(*args, **kwargs) 
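A usage sketch of the wrapper, assuming a GPU is visible: because the wrapper re-enters tf.device("/CPU:0"), the assert op lands on CPU even when the caller builds the rest of the graph under a GPU device scope:

import tensorflow as tf

with tf.device("/GPU:0"):
    probs = tf.nn.softmax(tf.random_normal([8, 10]))
    # assert_less_equal here is the wrapper defined above; the check op is
    # placed on CPU despite the surrounding GPU scope.
    check = assert_less_equal(probs, 1.0, message='probabilities must be <= 1')
    with tf.control_dependencies([check]):
        probs = tf.identity(probs)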
Example #11
Source File: utils.py    From models with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
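The summarize=decay_size argument controls how many offending entries are printed when the check fires; sizing it to the full tensor means every bad value appears in the error. A small illustration with made-up values:

import tensorflow as tf

decay = tf.constant([0.9, 1.5, 2.0])  # two entries violate <= 1
check = tf.assert_less_equal(
    decay, 1., summarize=decay.get_shape().num_elements())
with tf.control_dependencies([check]):
    out = tf.identity(decay)

with tf.Session() as sess:
    try:
        sess.run(out)
    except tf.errors.InvalidArgumentError as e:
        print(e.message)  # reports the offending elements, up to `summarize`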
Example #12
Source File: preprocessing.py    From models with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #13
Source File: utils.py    From g-tensorflow-models with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #14
Source File: preprocessing.py    From g-tensorflow-models with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #15
Source File: inception_network.py    From precision-recall-distributions with Apache License 2.0
def preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  # Images should have 3 channels.
  assert images.shape[3].value == 3

  # tf.contrib.gan.eval.preprocess_image function takes values in [0, 255]
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
      fn=tf.contrib.gan.eval.preprocess_image,
      elems=images,
      back_prop=False
  )

  return preprocessed_images 
Example #16
Source File: eval_utils.py    From compare_gan with Apache License 2.0
def inception_transform(inputs):
  with tf.control_dependencies([
      tf.assert_greater_equal(inputs, 0.0),
      tf.assert_less_equal(inputs, 255.0)]):
    inputs = tf.identity(inputs)
  preprocessed_inputs = tf.map_fn(
      fn=tfgan.eval.preprocess_image, elems=inputs, back_prop=False)
  return tfgan.eval.run_inception(
      preprocessed_inputs,
      graph_def=get_inception_graph_def(),
      output_tensor=["pool_3:0", "logits:0"]) 
Example #17
Source File: utils.py    From object_detection_with_tensorflow with MIT License
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #18
Source File: preprocessing.py    From object_detection_with_tensorflow with MIT License
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #19
Source File: utils.py    From object_detection_kitti with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #20
Source File: utils.py    From multilabel-image-classification-tensorflow with MIT License
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #21
Source File: dataset_utils.py    From TwinGAN with Apache License 2.0
def __init__(self, preprocess_fn=None):
    # Create a single Session to run all image coding calls.
    # All images must either be in the same format, or have their encoding method recorded.

    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    self._decode_png = tf.image.decode_png(self._png_data)
    self._png_to_jpeg = tf.image.encode_jpeg(self._decode_png, format='rgb', quality=100)

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    # Initializes function that re-encodes arbitrary image data as JPEG.
    self._decode_image_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_image(self._decode_image_data, channels=3)
    self._image_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

    self._array_image = tf.placeholder(shape=[None, None, None], dtype=tf.uint8)
    self._encode_array_to_jpeg = tf.image.encode_jpeg(self._array_image, format='rgb', quality=100)


    if preprocess_fn:
      image.set_shape([None, None, 3])
      self._decode_preprocessed_image = preprocess_fn(image)
      assert self._decode_preprocessed_image.dtype == tf.uint8
      le_255 = tf.assert_less_equal(self._decode_preprocessed_image, tf.constant(255, tf.uint8))
      ge_0 = tf.assert_non_negative(self._decode_preprocessed_image)
      with tf.control_dependencies([le_255, ge_0]):
        format = 'grayscale' if self._decode_preprocessed_image.shape[-1] == 1 else 'rgb'
        self._image_to_preprocessed_jpeg = tf.image.encode_jpeg(self._decode_preprocessed_image, format=format, quality=100)
        self._image_preprocessed_shape = tf.shape(self._decode_preprocessed_image)
    else:
      self._image_to_preprocessed_jpeg = None
      self._image_preprocessed_shape = None 
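One detail worth copying from this example: the upper bound is built as tf.constant(255, tf.uint8) so that both arguments to tf.assert_less_equal share a dtype; the helper converts each argument with its natural dtype, so comparing a uint8 tensor against a plain Python 255 (which becomes int32) would typically fail with a dtype error. A standalone sketch of the two checks:

import tensorflow as tf

img = tf.placeholder(tf.uint8, shape=[None, None, 3])
le_255 = tf.assert_less_equal(img, tf.constant(255, tf.uint8))
ge_0 = tf.assert_non_negative(img)
with tf.control_dependencies([le_255, ge_0]):
    jpeg = tf.image.encode_jpeg(img, format='rgb', quality=100)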
Example #22
Source File: preprocessing.py    From yolo_v2 with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #23
Source File: utils.py    From yolo_v2 with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #24
Source File: preprocessing.py    From Gun-Detector with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #25
Source File: utils.py    From Gun-Detector with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #26
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      with tf.control_dependencies([tf.assert_less_equal(small, small)]):
        out = tf.identity(small)
      out.eval() 
Example #27
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_greater(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 4], name="big")
      with tf.control_dependencies(
          [tf.assert_less_equal(big, small, message="fail")]):
        out = tf.identity(small)
      with self.assertRaisesOpError("fail.*big.*small"):
        out.eval() 
Example #28
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_less_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 2], name="big")
      with tf.control_dependencies([tf.assert_less_equal(small, big)]):
        out = tf.identity(small)
      out.eval() 
Example #29
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1], name="small")
      big = tf.constant([3, 1], name="big")
      with tf.control_dependencies([tf.assert_less_equal(small, big)]):
        out = tf.identity(small)
      out.eval()
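As the final test shows, the comparison follows normal broadcasting rules, and a violation only surfaces when a dependent op is actually evaluated. A condensed version of the passing and failing cases outside the test harness:

import tensorflow as tf

small = tf.constant([1], name='small')
big = tf.constant([3, 1], name='big')
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
    ok = tf.identity(small)   # [1] broadcasts against [3, 1]: 1 <= 3, 1 <= 1
with tf.control_dependencies([tf.assert_less_equal(big, small, message='fail')]):
    bad = tf.identity(small)  # 3 <= 1 is false, so evaluating this raises

with tf.Session() as sess:
    sess.run(ok)  # passes
    try:
        sess.run(bad)
    except tf.errors.InvalidArgumentError as e:
        print('raised as expected:', e.message.splitlines()[0])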