Python tensorflow.python.ops.math_ops.less_equal() Examples

The following are 29 code examples of tensorflow.python.ops.math_ops.less_equal(), collected from open-source projects. The source file and originating project are listed above each example. You may also want to check out the other available functions and classes of the tensorflow.python.ops.math_ops module.
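Before the project examples, a minimal sketch of the op itself (assuming a TF 1.x runtime, since these internal modules predate TF 2.x; the public alias tf.less_equal is equivalent):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1, 2, 3])
y = tf.constant([2, 2, 2])
le = math_ops.less_equal(x, y)  # element-wise x <= y, with broadcasting

with tf.Session() as sess:
    print(sess.run(le))  # [ True  True False]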
Example #1
Source File: target_column.py    From lambda-packs with MIT License
def __init__(self, label_name, weight_column_name):

    def loss_fn(logits, target):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
          ["target's shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name) 
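This rank-guard pattern recurs verbatim in Examples #2, #3, and #17: less_equal over array_ops.rank feeds an Assert, and the reshape runs only after the assertion. A standalone sketch under the same TF 1.x graph-mode assumptions:

import tensorflow as tf
from tensorflow.python.ops import array_ops, control_flow_ops, math_ops

target = tf.placeholder(tf.float32, shape=[None])  # rank-1 [batch_size]
check_shape_op = control_flow_ops.Assert(
    math_ops.less_equal(array_ops.rank(target), 2),
    ["target's shape should be either [batch_size, 1] or [batch_size]"])
with tf.control_dependencies([check_shape_op]):
    # Runs only after the rank check; yields shape [batch_size, 1].
    target_2d = array_ops.reshape(target, shape=[array_ops.shape(target)[0], 1])

with tf.Session() as sess:
    print(sess.run(target_2d, feed_dict={target: [0.0, 1.0, 1.0]}))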
Example #2
Source File: target_column.py    From keras-lambda with MIT License
def __init__(self, label_name, weight_column_name):

    def loss_fn(logits, target):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
          ["target's shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name) 
Example #3
Source File: target_column.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, label_name, weight_column_name):

    def loss_fn(logits, target):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
          ["target's shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name) 
Example #4
Source File: head.py    From deep_image_model with Apache License 2.0
def __init__(self, label_name, weight_column_name, enable_centered_bias,
               head_name, thresholds):
    def loss_fn(logits, labels):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(labels), 2),
          ["labels shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        labels = array_ops.reshape(
            labels, shape=[array_ops.shape(labels)[0], 1])
      return losses.hinge_loss(logits, labels)

    super(_BinarySvmHead, self).__init__(
        train_loss_fn=loss_fn,
        eval_loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name,
        thresholds=thresholds) 
Example #5
Source File: dnn.py    From deep_image_model with Apache License 2.0
def _reshape_labels(labels):
  """"Reshapes labels into [batch_size, 1] to be compatible with logits."""
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ["labels shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    labels = array_ops.reshape(labels,
                               shape=[array_ops.shape(labels)[0], 1])

  return labels 
Example #6
Source File: core_test.py    From keras-lambda with MIT License
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = array_ops.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = array_ops.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, math_ops.add, core.add),
        ('sub', operator.sub, math_ops.subtract, core.sub),
        ('mul', operator.mul, math_ops.multiply, core.mul),
        ('div', operator.truediv, math_ops.div, core.div),
        ('mod', operator.mod, math_ops.mod, core.mod),
        ('pow', operator.pow, math_ops.pow, core.pow_function),
        ('equal', None, math_ops.equal, core.equal),
        ('less', operator.lt, math_ops.less, core.less),
        ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
        ('not_equal', None, math_ops.not_equal, core.not_equal),
        ('greater', operator.gt, math_ops.greater, core.greater),
        ('greater_equal', operator.ge, math_ops.greater_equal,
         core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3] 
Example #7
Source File: core.py    From keras-lambda with MIT License
def __le__(self, other):
    return less_equal(self, other) 
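Here less_equal is LabeledTensor's own wrapper around math_ops.less_equal, so a <= b on labeled tensors builds the corresponding graph op. A sketch of the same delegation on a hypothetical wrapper class (Box is illustrative, not part of the project):

from tensorflow.python.ops import math_ops

class Box(object):
    """Hypothetical tensor wrapper forwarding <= to math_ops.less_equal."""

    def __init__(self, tensor):
        self.tensor = tensor

    def __le__(self, other):
        # box_a <= box_b builds an element-wise bool tensor.
        return math_ops.less_equal(self.tensor, other.tensor)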
Example #8
Source File: head.py    From keras-lambda with MIT License
def _assert_labels_rank(labels):
  return control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ("labels shape should be either [batch_size, 1] or [batch_size]",)) 
Example #9
Source File: math_grad.py    From keras-lambda with MIT License
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal) 
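The shared helper routes the incoming gradient by a mask built with the selector op; for Minimum that mask is x <= y. A simplified sketch of the routing (it omits the broadcast-reduction bookkeeping the real _MaximumMinimumGrad performs, and assumes x, y, and grad share one shape):

from tensorflow.python.ops import array_ops, math_ops

def minimum_grad_sketch(x, y, grad):
    """Simplified gradient routing for min(x, y)."""
    mask = math_ops.less_equal(x, y)         # True where x attains the min
    zeros = array_ops.zeros_like(grad)
    gx = array_ops.where(mask, grad, zeros)  # grad flows to x where x <= y
    gy = array_ops.where(mask, zeros, grad)  # grad flows to y where x > y
    return gx, gy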
Example #10
Source File: math_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal) 
Example #11
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y) 
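A usage sketch for this backend wrapper (the import path below is an assumption; in user code K is usually obtained via tf.keras.backend):

from tensorflow.python.keras import backend as K  # assumed path

a = K.constant([[1.0, 5.0, 3.0]])  # shape (1, 3)
b = K.constant([[2.0], [4.0]])     # shape (2, 1)
mask = K.less_equal(a, b)          # broadcasts to (2, 3): mask[i, j] = a[0, j] <= b[i, 0]
print(K.eval(mask))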
Example #12
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_rel_ops():
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    _test_forward_rel_op([t1, t2], math_ops.less)
    _test_forward_rel_op([t1, t2], math_ops.greater)
    _test_forward_rel_op([t1, t2], math_ops.less_equal)
    _test_forward_rel_op([t1, t2], math_ops.greater_equal)
    _test_forward_rel_op([t1, t2], math_ops.equal)
    _test_forward_rel_op([t1, t2], math_ops.not_equal)

#######################################################################
# ExpandDims
# ---------- 
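_test_forward_rel_op is the TVM suite's harness, which compiles each op through TVM and compares against TensorFlow; the underlying expectation can be checked directly against NumPy (a sketch, assuming TF 1.x):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops

t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
le = math_ops.less_equal(tf.constant(t1), tf.constant(t2))
with tf.Session() as sess:
    # Element-wise t1 <= t2 agrees with NumPy's semantics.
    assert (sess.run(le) == np.less_equal(t1, t2)).all()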
Example #13
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_less_equal(data):
    """ One iteration of less_equal """
    return _test_elemwise(math_ops.less_equal, data)
#######################################################################
# Equal
# ----- 
Example #14
Source File: deepmetric.py    From Deep-Hash-Table-ICML18 with MIT License
def set_up_train(self):
        self.logger.info("Model setting up train starts")

        decay_func = DECAY_DICT[self.args.dtype]
        if hasattr(self, 'start_epoch'):
            self.logger.info("Current start epoch : {}".format(self.start_epoch))
            DECAY_PARAMS_DICT[self.args.hdtype][self.args.nbatch][self.args.hdptype]['initial_step'] = self.nbatch_train*self.start_epoch
        self.lr, update_step_op = decay_func(**DECAY_PARAMS_DICT[self.args.dtype][self.args.nbatch][self.args.dptype])

        print(vars_info_vl(tf.trainable_variables()))
        update_ops = tf.get_collection("update_ops")
        with tf.control_dependencies(update_ops+[update_step_op]):
            self.train_op = get_multi_train_op(tf.train.AdamOptimizer, self.loss, [self.lr], [tf.trainable_variables()])

        self.graph_ops_dict = {'train' : [self.train_op, self.loss], 'val' : self.loss, 'test' : self.loss}
        
        self.val_embed_tensor1 = tf.placeholder(tf.float32, shape=[self.args.nbatch, self.args.m])
        self.val_embed_tensor2 = tf.placeholder(tf.float32, shape=[self.nval, self.args.m])

        self.p_dist = math_ops.add(
                    math_ops.reduce_sum(math_ops.square(self.val_embed_tensor1), axis=[1], keep_dims=True),
                    math_ops.reduce_sum(math_ops.square(array_ops.transpose(self.val_embed_tensor2)), axis=[0], keep_dims=True))-\
                2.0 * math_ops.matmul(self.val_embed_tensor1, array_ops.transpose(self.val_embed_tensor2)) # [batch_size, 1], [1, ndata],  [batch_size, ndata]

        self.p_dist = math_ops.maximum(self.p_dist, 0.0) # [batch_size, ndata] 
        self.p_dist = math_ops.multiply(self.p_dist, math_ops.to_float(math_ops.logical_not(math_ops.less_equal(self.p_dist, 0.0))))
        self.p_max_idx = tf.nn.top_k(-self.p_dist, k=2)[1] # [batch_size, 2] # get smallest 2

        self.logger.info("Model setting up train ends") 
Example #15
Source File: deepmetric.py    From Deep-Hash-Table-ICML18 with MIT License
def set_up_train(self, pretrain=False):
        self.logger.info("Model setting up train starts")

        decay_func = DECAY_DICT[self.args.dtype]
        if hasattr(self, 'start_epoch'):
            self.logger.info("Current start epoch : {}".format(self.start_epoch))
            DECAY_PARAMS_DICT[self.args.hdtype][self.args.nbatch][self.args.hdptype]['initial_step'] = self.nbatch_train*self.start_epoch
        self.lr, update_step_op = decay_func(**DECAY_PARAMS_DICT[self.args.dtype][self.args.nbatch][self.args.dptype])

        print(vars_info_vl(tf.trainable_variables()))
        update_ops = tf.get_collection("update_ops")

        with tf.control_dependencies(update_ops+[update_step_op]):
            self.train_op = get_multi_train_op(tf.train.AdamOptimizer, self.loss, [self.lr], [tf.trainable_variables()])

        self.graph_ops_dict = {'train' : [self.train_op, self.loss], 'val' : self.loss, 'test' : self.loss}
        self.val_embed_tensor1 = tf.placeholder(tf.float32, shape=[self.args.nbatch, self.args.m])
        self.val_embed_tensor2 = tf.placeholder(tf.float32, shape=[self.nval, self.args.m])

        self.p_dist = math_ops.add(
                    math_ops.reduce_sum(math_ops.square(self.val_embed_tensor1), axis=[1], keep_dims=True),
                    math_ops.reduce_sum(math_ops.square(array_ops.transpose(self.val_embed_tensor2)), axis=[0], keep_dims=True))-\
                2.0 * math_ops.matmul(self.val_embed_tensor1, array_ops.transpose(self.val_embed_tensor2)) # [batch_size, 1], [1, ndata],  [batch_size, ndata]

        self.p_dist = math_ops.maximum(self.p_dist, 0.0) # [batch_size, ndata] 
        self.p_dist = math_ops.multiply(self.p_dist, math_ops.to_float(math_ops.logical_not(math_ops.less_equal(self.p_dist, 0.0))))
        self.p_max_idx = tf.nn.top_k(-self.p_dist, k=2)[1] # [batch_size, 2] # get smallest 2

        self.logger.info("Model setting up train ends") 
Example #16
Source File: sparse_optimizers.py    From rigl with Apache License 2.0
def is_mask_update_iter(self, global_step, last_update_step):
    """Function for checking if the current step is a mask update step.

    It also creates the drop_fraction op and assigns it to the self object.

    Args:
      global_step: tf.Variable(int), current training step.
      last_update_step: tf.Variable(int), holding the last iteration the mask
        is updated. Used to determine whether current iteration is a mask
        update step.

    Returns:
      bool, whether the current iteration is a mask_update step.
    """
    gs_dtype = global_step.dtype
    self._begin_step = math_ops.cast(self._begin_step, gs_dtype)
    self._end_step = math_ops.cast(self._end_step, gs_dtype)
    self._frequency = math_ops.cast(self._frequency, gs_dtype)
    is_step_within_update_range = math_ops.logical_and(
        math_ops.greater_equal(global_step, self._begin_step),
        math_ops.logical_or(
            math_ops.less_equal(global_step, self._end_step),
            # If _end_step is negative, we never stop updating the mask.
            # In other words we update the mask with given frequency until the
            # training ends.
            math_ops.less(self._end_step, 0)))
    is_update_step = math_ops.less_equal(
        math_ops.add(last_update_step, self._frequency), global_step)
    is_mask_update_iter_op = math_ops.logical_and(
        is_step_within_update_range, is_update_step)
    self.drop_fraction = self.get_drop_fraction(global_step,
                                                is_mask_update_iter_op)
    return is_mask_update_iter_op 
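Stripped of the tensor machinery, the predicate is plain integer arithmetic; a pure-Python sketch (scalar ints in place of tf.Variables):

def is_mask_update_step(global_step, last_update_step,
                        begin_step, end_step, frequency):
    """Pure-Python equivalent of the predicate above (a sketch)."""
    in_range = global_step >= begin_step and (
        global_step <= end_step or end_step < 0)  # negative end_step: never stop
    due = last_update_step + frequency <= global_step
    return in_range and due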
Example #17
Source File: target_column.py    From deep_image_model with Apache License 2.0
def __init__(self, label_name, weight_column_name):
    def loss_fn(logits, target):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
          ["target's shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return losses.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name) 
Example #18
Source File: math_grad.py    From lambda-packs with MIT License
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal) 
Example #19
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal) 
Example #20
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def test_forward_rel_ops():
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    _test_forward_rel_op([t1, t2], math_ops.less)
    _test_forward_rel_op([t1, t2], math_ops.greater)
    _test_forward_rel_op([t1, t2], math_ops.less_equal)
    _test_forward_rel_op([t1, t2], math_ops.greater_equal)
    _test_forward_rel_op([t1, t2], math_ops.equal)
    _test_forward_rel_op([t1, t2], math_ops.not_equal)


#######################################################################
# Main
# ---- 
Example #21
Source File: core_test.py    From auto-alt-text-lambda-api with MIT License
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = array_ops.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = array_ops.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, math_ops.add, core.add),
        ('sub', operator.sub, math_ops.subtract, core.sub),
        ('mul', operator.mul, math_ops.multiply, core.mul),
        ('div', operator.truediv, math_ops.div, core.div),
        ('mod', operator.mod, math_ops.mod, core.mod),
        ('pow', operator.pow, math_ops.pow, core.pow_function),
        ('equal', None, math_ops.equal, core.equal),
        ('less', operator.lt, math_ops.less, core.less),
        ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
        ('not_equal', None, math_ops.not_equal, core.not_equal),
        ('greater', operator.gt, math_ops.greater, core.greater),
        ('greater_equal', operator.ge, math_ops.greater_equal,
         core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3] 
Example #22
Source File: core.py    From auto-alt-text-lambda-api with MIT License
def __le__(self, other):
    return less_equal(self, other) 
Example #23
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _assert_labels_rank(labels):
  return control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ("labels shape should be either [batch_size, 1] or [batch_size]",)) 
Example #24
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal) 
Example #25
Source File: core.py    From lambda-packs with MIT License
def __le__(self, other):
    return less_equal(self, other) 
Example #26
Source File: head.py    From lambda-packs with MIT License
def _assert_labels_rank(labels):
  return control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ("labels shape should be either [batch_size, 1] or [batch_size]",)) 
Example #27
Source File: backend.py    From lambda-packs with MIT License
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y) 
Example #28
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def pairwise_distance(feature, squared=False):
  """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || feature[i, :] - feature[j, :] ||_2

  Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
  """
  pairwise_distances_squared = math_ops.add(
      math_ops.reduce_sum(
          math_ops.square(feature),
          axis=[1],
          keep_dims=True),
      math_ops.reduce_sum(
          math_ops.square(
              array_ops.transpose(feature)),
          axis=[0],
          keep_dims=True)) - 2.0 * math_ops.matmul(
              feature, array_ops.transpose(feature))

  # Deal with numerical inaccuracies. Set small negatives to zero.
  pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
  # Get the mask where the zero distances are at.
  error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)

  # Optionally take the sqrt.
  if squared:
    pairwise_distances = pairwise_distances_squared
  else:
    pairwise_distances = math_ops.sqrt(
        pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)

  # Undo conditionally adding 1e-16.
  pairwise_distances = math_ops.multiply(
      pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))

  num_data = array_ops.shape(feature)[0]
  # Explicitly set diagonals to zero.
  mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
      array_ops.ones([num_data]))
  pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
  return pairwise_distances 
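A usage sketch for pairwise_distance as defined above, assuming Example #28's imports are in scope and a TF 1.x session (math_ops.to_float in the body is a TF 1.x API):

import tensorflow as tf

feature = tf.constant([[0.0, 0.0], [3.0, 4.0]], dtype=tf.float32)
dist = pairwise_distance(feature)
with tf.Session() as sess:
    # Expect [[0., 5.], [5., 0.]]: the only nonzero distance is ||(3, 4)|| = 5.
    print(sess.run(dist))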
Example #29
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def pairwise_distance(feature, squared=False):
  """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || feature[i, :] - feature[j, :] ||_2

  Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
  """
  pairwise_distances_squared = math_ops.add(
      math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
      math_ops.reduce_sum(
          math_ops.square(array_ops.transpose(feature)),
          axis=[0],
          keepdims=True)) - 2.0 * math_ops.matmul(feature,
                                                  array_ops.transpose(feature))

  # Deal with numerical inaccuracies. Set small negatives to zero.
  pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
  # Get the mask where the zero distances are at.
  error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)

  # Optionally take the sqrt.
  if squared:
    pairwise_distances = pairwise_distances_squared
  else:
    pairwise_distances = math_ops.sqrt(
        pairwise_distances_squared +
        math_ops.cast(error_mask, dtypes.float32) * 1e-16)

  # Undo conditionally adding 1e-16.
  pairwise_distances = math_ops.multiply(
      pairwise_distances,
      math_ops.cast(math_ops.logical_not(error_mask), dtypes.float32))

  num_data = array_ops.shape(feature)[0]
  # Explicitly set diagonals to zero.
  mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
      array_ops.ones([num_data]))
  pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
  return pairwise_distances
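Example #29 is the same function as Example #28 ported to later TF 1.x APIs: reduce_sum's keep_dims argument became keepdims, and math_ops.to_float(x) was replaced by the equivalent math_ops.cast(x, dtypes.float32).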