Python tensorflow.is_finite() Examples

The following are 26 code examples of tensorflow.is_finite(), collected from open-source projects; the source project and file are noted above each example. tf.is_finite() returns an element-wise boolean tensor that is True where the input is neither NaN nor infinite. Note that this is the TensorFlow 1.x name: TensorFlow 2.x exposes the same op as tf.math.is_finite().
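A minimal illustration of the op before the project examples (a sketch assuming TensorFlow 1.x with graph-mode sessions; the tensor values are made up):

import tensorflow as tf

x = tf.constant([1.0, float('inf'), float('-inf'), float('nan')])
mask = tf.is_finite(x)  # element-wise: True only for ordinary finite floats

with tf.Session() as sess:
    print(sess.run(mask))  # [ True False False False]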
Example #1
Source File: hmc.py    From zhusuan with MIT License
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate 
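The example above computes the Metropolis acceptance rate min(1, exp(H_old - H_new)) and zeroes it wherever the trajectory produced a non-finite value, so those proposals are always rejected. A minimal sketch of the same guard pattern, assuming TensorFlow 1.x and made-up values:

import tensorflow as tf

# hypothetical new_H - old_H per chain; the NaN log-prob mimics a diverged trajectory
delta_h = tf.constant([0.5, -2.0, 1.0])
new_log_prob = tf.constant([-3.0, -1.0, float('nan')])
rate = tf.exp(tf.minimum(-delta_h, 0.0))  # min(1, exp(-delta_h)), always <= 1
ok = tf.logical_and(tf.is_finite(rate), tf.is_finite(new_log_prob))
rate = tf.where(ok, rate, tf.zeros_like(rate))

with tf.Session() as sess:
    print(sess.run(rate))  # approx. [0.6065, 1.0, 0.0]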
Example #2
Source File: postU.py    From decompose with MIT License
def updateK(self, k, prepVars, U):
        f = self.__f
        UfShape = U[f].get_shape()

        lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
        postfk = lhUfk*self.prior[k].cond()
        Ufk = postfk.draw()
        Ufk = tf.expand_dims(Ufk, 0)

        normUfk = tf.norm(Ufk)
        notNanNorm = tf.logical_not(tf.is_nan(normUfk))
        finiteNorm = tf.is_finite(normUfk)
        positiveNorm = normUfk > 0.
        isValid = tf.logical_and(notNanNorm,
                                 tf.logical_and(finiteNorm,
                                                positiveNorm))
        Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                     lambda: U[f])

        # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
        Uf.set_shape(UfShape)
        U[f] = Uf
        return(U) 
Example #3
Source File: loss_func.py    From Keras-FCN with MIT License
def mean_IoU(y_true, y_pred):
	s = K.shape(y_true)

	# reshape such that w and h dim are multiplied together
	y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
	y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

	# correctly classified
	clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
	equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

	intersection = K.sum(equal_entries, axis=1)
	union_per_class = K.sum(y_true_reshaped,axis=1) + K.sum(y_pred_reshaped,axis=1)

	iou = intersection / (union_per_class - intersection)
	iou_mask = tf.is_finite(iou)
	iou_masked = tf.boolean_mask(iou,iou_mask)

	return K.mean( iou_masked ) 
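A class that appears in neither y_true nor y_pred has zero intersection and zero union, so the division produces 0/0 = NaN; tf.is_finite plus tf.boolean_mask drops such classes before averaging instead of poisoning the mean. A toy sketch of that masking step (TensorFlow 1.x, made-up per-class counts):

import tensorflow as tf

intersection = tf.constant([3.0, 0.0])
union = tf.constant([5.0, 0.0])   # the second class never occurs
iou = intersection / union        # [0.6, nan]
iou_masked = tf.boolean_mask(iou, tf.is_finite(iou))

with tf.Session() as sess:
    print(sess.run(iou_masked))  # [0.6]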
Example #4
Source File: loss_func.py    From Keras-FCN with MIT License
def mean_acc(y_true, y_pred):
	s = K.shape(y_true)

	# reshape such that w and h dim are multiplied together
	y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
	y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

	# correctly classified
	clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
	equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

	correct_pixels_per_class = K.sum(equal_entries, axis=1)
	n_pixels_per_class = K.sum(y_true_reshaped,axis=1)

	acc = correct_pixels_per_class / n_pixels_per_class
	acc_mask = tf.is_finite(acc)
	acc_masked = tf.boolean_mask(acc,acc_mask)

	return K.mean(acc_masked) 
Example #5
Source File: gradproc.py    From tensorpack with Apache License 2.0
def _mapper(self, grad, var):
        # this was very slow.... see #3649
        # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
        return grad 
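tf.check_numerics adds a run-time assertion that fails with an InvalidArgumentError as soon as the tensor contains a NaN or Inf; per the comment, it replaced the much slower tf.Assert over tf.is_finite. A minimal sketch of the failure mode (TensorFlow 1.x, made-up gradient values):

import tensorflow as tf

grad = tf.constant([1.0, float('nan')])
checked = tf.check_numerics(grad, 'CheckGradient/demo')

with tf.Session() as sess:
    try:
        sess.run(checked)
    except tf.errors.InvalidArgumentError as e:
        print('caught:', e.message)  # the message names the offending op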
Example #6
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_isfinite():
    _verify_infiniteness_ops(tf.is_finite, "isfinite") 
Example #7
Source File: variable_mgr_util.py    From dlcookbook-dlbs with Apache License 2.0
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single tower, and the number of pairs
      equals the number of towers.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. has_nan_or_inf indicates whether any gradient contains a NaN or an Inf.
  """
  grads = [g for g, _ in grad_and_vars]
  grad = tf.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = tf.multiply(grad, 1.0 / len(grads))

  v = grad_and_vars[0][1]
  if check_inf_nan:
    has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None 
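A sketch of how the helper above might be called, assuming two towers produced gradients for one shared variable (the values are made up; note that passing the list grads to tf.is_finite relies on all gradients sharing one shape):

import tensorflow as tf

v = tf.Variable([0.0, 0.0])
grad_and_vars = [(tf.constant([2.0, float('inf')]), v),
                 (tf.constant([4.0, 0.0]), v)]
(avg_grad, var), has_nan_or_inf = aggregate_single_gradient_using_copy(
    grad_and_vars, use_mean=True, check_inf_nan=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([avg_grad, has_nan_or_inf]))  # [array([3., inf]), True]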
Example #8
Source File: gradproc.py    From ternarynet with Apache License 2.0
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad 
Example #9
Source File: gradproc.py    From Distributed-BA3C with Apache License 2.0
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad 
Example #10
Source File: utils.py    From OpenSeq2Seq with Apache License 2.0
def mask_nans(x):
  x_zeros = tf.zeros_like(x)
  x_mask = tf.is_finite(x)
  y = tf.where(x_mask, x, x_zeros)
  return y 
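Despite its name, mask_nans zeroes infinities as well, since tf.is_finite rejects both. A quick usage sketch (TensorFlow 1.x), assuming the function above is in scope:

import tensorflow as tf

x = tf.constant([1.0, float('nan'), float('inf'), -2.0])

with tf.Session() as sess:
    print(sess.run(mask_nans(x)))  # [ 1.  0.  0. -2.]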
Example #11
Source File: gradproc.py    From ADL with MIT License
def _mapper(self, grad, var):
        # this was very slow.... see #3649
        # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
        return grad 
Example #12
Source File: gradproc.py    From petridishnn with MIT License
def _mapper(self, grad, var):
        # this was very slow.... see #3649
        # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
        return grad 
Example #13
Source File: gradproc.py    From VDAIC2017 with MIT License
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad 
Example #14
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compare(self, x, use_gpu):
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(x)
      ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
          inx), tf.is_nan(inx)
      tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite) 
Example #15
Source File: differentiable_tls.py    From SPFN with MIT License
def custom_svd_v_column(M, col_index=-1):
    # Must make sure M is finite. Otherwise cudaSolver might fail.
    assert_op = tf.Assert(tf.logical_not(tf.reduce_any(tf.logical_not(tf.is_finite(M)))), [M], summarize=10)
    with tf.control_dependencies([assert_op]):
        with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
            s, u, v = tf.svd(M, name='Svd') # M = usv^T
            return v[:, :, col_index] 
Example #16
Source File: variable_mgr_util.py    From training_results_v0.5 with Apache License 2.0
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single tower, and the number of pairs
      equals the number of towers.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. has_nan_or_inf indicates whether any gradient contains a NaN or an Inf.
  """
  grads = [g for g, _ in grad_and_vars]
  if any(isinstance(g, tf.IndexedSlices) for g in grads):
    # TODO(reedwm): All-reduce IndexedSlices more effectively.
    grad = gradients_impl._AggregateIndexedSlicesGradients(grads)  # pylint: disable=protected-access
  else:
    grad = tf.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = tf.scalar_mul(1.0 / len(grads), grad)

  v = grad_and_vars[0][1]
  if check_inf_nan:
    with tf.name_scope('check_for_inf_and_nan'):
      has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None 
Example #17
Source File: gradproc.py    From DDRL with Apache License 2.0
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad 
Example #18
Source File: utils.py    From srcnn-tensorflow with Apache License 2.0
def nanvar(x, axis=None):
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    x_mean = nanmean(x, axis=axis)
    # center first, then zero out the NaN entries; filling NaNs with 0 *before*
    # centering would let the filled zeros bias the sum of squares
    x_ss = tf.reduce_sum(fill_na((x - x_mean)**2, 0), axis=axis)
    return x_ss / x_count 
Example #19
Source File: utils.py    From srcnn-tensorflow with Apache License 2.0
def nanmean(x, axis=None):
    x_filled = fill_na(x, 0)
    x_sum = tf.reduce_sum(x_filled, axis=axis)
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    return tf.div(x_sum, x_count) 
Example #20
Source File: utils.py    From srcnn-tensorflow with Apache License 2.0
def fill_na(x, fillval=0):
    fill = tf.ones_like(x) * fillval
    return tf.where(tf.is_finite(x), x, fill) 
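Taken together, fill_na, nanmean and nanvar (with the correction above) mimic np.nanmean / np.nanvar: non-finite entries are excluded from both the sums and the counts. A short sanity check (TensorFlow 1.x), assuming the three functions above are in scope:

import tensorflow as tf

x = tf.constant([1.0, float('nan'), 3.0])

with tf.Session() as sess:
    print(sess.run(nanmean(x)))  # 2.0
    print(sess.run(nanvar(x)))   # 1.0, matching np.nanvar([1.0, nan, 3.0])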
Example #21
Source File: srcnn.py    From srcnn-tensorflow with Apache License 2.0
def _loss(self, predictions):
        with tf.name_scope("loss"):
            # if training then crop center of y, else, padding was applied
            # '//' keeps slice_amt an int; Python 3's '/' would yield a float and break slicing
            slice_amt = (np.sum(self.filter_sizes) - len(self.filter_sizes)) // 2
            slice_y = self.y_norm[:, slice_amt:-slice_amt, slice_amt:-slice_amt]
            _y = tf.cond(self.is_training, lambda: slice_y, lambda: self.y_norm)
            err = tf.square(predictions - _y)
            err_filled = utils.fill_na(err, 0)
            finite_count = tf.reduce_sum(tf.cast(tf.is_finite(err), tf.float32))
            mse = tf.reduce_sum(err_filled) / finite_count
            return mse 
Example #22
Source File: image_sample.py    From VDAIC2017 with MIT License
def ImageSample(inputs):
    """
    Sample the template image, using the given coordinate, by bilinear interpolation.
    It mimics the same behavior described in:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    :param inputs: [template, mapping]. template of shape NHWC. mapping of
        shape NHW2, where each pair of the last dimension is a (y, x) real-value
        coordinate.
    :returns: a NHWC output tensor.
    """
    template, mapping = inputs
    assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4

    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.cast(mapping, tf.int32)  # floor
    ucoor = lcoor + 1

    # has to cast to int32 and then cast back
    # tf.floor has gradient 1 w.r.t. its input
    # TODO bug fixed in #951
    diff = mapping - tf.cast(lcoor, tf.float32)
    neg_diff = 1.0 - diff   #bxh2xw2x2

    lcoory, lcoorx = tf.split(3, 2, lcoor)
    ucoory, ucoorx = tf.split(3, 2, ucoor)

    lyux = tf.concat(3, [lcoory, ucoorx])
    uylx = tf.concat(3, [ucoory, lcoorx])

    diffy, diffx = tf.split(3, 2, diff)
    neg_diffy, neg_diffx = tf.split(3, 2, neg_diff)

    #prod = tf.reduce_prod(diff, 3, keep_dims=True)
    #diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
                          #tf.reduce_max(diff), diff],
                    #summarize=50)

    return tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
           sample(template, ucoor) * diffx * diffy,
           sample(template, lyux) * neg_diffy * diffx,
           sample(template, uylx) * diffy * neg_diffx], name='sampled') 
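The four add_n terms above are standard bilinear interpolation: with dy, dx the fractional offsets from the floor corner, the corner weights are (1-dy)(1-dx), dy*dx, (1-dy)dx and dy(1-dx). A NumPy sketch of the same arithmetic at a single point (the function name and values are made up for illustration):

import numpy as np

def bilinear(template, y, x):
    # interpolate a single-channel image at the real-valued coordinate (y, x)
    y0, x0 = int(np.floor(y)), int(np.floor(x))
    dy, dx = y - y0, x - x0
    return (template[y0,     x0    ] * (1 - dy) * (1 - dx) +
            template[y0 + 1, x0 + 1] * dy * dx +
            template[y0,     x0 + 1] * (1 - dy) * dx +
            template[y0 + 1, x0    ] * dy * (1 - dx))

img = np.arange(16.0).reshape(4, 4)
print(bilinear(img, 1.5, 1.5))  # 7.5, the average of the four neighbours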
Example #23
Source File: image_sample.py    From Distributed-BA3C with Apache License 2.0
def ImageSample(inputs):
    """
    Sample the template image, using the given coordinate, by bilinear interpolation.
    It mimics the same behavior described in:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    :param inputs: [template, mapping]. template of shape NHWC. mapping of
        shape NHW2, where each pair of the last dimension is a (y, x) real-value
        coordinate.
    :returns: a NHWC output tensor.
    """
    template, mapping = inputs
    assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4

    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.cast(mapping, tf.int32)  # floor
    ucoor = lcoor + 1

    # has to cast to int32 and then cast back
    # tf.floor has gradient 1 w.r.t. its input
    # TODO bug fixed in #951
    diff = mapping - tf.cast(lcoor, tf.float32)
    neg_diff = 1.0 - diff   #bxh2xw2x2

    lcoory, lcoorx = tf.split(3, 2, lcoor)
    ucoory, ucoorx = tf.split(3, 2, ucoor)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(3, 2, diff)
    neg_diffy, neg_diffx = tf.split(3, 2, neg_diff)

    #prod = tf.reduce_prod(diff, 3, keep_dims=True)
    #diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
                          #tf.reduce_max(diff), diff],
                    #summarize=50)

    return tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
           sample(template, ucoor) * diffx * diffy,
           sample(template, lyux) * neg_diffy * diffx,
           sample(template, uylx) * diffy * neg_diffx], name='sampled') 
Example #24
Source File: image_sample.py    From ternarynet with Apache License 2.0
def ImageSample(inputs):
    """
    Sample the template image, using the given coordinate, by bilinear interpolation.
    It mimics the same behavior described in:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    :param inputs: [template, mapping]. template of shape NHWC. mapping of
        shape NHW2, where each pair of the last dimension is a (y, x) real-value
        coordinate.
    :returns: a NHWC output tensor.
    """
    template, mapping = inputs
    assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4

    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.cast(mapping, tf.int32)  # floor
    ucoor = lcoor + 1

    # has to cast to int32 and then cast back
    # tf.floor has gradient 1 w.r.t. its input
    # TODO bug fixed in #951
    diff = mapping - tf.cast(lcoor, tf.float32)
    neg_diff = 1.0 - diff   #bxh2xw2x2

    lcoory, lcoorx = tf.split(3, 2, lcoor)
    ucoory, ucoorx = tf.split(3, 2, ucoor)

    lyux = tf.concat(3, [lcoory, ucoorx])
    uylx = tf.concat(3, [ucoory, lcoorx])

    diffy, diffx = tf.split(3, 2, diff)
    neg_diffy, neg_diffx = tf.split(3, 2, neg_diff)

    #prod = tf.reduce_prod(diff, 3, keep_dims=True)
    #diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
                          #tf.reduce_max(diff), diff],
                    #summarize=50)

    return tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
           sample(template, ucoor) * diffx * diffy,
           sample(template, lyux) * neg_diffy * diffx,
           sample(template, uylx) * diffy * neg_diffx], name='sampled') 
Example #25
Source File: image_sample.py    From DDRL with Apache License 2.0
def ImageSample(inputs):
    """
    Sample the template image, using the given coordinate, by bilinear interpolation.
    It mimics the same behavior described in:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    :param inputs: [template, mapping]. template of shape NHWC. mapping of
        shape NHW2, where each pair of the last dimension is a (y, x) real-value
        coordinate.
    :returns: a NHWC output tensor.
    """
    template, mapping = inputs
    assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4

    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.cast(mapping, tf.int32)  # floor
    ucoor = lcoor + 1

    # has to cast to int32 and then cast back
    # tf.floor has gradient 1 w.r.t. its input
    # TODO bug fixed in #951
    diff = mapping - tf.cast(lcoor, tf.float32)
    neg_diff = 1.0 - diff   #bxh2xw2x2

    lcoory, lcoorx = tf.split(3, 2, lcoor)
    ucoory, ucoorx = tf.split(3, 2, ucoor)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(3, 2, diff)
    neg_diffy, neg_diffx = tf.split(3, 2, neg_diff)

    #prod = tf.reduce_prod(diff, 3, keep_dims=True)
    #diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
                          #tf.reduce_max(diff), diff],
                    #summarize=50)

    return tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
           sample(template, ucoor) * diffx * diffy,
           sample(template, lyux) * neg_diffy * diffx,
           sample(template, uylx) * diffy * neg_diffx], name='sampled') 
Example #26
Source File: general.py    From hand3d with GNU General Public License v2.0
def calc_center_bb(binary_class_mask):
    """ Returns the center of mass coordinates for the given binary_class_mask. """
    with tf.variable_scope('calc_center_bb'):
        binary_class_mask = tf.cast(binary_class_mask, tf.int32)
        binary_class_mask = tf.equal(binary_class_mask, 1)
        s = binary_class_mask.get_shape().as_list()
        if len(s) == 4:
            binary_class_mask = tf.squeeze(binary_class_mask, [3])

        s = binary_class_mask.get_shape().as_list()
        assert len(s) == 3, "binary_class_mask must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"

        # my meshgrid
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])

        bb_list = list()
        center_list = list()
        crop_size_list = list()
        for i in range(s[0]):
            X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
            Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)

            x_min = tf.reduce_min(X_masked)
            x_max = tf.reduce_max(X_masked)
            y_min = tf.reduce_min(Y_masked)
            y_max = tf.reduce_max(Y_masked)

            start = tf.stack([x_min, y_min])
            end = tf.stack([x_max, y_max])
            bb = tf.stack([start, end], 1)
            bb_list.append(bb)

            center_x = 0.5*(x_max + x_min)
            center_y = 0.5*(y_max + y_min)
            center = tf.stack([center_x, center_y], 0)

            center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
                                  lambda: tf.constant([160.0, 160.0]))
            center.set_shape([2])
            center_list.append(center)

            crop_size_x = x_max - x_min
            crop_size_y = y_max - y_min
            crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
            crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
                                  lambda: tf.constant([100.0]))
            crop_size.set_shape([1])
            crop_size_list.append(crop_size)

        bb = tf.stack(bb_list)
        center = tf.stack(center_list)
        crop_size = tf.stack(crop_size_list)

        return center, bb, crop_size
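The tf.is_finite guards above handle the empty-mask case: tf.reduce_min and tf.reduce_max over an empty selection return +inf and -inf respectively, so a frame with no foreground pixels yields a non-finite center and falls back to the hard-coded defaults. A minimal sketch of that behaviour (TensorFlow 1.x):

import tensorflow as tf

values = tf.boolean_mask(tf.constant([1.0, 2.0]), tf.constant([False, False]))
center = tf.stack([tf.reduce_min(values), tf.reduce_max(values)])
center = tf.cond(tf.reduce_all(tf.is_finite(center)),
                 lambda: center, lambda: tf.constant([160.0, 160.0]))

with tf.Session() as sess:
    print(sess.run(center))  # [160. 160.] -- the fallback for an empty mask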