Python tensorflow.python.ops.math_ops.greater() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.greater(), collected from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops.
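Before the examples, a quick reminder of the op itself: math_ops.greater(x, y) is the internal symbol behind the public tf.greater. It compares two tensors element-wise (broadcasting where shapes allow) and returns a bool tensor. A minimal sketch, assuming a TensorFlow 1.x session-style runtime to match the examples below:

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.0, 5.0, 3.0])
y = tf.constant([2.0, 2.0, 3.0])
mask = math_ops.greater(x, y)  # element-wise x > y; dtype is bool

with tf.Session() as sess:
  print(sess.run(mask))  # [False  True False]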
Example #1
Source File: metric_ops.py From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
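A hypothetical call (the streaming-metrics code applies this helper to scalar counts):

num = tf.constant(4.0)
den = tf.constant(0.0)
ratio = _safe_div(num, den, name="ratio")
# ratio evaluates to 0.0: the denominator fails greater(den, 0),
# so the constant 0 branch is selected instead of 4.0 / 0.0 = inf.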
Example #2
Source File: tensor_forest.py From deep-learning with MIT License
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)
  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini
  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #3
Source File: dirichlet.py From auto-alt-text-lambda-api with MIT License
def _mode(self):
  mode = ((self.alpha - 1.) /
          (array_ops.expand_dims(self.alpha_sum, dim=-1) -
           math_ops.cast(self.event_shape()[0], self.dtype)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
    return array_ops.where(
        math_ops.greater(self.alpha, 1.),
        mode,
        array_ops.fill(shape, nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1")
    ], mode)
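For reference, this computes the standard Dirichlet mode, defined only when every concentration parameter exceeds 1:

$$\text{mode}_i = \frac{\alpha_i - 1}{\alpha_0 - K}, \qquad \alpha_0 = \sum_{k=1}^{K} \alpha_k, \quad \alpha_i > 1,$$

which is exactly the (alpha - 1) / (alpha_sum - K) expression above. greater(self.alpha, 1.) masks the components where the mode is undefined, filling them with NaN when allow_nan_stats is set.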
Example #4
Source File: tensor_forest.py From lambda-packs with MIT License
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)
  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini
  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #5
Source File: mertric.py From tf.fashionAI with Apache License 2.0
def _safe_div(numerator, denominator, name):
  """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  t = math_ops.truediv(numerator, denominator)
  zero = array_ops.zeros_like(t, dtype=denominator.dtype)
  condition = math_ops.greater(denominator, zero)
  zero = math_ops.cast(zero, t.dtype)
  return array_ops.where(condition, t, zero, name=name)
Example #6
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
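The doubled where deserves a note: even though the division branch is only selected when the denominator is positive, tf.where still backpropagates through both branches, so an unguarded numerator / 0 would leak NaN into the gradients. A minimal sketch of that failure mode (hypothetical values, TF 1.x-style gradients):

x = tf.constant([0.0, 2.0])
naive = tf.where(x > 0., 1. / x, tf.zeros_like(x))
grad = tf.gradients(naive, x)[0]
# grad evaluates to [nan, -0.25]: the 1/x branch is differentiated even
# where the condition is False. _safe_div avoids this by substituting 1
# for the zero denominators (the inner where) before dividing.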
Example #7
Source File: student_t.py From lambda-packs with MIT License
def _mean(self):
  mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                   dtype=self.dtype)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
        mean,
        array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones([], dtype=self.dtype),
                self.df,
                message="mean not defined for components of df <= 1"),
        ],
        mean)
Example #8
Source File: metrics_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
Example #9
Source File: student_t.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _mean(self):
  mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                   dtype=self.dtype)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
        mean,
        array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones([], dtype=self.dtype),
                self.df,
                message="mean not defined for components of df <= 1"),
        ],
        mean)
Example #10
Source File: metrics_impl.py From lambda-packs with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
Example #11
Source File: losses_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example #12
Source File: losses_impl.py From lambda-packs with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example #13
Source File: util.py From fully-convolutional-point-network with MIT License
def tf_safe_div(numerator, denominator):
  """Computes a safe division which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: tf.tensor
    denominator: tf.tensor

  Returns:
    tf.tensor
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator))
Example #14
Source File: student_t.py From auto-alt-text-lambda-api with MIT License
def _mean(self):
  mean = self.mu * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape(), dtype=self.dtype)),
        mean,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones((), dtype=self.dtype),
                self.df,
                message="mean not defined for components of df <= 1"),
        ],
        mean)
Example #15
Source File: metric_ops.py From lambda-packs with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
Example #16
Source File: metric_ops.py From deep_image_model with Apache License 2.0
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
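(A side note for this and several later examples: math_ops.select is the pre-1.0 name of what became array_ops.where / tf.where. The deep_image_model snippets were written against that older API, which is why both spellings appear on this page.)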
Example #17
Source File: topn.py From auto-alt-text-lambda-api with MIT License
def insert(self, ids, scores):
  """Insert the ids and scores into the TopN."""
  with ops.control_dependencies(self.last_ops):
    scatter_op = state_ops.scatter_update(self.id_to_score, ids, scores)
    larger_scores = math_ops.greater(scores, self.sl_scores[0])

    def shortlist_insert():
      larger_ids = array_ops.boolean_mask(
          math_ops.to_int64(ids), larger_scores)
      larger_score_values = array_ops.boolean_mask(scores, larger_scores)
      shortlist_ids, new_ids, new_scores = tensor_forest_ops.top_n_insert(
          self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
      u1 = state_ops.scatter_update(self.sl_ids, shortlist_ids, new_ids)
      u2 = state_ops.scatter_update(self.sl_scores, shortlist_ids,
                                    new_scores)
      return control_flow_ops.group(u1, u2)

    # We only need to insert into the shortlist if there are any
    # scores larger than the threshold.
    cond_op = control_flow_ops.cond(
        math_ops.reduce_any(larger_scores), shortlist_insert,
        control_flow_ops.no_op)
    with ops.control_dependencies([cond_op]):
      self.last_ops = [scatter_op, cond_op]
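The greater + boolean_mask combination used in shortlist_insert is a common idiom for filtering a tensor by threshold; a standalone sketch with hypothetical values:

scores = tf.constant([0.1, 0.9, 0.4, 0.7])
mask = math_ops.greater(scores, 0.5)   # [False, True, False, True]
kept = tf.boolean_mask(scores, mask)   # [0.9, 0.7]
any_hit = tf.reduce_any(mask)          # True; this is what gates the cond above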
Example #18
Source File: tensor_forest.py From deep_image_model with Apache License 2.0
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)
  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini
  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
Example #19
Source File: student_t.py From deep_image_model with Apache License 2.0
def _variance(self):
  var = (self._ones() *
         math_ops.square(self.sigma) * self.df / (self.df - 2))
  # When 1 < df <= 2, variance is infinite.
  inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
  result_where_defined = math_ops.select(
      math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.)),
      var,
      array_ops.fill(self.batch_shape(), inf, name="inf"))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.greater(self.df, self._ones()),
        result_where_defined,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.df,
            message="variance not defined for components of df <= 1"),
    ], result_where_defined)
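The two greater tests encode the piecewise Student-t variance:

$$\operatorname{Var}(X) = \begin{cases} \sigma^2\,\dfrac{\nu}{\nu-2}, & \nu > 2,\\ \infty, & 1 < \nu \le 2,\\ \text{undefined}, & \nu \le 1, \end{cases}$$

with greater(df, 2) selecting the finite expression and greater(df, 1) deciding between infinity and NaN (or an assertion failure) for smaller df.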
Example #20
Source File: dirichlet.py From deep_image_model with Apache License 2.0
def _mode(self):
  mode = ((self.alpha - 1.) /
          (array_ops.expand_dims(self.alpha_sum, dim=-1) -
           math_ops.cast(self.event_shape()[0], self.dtype)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    shape = array_ops.concat(0, (self.batch_shape(), self.event_shape()))
    return math_ops.select(
        math_ops.greater(self.alpha, 1.),
        mode,
        array_ops.fill(shape, nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1")
    ], mode)
Example #21
Source File: losses_impl.py From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example #22
Source File: metrics_impl.py From auto-alt-text-lambda-api with MIT License
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
Example #23
Source File: beta.py From auto-alt-text-lambda-api with MIT License
def _mode(self):
  mode = (self.a - 1.) / (self.a_b_sum - 2.)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.logical_and(
            math_ops.greater(self.a, 1.),
            math_ops.greater(self.b, 1.)),
        mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.a,
            message="Mode not defined for components of a <= 1."),
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.b,
            message="Mode not defined for components of b <= 1."),
    ], mode)
Example #24
Source File: loss_ops.py From deep_image_model with Apache License 2.0
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, math_ops.select(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example #25
Source File: loss_ops.py From lambda-packs with MIT License
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example #26
Source File: customlayers.py From AiGEM_TeamHeidelberg2017 with MIT License
def spp(self, sequences, lengths, levels, divsPerLevel):
  with tf.name_scope("spp"):
    batch_size, _, channels = sequences.get_shape()
    batchValues = []
    # we cannot max_pool2d because sequences have different lengths
    for b in range(batch_size):
      currLength = lengths[b]
      pooledValues = []
      for level in range(levels):
        ndiv = divsPerLevel ** level
        assert ndiv > 0
        divLength = tf.cast(tf.ceil(tf.truediv(currLength, ndiv) - 1e-8),
                            tf.int32)
        for divIndex in range(ndiv):
          divStart = 0 if ndiv <= 1 else tf.cast(
              tf.round(tf.truediv(currLength - divLength, ndiv - 1) *
                       divIndex), tf.int32)
          pooledValues.append(tf.cond(
              tf.greater(divLength, 0),
              lambda: tf.reduce_max(
                  sequences[b, divStart:divStart + divLength, :], 0),
              lambda: tf.zeros(shape=[channels], dtype=tf.float32)))
      spp_count = len(pooledValues)
      pooledValue = tf.stack(pooledValues, 0)
      pooledValue.set_shape([spp_count, channels])
      batchValues.append(pooledValue)
    result = tf.stack(batchValues, 0)
    result.set_shape([batch_size, spp_count, channels])
    return result
Example #27
Source File: customlayers.py From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss(labels=[], logits=[], pos_weights=[], gamma=2., clips=[],
               name='focal_loss'):
  """
  Add focal loss weights to the weighted sigmoid cross entropy
  :return:
  """
  batchsize = labels.get_shape().as_list()[0]
  n_classes = labels.get_shape().as_list()[1]
  with tf.variable_scope(name) as vs:
    # first get a sigmoid to determine the focal loss weights:
    sigmoid_logits = tf.nn.sigmoid(logits)
    # determine the focal loss weights:
    labels = math_ops.to_float(labels)
    sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
    preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits,
                            1. - sigmoid_logits)
    focal_weights = (math_ops.subtract(1., preds)) ** gamma
    print(focal_weights)
    # clip the weights at E-3 and E3
    up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
    low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
    focal_weights = array_ops.where(
        math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
    focal_weights = array_ops.where(
        math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
    log_weight = 1. + (pos_weights - 1.) * labels
    # now put them into a weighted softmax ce:
    loss = math_ops.multiply(
        math_ops.add(
            (1. - labels) * logits,
            log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits)))
                          + nn_ops.relu(-logits))),
        focal_weights,
        name='sc_entropy')
    return loss
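The (1. - preds) ** gamma term is the modulating factor of the focal loss from Lin et al., "Focal Loss for Dense Object Detection" (2017):

$$FL(p_t) = -(1 - p_t)^{\gamma}\,\log(p_t), \qquad p_t = \begin{cases} p, & y = 1,\\ 1 - p, & y = 0, \end{cases}$$

here combined with a positive-class weight. greater and less only clip the focal weights into the [clips[0], clips[1]] range before they scale the weighted sigmoid cross entropy.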
Example #28
Source File: metric_ops_large_test.py From tf-slim with Apache License 2.0
def testLargeCase(self):
  shape = [32, 512, 256, 1]
  predictions = random_ops.random_uniform(
      shape, 0.0, 1.0, dtype=dtypes_lib.float32)
  labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)
  result, update_op = metric_ops.precision_recall_at_equal_thresholds(
      labels=labels, predictions=predictions, num_thresholds=201)
  # Run many updates, enough to cause highly inaccurate values if the
  # code used float32 for accumulation.
  num_updates = 71
  with self.cached_session() as sess:
    sess.run(variables.local_variables_initializer())
    for _ in xrange(num_updates):
      sess.run(update_op)
    prdata = sess.run(result)
    # Since we use random values, we won't know the tp/fp/tn/fn values, but
    # tp and fp at threshold 0 should be the total number of positive and
    # negative labels, hence their sum should be total number of pixels.
    expected_value = 1.0 * np.product(shape) * num_updates
    got_value = prdata.tp[0] + prdata.fp[0]
    # They should be at least within 1.
    self.assertNear(got_value, expected_value, 1.0)
Example #29
Source File: ssd_augmenter.py From lambda-deep-learning-demo with Apache License 2.0
def safe_divide(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return tf.where(
      math_ops.greater(denominator, 0),
      math_ops.divide(numerator, denominator),
      tf.zeros_like(numerator),
      name=name)
Example #30
Source File: customlayers.py From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss_alpha(labels=[], logits=[], pos_weights=[], gamma=2., clips=[],
                     name='focal_loss'):
  """
  Add focal loss weights to the weighted sigmoid cross entropy
  :return:
  """
  batchsize = labels.get_shape().as_list()[0]
  n_classes = labels.get_shape().as_list()[1]
  with tf.variable_scope(name) as vs:
    # first get a sigmoid to determine the focal loss weights:
    sigmoid_logits = tf.nn.sigmoid(logits)
    # determine the focal loss weights:
    labels = math_ops.to_float(labels)
    sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
    preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits,
                            1. - sigmoid_logits)
    focal_weights = (math_ops.subtract(1., preds)) ** gamma
    print(focal_weights)
    # clip the weights at E-3 and E3
    up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
    low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
    focal_weights = array_ops.where(
        math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
    focal_weights = array_ops.where(
        math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
    log_weight = 1. + (pos_weights - 1.) * labels
    # now put them into a weighted softmax ce:
    loss = math_ops.multiply(
        math_ops.add(
            (1. - labels) * logits,
            log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits)))
                          + nn_ops.relu(-logits))),
        focal_weights,
        name='sc_entropy')
    return loss