Python tensorflow.python.ops.math_ops.subtract() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.subtract(), collected from open-source projects.
Each example notes the project and source file it comes from. You may also want to check out all other available functions and classes of the tensorflow.python.ops.math_ops module.
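Before the project examples, here is a minimal illustrative sketch of the op itself (not taken from any of the projects below): math_ops.subtract(x, y) computes element-wise x - y with NumPy-style broadcasting and is the op that tf.subtract and the - operator dispatch to.

# Minimal sketch (illustrative only): element-wise subtraction with broadcasting.
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[3.0, 5.0], [7.0, 9.0]])
y = tf.constant(1.0)  # scalar broadcasts against the 2x2 tensor
diff = math_ops.subtract(x, y, name="diff")  # same value as x - y or tf.subtract(x, y)
print(diff)  # values [[2. 4.] [6. 8.]]; under graph-mode TF 1.x, evaluate with Session.run(diff)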
Example #1
Source File: loss_ops.py From keras-lambda with MIT License | 6 votes |
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Example #2
Source File: loss_ops.py From lambda-packs with MIT License | 6 votes |
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Example #3
Source File: nn_ops.py From lambda-packs with MIT License | 6 votes |
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keep its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # Set output shape if known.
  shape = logits.get_shape()
  if shape is not None and shape.dims is not None:
    shape = shape.as_list()
    product = 1
    product_valid = True
    for d in shape[:-1]:
      if d is None:
        product_valid = False
        break
      else:
        product *= d
    if product_valid:
      output_shape = [product, shape[-1]]
      output.set_shape(output_shape)

  return output
Example #4
Source File: stepper_cli_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def setUp(self):
  self.a = variables.Variable(10.0, name="a")
  self.b = variables.Variable(20.0, name="b")

  self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
  self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
  self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

  self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
  self.f = math_ops.multiply(self.e, self.ph, name="f")

  self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
      self.e, name="opt")

  self.sess = session.Session()

  self.sess.run(self.a.initializer)
  self.sess.run(self.b.initializer)
Example #5
Source File: nn_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keep its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # Set output shape if known.
  shape = logits.get_shape()
  if shape is not None and shape.dims is not None:
    shape = shape.as_list()
    product = 1
    product_valid = True
    for d in shape[:-1]:
      if d is None:
        product_valid = False
        break
      else:
        product *= d
    if product_valid:
      output_shape = [product, shape[-1]]
      output.set_shape(output_shape)

  return output
Example #6
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Example #7
Source File: deep_mlp_uq_model.py From deep-quant with MIT License | 6 votes |
def _huber_loss(labels, predictions, config):
    """ Huber loss tensor"""
    delta = config.huber_delta
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already
    # a nonzero contribution to the gradient from the quadratic term.
    linear = math_ops.subtract(abs_error, quadratic)
    losses = math_ops.add(
        math_ops.multiply(
            ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
            math_ops.multiply(quadratic, quadratic)),
        math_ops.multiply(delta, linear))
    return losses
Example #8
Source File: deep_loglikelihood_uq_model.py From deep-quant with MIT License | 6 votes |
def _huber_loss(labels, predictions, config):
    """ Huber loss tensor"""
    delta = config.huber_delta
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already
    # a nonzero contribution to the gradient from the quadratic term.
    linear = math_ops.subtract(abs_error, quadratic)
    losses = math_ops.add(
        math_ops.multiply(
            ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
            math_ops.multiply(quadratic, quadratic)),
        math_ops.multiply(delta, linear))
    return losses
Example #9
Source File: deep_rnn_model_huber_loss.py From deep-quant with MIT License | 6 votes |
def _huber_loss(labels, predictions, mask, config):
    """ Huber loss according to masking"""
    delta = config.huber_delta
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already
    # a nonzero contribution to the gradient from the quadratic term.
    linear = math_ops.subtract(abs_error, quadratic)
    losses = math_ops.add(
        math_ops.multiply(
            ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
            math_ops.multiply(quadratic, quadratic)),
        math_ops.multiply(delta, linear))
    huber_loss = tf.reduce_sum(losses) / tf.reduce_sum(mask)
    return huber_loss
Example #10
Source File: nn_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keep its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # Set output shape if known.
  if context.in_graph_mode():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      shape = shape.as_list()
      product = 1
      product_valid = True
      for d in shape[:-1]:
        if d is None:
          product_valid = False
          break
        else:
          product *= d
      if product_valid:
        output_shape = [product, shape[-1]]
        output.set_shape(output_shape)

  return output
Example #11
Source File: stepper_cli_test.py From keras-lambda with MIT License | 6 votes |
def setUp(self):
  self.a = variables.Variable(10.0, name="a")
  self.b = variables.Variable(20.0, name="b")

  self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
  self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
  self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

  self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
  self.f = math_ops.multiply(self.e, self.ph, name="f")

  self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
      self.e, name="opt")

  self.sess = session.Session()

  self.sess.run(self.a.initializer)
  self.sess.run(self.b.initializer)
Example #12
Source File: nn_ops.py From keras-lambda with MIT License | 6 votes |
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keep its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # Set output shape if known.
  shape = logits.get_shape()
  if shape is not None and shape.dims is not None:
    shape = shape.as_list()
    product = 1
    product_valid = True
    for d in shape[:-1]:
      if d is None:
        product_valid = False
        break
      else:
        product *= d
    if product_valid:
      output_shape = [product, shape[-1]]
      output.set_shape(output_shape)

  return output
Example #13
Source File: nn_impl.py From keras-lambda with MIT License | 5 votes |
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize",
                      [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
Example #14
Source File: core_test.py From keras-lambda with MIT License | 5 votes |
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
Example #15
Source File: loss_ops.py From keras-lambda with MIT License | 5 votes |
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = math_ops.abs(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope=scope)
Example #16
Source File: loss_ops.py From keras-lambda with MIT License | 5 votes |
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope=scope)
Example #17
Source File: image_ops_impl.py From keras-lambda with MIT License | 5 votes |
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
Example #18
Source File: official_tf_image.py From X-Detector with Apache License 2.0 | 5 votes |
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
Example #19
Source File: losses_impl.py From keras-lambda with MIT License | 5 votes |
def mean_squared_error(labels, predictions, weights=1.0, scope=None,
                       loss_collection=ops.GraphKeys.LOSSES):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Example #20
Source File: losses_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match or
      if `labels` or `logits` is None.
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Example #21
Source File: image_ops_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
Example #22
Source File: losses_impl.py From lambda-packs with MIT License | 5 votes |
def absolute_difference(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds an Absolute Difference loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a `Tensor` of
  shape `[batch_size]`, then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.abs(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Example #23
Source File: session_debug_testlib.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def testGraphPathFindingOnControlEdgesWorks(self):
  with session.Session(config=no_rewrite_session_config()) as sess:
    v1 = variables.Variable(1.0, name="v1")
    v2 = variables.Variable(2.0, name="v2")
    v3 = variables.Variable(3.0, name="v3")
    a = math_ops.add(v1, v2, name="a")
    with ops.control_dependencies([a]):
      c = math_ops.subtract(v3, v3, name="c")
    sess.run(variables.global_variables_initializer())
    _, dump = self._debug_run_and_get_dump(sess, c)

    self.assertEqual(["v1", "v1/read", "a", "c"],
                     dump.find_some_path("v1", "c"))
    self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
Example #24
Source File: test_forward.py From incubator-tvm with Apache License 2.0 | 5 votes |
def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """ One iteration of subtract """
    return _test_elemwise(math_ops.subtract, data, fused_activation_function,
                          quantized, qnn_op)
#######################################################################
# Mul
# ---
Example #25
Source File: losses.py From DeepLabCut with GNU Lesser General Public License v3.0 | 5 votes |
def huber_loss(labels, predictions, weight=1.0, k=1.0, scope=None):
    """Define a huber loss https://en.wikipedia.org/wiki/Huber_loss
      tensor: tensor to regularize.
      k: value of k in the huber loss
      scope: Optional scope for op_scope.

    Huber loss:
    f(x) = if |x| <= k:
              0.5 * x^2
           else:
              k * |x| - 0.5 * k^2

    Returns:
      the L1 loss op.

    http://concise-bio.readthedocs.io/en/latest/_modules/concise/tf_helper.html
    """
    with ops.name_scope(scope, "absolute_difference",
                        [predictions, labels]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        if weight is None:
            raise ValueError("`weight` cannot be None")
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        diff = math_ops.subtract(predictions, labels)
        abs_diff = tf.abs(diff)
        losses = tf.where(
            abs_diff < k,
            0.5 * tf.square(diff),
            k * abs_diff - 0.5 * k ** 2)
        return TF.losses.compute_weighted_loss(losses, weight)
Example #26
Source File: unicode_script_tokenizer.py From text with Apache License 2.0 | 5 votes |
def _tokenize_with_offsets_encode_decode_wrapper(self, input_tensor):
    """Tokenizes a tensor of UTF-8 strings with rank of 1.

    Args:
      input_tensor: The single dimensional Tensor to tokenize.

    Returns:
      Tuple of RaggedTensors of tokenized text and byte offsets, with shapes
      [num_strings, (num_tokens or num_offsets)].
    """
    # Decode the strings and get byte offsets
    (codepoints, byte_start_offsets) = (
        ragged_string_ops.unicode_decode_with_offsets(input_tensor, "UTF-8"))
    byte_limit_offsets = array_ops.concat([
        byte_start_offsets[:, 1:],
        math_ops.cast(
            array_ops.expand_dims(string_ops.string_length(input_tensor), 1),
            dtypes.int64)
    ], 1)

    # Tokenize
    (codepoint_tokens, codepoint_start_offsets, codepoint_limit_offsets) = (
        self._tokenize_codepoints_with_offsets(codepoints))

    # Encode the codepoints and translate the codepoint offsets to byte offsets.
    return (ragged_string_ops.unicode_encode(codepoint_tokens, "UTF-8"),
            array_ops.batch_gather(byte_start_offsets, codepoint_start_offsets),
            array_ops.batch_gather(
                byte_limit_offsets,
                math_ops.subtract(codepoint_limit_offsets, [1])))
Example #27
Source File: train_imagenet_resnet_hvd.py From sagemaker-tensorflow-training-toolkit with Apache License 2.0 | 5 votes |
def warmup_decay(warmup_lr, global_step, warmup_steps, warmup_end_lr):
    from tensorflow.python.ops import math_ops
    p = tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)
    diff = math_ops.subtract(warmup_end_lr, warmup_lr)
    res = math_ops.add(warmup_lr, math_ops.multiply(diff, p))
    return res
Example #28
Source File: losses_impl.py From lambda-packs with MIT License | 5 votes |
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Example #29
Source File: losses_impl.py From lambda-packs with MIT License | 5 votes |
def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Example #30
Source File: nn_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize",
                      [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, divisor),
        math_ops.square(shifted_mean),
        name="variance")
  return (mean, variance)