Python tensorflow.python.ops.math_ops.squared_difference() Examples
The following are 16 code examples of tensorflow.python.ops.math_ops.squared_difference().
Each example is taken from an open-source project; the source file and license are noted above it.
You may also want to check out the other available functions and classes of the module tensorflow.python.ops.math_ops.
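Before the project examples, a minimal sketch of what the op does may help: squared_difference(x, y) computes (x - y)**2 element-wise, with the usual broadcasting rules, and the internal math_ops.squared_difference is the same op exposed publicly as tf.math.squared_difference. The snippet below is illustrative only (not drawn from any of the projects); it assumes a TF 1.x-style graph session via tensorflow.compat.v1, which requires TensorFlow 1.14 or newer.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import math_ops

tf.disable_eager_execution()

# Element-wise (x - y)**2; y is broadcast against each row of x.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([-3.0, -2.0, -1.0])
z = math_ops.squared_difference(x, y)  # equivalent to tf.math.squared_difference(x, y)

with tf.Session() as sess:
    print(sess.run(z))  # [[16. 16. 16.] [49. 49. 49.]]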
Example #1
Source File: core_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  super(FloatBinaryOpsTest, self).setUp()
  self.ops = [
      ('igamma', None, math_ops.igamma, core.igamma),
      ('igammac', None, math_ops.igammac, core.igammac),
      ('zeta', None, math_ops.zeta, core.zeta),
      ('polygamma', None, math_ops.polygamma, core.polygamma),
      ('maximum', None, math_ops.maximum, core.maximum),
      ('minimum', None, math_ops.minimum, core.minimum),
      ('squared_difference', None, math_ops.squared_difference,
       core.squared_difference),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
  self.test_lt_1 = test_lt
  self.test_lt_2 = 1.0 - test_lt
  self.test_lt_1_broadcast = self.test_lt_1.tensor
  self.test_lt_2_broadcast = self.test_lt_2.tensor
  self.broadcast_axes = self.test_lt_1.axes
Example #2
Source File: core_test.py From keras-lambda with MIT License
def setUp(self):
  super(FloatBinaryOpsTest, self).setUp()
  self.ops = [
      ('igamma', None, math_ops.igamma, core.igamma),
      ('igammac', None, math_ops.igammac, core.igammac),
      ('zeta', None, math_ops.zeta, core.zeta),
      ('polygamma', None, math_ops.polygamma, core.polygamma),
      ('maximum', None, math_ops.maximum, core.maximum),
      ('minimum', None, math_ops.minimum, core.minimum),
      ('squared_difference', None, math_ops.squared_difference,
       core.squared_difference),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
  self.test_lt_1 = test_lt
  self.test_lt_2 = 1.0 - test_lt
  self.test_lt_1_broadcast = self.test_lt_1.tensor
  self.test_lt_2_broadcast = self.test_lt_2.tensor
  self.broadcast_axes = self.test_lt_1.axes
Example #3
Source File: loss_ops.py From tf-slim with Apache License 2.0
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.cast(predictions, dtypes.float32)
    labels = math_ops.cast(labels, dtypes.float32)
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(losses, weights, scope=scope)
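As a rough usage sketch (not part of tf-slim itself), the weights argument can be a scalar or a [batch_size] vector; below, the hypothetical per-sample weights zero out the second sample's contribution before the weighted reduction. The import path is an assumption based on the pip tf-slim package layout; adjust it to your install.

import tensorflow.compat.v1 as tf
from tf_slim.losses import loss_ops  # assumed import path

tf.disable_eager_execution()  # tf-slim's loss_ops is written against graph-mode TF1 APIs

predictions = tf.constant([[0.0, 1.0], [2.0, 2.0]])
labels = tf.constant([[0.0, 0.0], [0.0, 0.0]])
weights = tf.constant([1.0, 0.0])  # hypothetical per-sample weights: second sample is ignored

loss = loss_ops.mean_squared_error(predictions, labels, weights=weights)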
Example #4
Source File: metric_learning.py From tf-slim with Apache License 2.0
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages the embedding to be close to each other for
  the samples of the same label and the embedding to be far apart at least
  by the margin constant for the samples of different labels.
  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      binary labels indicating positive vs negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Get per pair distances
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.squared_difference(embeddings_anchor, embeddings_positive),
          1))

  # Add contrastive loss for the siamese network.
  #   label here is {0,1} for neg, pos.
  return math_ops.reduce_mean(
      math_ops.cast(labels, distances.dtype) * math_ops.square(distances) +
      (1. - math_ops.cast(labels, distances.dtype)) *
      math_ops.square(math_ops.maximum(margin - distances, 0.)),
      name='contrastive_loss')
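A hedged usage sketch, with inputs invented for illustration: batches of l2-normalized embedding pairs plus binary labels, where label 1 pulls a pair together and label 0 pushes it at least margin apart.

import tensorflow as tf

# Hypothetical inputs; any batch of l2-normalized embedding pairs works.
anchor = tf.math.l2_normalize(tf.random.normal([4, 8]), axis=1)
positive = tf.math.l2_normalize(tf.random.normal([4, 8]), axis=1)
labels = tf.constant([1, 0, 1, 0], dtype=tf.int32)

loss = contrastive_loss(labels, anchor, positive, margin=1.0)  # tf.float32 scalar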
Example #5
Source File: math_ops_test.py From deep_image_model with Apache License 2.0
def testSquaredDifference(self):
  for dtype in [np.int32, np.float16]:
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    y = np.array([-3, -2, -1], dtype=dtype)
    z = (x - y) * (x - y)
    with self.test_session(use_gpu=True):
      z_tf = math_ops.squared_difference(x, y).eval()
      self.assertAllClose(z, z_tf)
Example #6
Source File: deep_rnn_model.py From deep-quant with MIT License
def _mean_squared_error(self, targets, outputs, mask):
    loss = math_ops.squared_difference(targets, outputs)
    # TODO: Make the below safe to div by zero
    mse = tf.reduce_sum(loss) / tf.reduce_sum(mask)
    return mse
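The TODO above points at a division-by-zero risk when the mask sums to zero. One way to make it safe, offered here only as a suggestion and not taken from the deep-quant code, is tf.math.divide_no_nan, which yields 0 instead of NaN or Inf for an empty mask:

# Hedged sketch of the TODO fix (requires a TensorFlow version that provides tf.math.divide_no_nan):
mse = tf.math.divide_no_nan(tf.reduce_sum(loss), tf.reduce_sum(mask))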
Example #7
Source File: deep_rnn_model_huber_loss.py From deep-quant with MIT License
def _mean_squared_error(targets, outputs, mask):
    loss = math_ops.squared_difference(targets, outputs)
    # TODO: Make the below safe to div by zero
    mse = tf.reduce_sum(loss) / tf.reduce_sum(mask)
    return mse
Example #8
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def _test_squared_difference(data):
    """ One iteration of squared difference """
    return _test_elemwise(math_ops.squared_difference, data)

#######################################################################
# Floor_divide
# ------------
Example #9
Source File: nn_impl.py From lambda-packs with MIT License
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
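To turn the four returned values back into moments, the standard shifted-data identities apply (this is essentially what tf.nn.normalize_moments does with the same inputs): with m = m_ss / counts and v = v_ss / counts, the mean is m + shift and the variance is v - m**2; when shift is None the shift term simply drops out. A hedged sketch, where x and shift_value are hypothetical placeholders:

counts, m_ss, v_ss, shift = sufficient_statistics(x, axes=[0], shift=shift_value)
shifted_mean = m_ss / counts                              # E[x - shift]
mean = shifted_mean + shift                               # E[x]
variance = v_ss / counts - math_ops.square(shifted_mean)  # E[(x - shift)^2] - (E[x - shift])^2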
Example #10
Source File: nn_impl.py From lambda-packs with MIT License
def moments(x, axes, shift=None, name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  Note: for numerical stability, when shift=None, the true mean
  would be computed and used as shift.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

    * for so-called "global normalization", used with convolutional filters
      with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
    * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` in which case the true mean of the data
      is used as shift. A shift close to the true mean provides the most
      numerically stable results.
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes, shift]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16
    y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
    if shift is None:
      # Compute true mean while keeping the dims for proper broadcasting.
      shift = array_ops.stop_gradient(
          math_ops.reduce_mean(y, axes, keep_dims=True))
    else:
      shift = math_ops.cast(shift, y.dtype)
    shifted_mean = math_ops.reduce_mean(
        math_ops.subtract(y, shift), axes, keep_dims=True, name="shifted_mean")
    variance = math_ops.subtract(
        math_ops.reduce_mean(
            math_ops.squared_difference(y, shift), axes, keep_dims=True),
        math_ops.square(shifted_mean),
        name="variance")
    mean = math_ops.add(shifted_mean, shift, name="mean")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if x.dtype == dtypes.float16:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    else:
      return (mean, variance)
Example #11
Source File: nn_impl.py From auto-alt-text-lambda-api with MIT License
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
Example #12
Source File: nn.py From deep_image_model with Apache License 2.0
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if x_shape.is_fully_defined():
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(array_ops.shape(x), axes)
      counts = math_ops.cast(
          math_ops.reduce_prod(x_dims), x.dtype, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.sub(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
Example #13
Source File: losses_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid. Also if `labels` or
      `predictions` is None.
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Example #14
Source File: nn_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
Example #15
Source File: nn_impl.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def moments(x, axes,
            shift=None,  # pylint: disable=unused-argument
            name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  Note: shift is currently not used, the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

    * for so-called "global normalization", used with convolutional filters
      with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
    * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: Not used in the current implementation
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16
    y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
    # Compute true mean while keeping the dims for proper broadcasting.
    mean = math_ops.reduce_mean(y, axes, keep_dims=True, name="mean")
    # sample variance, not unbiased variance
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keep_dims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if x.dtype == dtypes.float16:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    else:
      return (mean, variance)
Example #16
Source File: nn_impl.py From keras-lambda with MIT License
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift