Python tensorflow.python.ops.math_ops.multiply() Examples
The following are 30 code examples of tensorflow.python.ops.math_ops.multiply(), taken from open-source projects. Each example lists the project and source file it comes from. You may also want to check out all available functions and classes of the module tensorflow.python.ops.math_ops.
Example #1
Source File: loss_ops.py From lambda-packs with MIT License
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
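The core of this example is the remapping of {0, 1} labels to {-1, +1} followed by relu(1 - y * logits). A minimal NumPy sketch of the same arithmetic (illustrative only, not part of the original source):

import numpy as np

logits = np.array([2.0, -0.5, 0.3])
labels = np.array([1.0, 0.0, 1.0])             # binary {0, 1} labels
signed = 2 * labels - 1                        # remapped to {-1, +1}
loss = np.maximum(0.0, 1.0 - signed * logits)  # relu(1 - y * logits)
print(loss)                                    # [0.  0.5 0.7]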
Example #2
Source File: stepper_cli_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  self.a = variables.Variable(10.0, name="a")
  self.b = variables.Variable(20.0, name="b")
  self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
  self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
  self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.
  self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
  self.f = math_ops.multiply(self.e, self.ph, name="f")
  self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
      self.e, name="opt")

  self.sess = session.Session()
  self.sess.run(self.a.initializer)
  self.sess.run(self.b.initializer)
Example #3
Source File: loss_ops.py From lambda-packs with MIT License
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or
      [batch_size, d1, ... dN]. The `losses` are reduced (tf.reduce_sum)
      until its dimension matches that of `weights` at which point the
      reduced `losses` are element-wise multiplied by `weights` and a final
      reduce_sum is computed on the result. Conceptually, this operation is
      equivalent to broadcasting (tiling) `weights` to be the same size as
      `losses`, performing an element-wise multiplication, and summing the
      result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  # First, compute the sum of the losses over all elements:
  start_index = max(0, weights.get_shape().ndims)
  reduction_indices = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(
      losses, reduction_indices=reduction_indices)
  reduced_losses = math_ops.multiply(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
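To see what the reduce-then-multiply does, here is a small NumPy sketch of the same computation for 2-D losses and per-example weights (an illustration with made-up values, not part of the original file):

import numpy as np

losses = np.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])      # shape [batch_size=2, d1=3]
weights = np.array([0.5, 2.0])            # shape [batch_size]

# Reduce trailing axes of `losses` until its rank matches `weights`.
reduced = losses.sum(axis=1)              # [6.0, 15.0]
scaled = reduced * weights                # [3.0, 30.0]
print(scaled.sum())                       # 33.0

# Equivalent broadcast-then-sum formulation described in the docstring.
print((losses * weights[:, None]).sum())  # 33.0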
Example #4
Source File: stepper_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  self.a = variables.Variable(2.0, name="a")
  self.b = variables.Variable(3.0, name="b")

  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.

  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
  # and y are both direct inputs to z, but x is also a direct input to y.
  self.x = variables.Variable(2.0, name="x")  # Should be 2.0.
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  self.sess = session.Session()
  self.sess.run(variables.global_variables_initializer())
Example #5
Source File: losses_impl.py From auto-alt-text-lambda-api with MIT License
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dN]`. The `losses` are reduced (`tf.reduce_sum`)
      until its dimension matches that of `weights` at which point the reduced
      `losses` are element-wise multiplied by `weights` and a final
      `reduce_sum` is computed on the result. Conceptually, this operation is
      similar to broadcasting (tiling) `weights` to be the same shape as
      `losses`, performing an element-wise multiplication, and summing the
      result. Note, however, that the dimension matching is right-to-left,
      not left-to-right; i.e., the opposite of standard NumPy/Tensorflow
      broadcasting.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  weighted_losses = math_ops.multiply(losses, weights)
  return math_ops.reduce_sum(weighted_losses)
Example #6
Source File: sdca_ops.py From auto-alt-text-lambda-api with MIT License
def _linear_predictions(self, examples):
  """Returns predictions of the form w*x."""
  with name_scope('sdca/prediction'):
    sparse_variables = self._convert_n_to_tensor(self._variables[
        'sparse_features_weights'])
    result = 0.0
    for sfc, sv in zip(examples['sparse_features'], sparse_variables):
      # TODO(sibyl-Aix6ihai): following does not take care of missing features.
      result += math_ops.segment_sum(
          math_ops.multiply(
              array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
          sfc.example_indices)
    dense_features = self._convert_n_to_tensor(examples['dense_features'])
    dense_variables = self._convert_n_to_tensor(self._variables[
        'dense_features_weights'])

    for i in range(len(dense_variables)):
      result += math_ops.matmul(dense_features[i],
                                array_ops.expand_dims(dense_variables[i], -1))

  # Reshaping to allow shape inference at graph construction time.
  return array_ops.reshape(result, [-1])
Example #7
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or
      [batch_size, d1, ... dN]. The `losses` are reduced (tf.reduce_sum)
      until its dimension matches that of `weights` at which point the
      reduced `losses` are element-wise multiplied by `weights` and a final
      reduce_sum is computed on the result. Conceptually, this operation is
      equivalent to broadcasting (tiling) `weights` to be the same size as
      `losses`, performing an element-wise multiplication, and summing the
      result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  # First, compute the sum of the losses over all elements:
  start_index = max(0, weights.get_shape().ndims)
  reduction_indices = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(
      losses, reduction_indices=reduction_indices)
  reduced_losses = math_ops.multiply(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
Example #8
Source File: ind_rnn_cell.py From indrnn with Apache License 2.0
def call(self, inputs, state):
  """Run one time step of the IndRNN.

  Calculates the output and new hidden state using the IndRNN equation

    `output = new_state = act(W * input + u (*) state + b)`

  where `*` is the matrix multiplication and `(*)` is the Hadamard product.

  Args:
    inputs: Tensor, 2-D tensor of shape `[batch, num_units]`.
    state: Tensor, 2-D tensor of shape `[batch, num_units]` containing the
      previous hidden state.

  Returns:
    A tuple containing the output and new hidden state. Both are the same
      2-D tensor of shape `[batch, num_units]`.
  """
  gate_inputs = math_ops.matmul(inputs, self._input_kernel)
  recurrent_update = math_ops.multiply(state, self._recurrent_kernel)
  gate_inputs = math_ops.add(gate_inputs, recurrent_update)
  gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
  output = self._activation(gate_inputs)
  return output, output
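The recurrent term here is an element-wise (Hadamard) product rather than a matrix multiplication, which is what distinguishes the IndRNN update from a vanilla RNN cell. A minimal NumPy sketch of one step, with made-up shapes, random weights, and tanh standing in for the activation (an illustration, not the cell's actual initialization):

import numpy as np

batch, input_size, num_units = 2, 3, 4
rng = np.random.default_rng(0)

inputs = rng.normal(size=(batch, input_size))
state = rng.normal(size=(batch, num_units))
input_kernel = rng.normal(size=(input_size, num_units))   # W
recurrent_kernel = rng.normal(size=(num_units,))           # u, one weight per unit
bias = np.zeros(num_units)

# output = new_state = act(W * input + u (*) state + b)
pre_activation = inputs @ input_kernel + state * recurrent_kernel + bias
output = np.tanh(pre_activation)
print(output.shape)  # (2, 4)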
Example #9
Source File: metric_learning.py From tf-slim with Apache License 2.0
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
  masked_maximums = math_ops.reduce_max(
      math_ops.multiply(data - axis_minimums, mask), dim,
      keepdims=True) + axis_minimums
  return masked_maximums
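Subtracting the per-axis minimum before masking makes every shifted value non-negative, so entries zeroed out by the mask can never win the maximum, even when all the data is negative. A small NumPy sketch of why the shift matters (illustrative values only):

import numpy as np

data = np.array([[-3.0, -1.0, -2.0]])   # all negative, to show why the shift matters
mask = np.array([[1.0, 0.0, 1.0]])      # only columns 0 and 2 are eligible

axis_min = data.min(axis=1, keepdims=True)        # [[-3.]]
shifted = (data - axis_min) * mask                # [[0., 0., 1.]]; masked entries become 0
masked_max = shifted.max(axis=1, keepdims=True) + axis_min
print(masked_max)                                 # [[-2.]]  (max over columns 0 and 2)

# A naive data * mask would give max([-3., 0., -2.]) = 0., which is wrong.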
Example #10
Source File: loss_ops.py From tf-slim with Apache License 2.0
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or
      [batch_size, d1, ... dN]. The `losses` are reduced (tf.reduce_sum)
      until its dimension matches that of `weights` at which point the
      reduced `losses` are element-wise multiplied by `weights` and a final
      reduce_sum is computed on the result. Conceptually, this operation is
      equivalent to broadcasting (tiling) `weights` to be the same size as
      `losses`, performing an element-wise multiplication, and summing the
      result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  # First, compute the sum of the losses over all elements:
  start_index = max(0, weights.get_shape().ndims)
  axis = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(losses, axis=axis)
  reduced_losses = math_ops.multiply(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
Example #11
Source File: head.py From auto-alt-text-lambda-api with MIT License
def _weighted_loss(loss, weight):
  """Returns cumulative weighted loss as 1d `Tensor`."""
  with ops.name_scope(None, "weighted_loss", (loss, weight)) as name:
    return math_ops.multiply(
        array_ops.reshape(loss, shape=(-1,)),
        array_ops.reshape(weight, shape=(-1,)),
        name=name)
Example #12
Source File: classification.py From tf-slim with Apache License 2.0
def accuracy(predictions, labels, weights=None, name=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.
    name: A name for the operation (optional).

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or if dtype is not bool, integer, or
      string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
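The weighted branch computes a weighted mean: correct predictions are scaled by their weights and divided by the total weight. A small NumPy sketch of the same arithmetic with made-up values:

import numpy as np

predictions = np.array([1, 0, 1, 1])
labels = np.array([1, 1, 1, 0])
weights = np.array([1.0, 1.0, 2.0, 0.5])

is_correct = (predictions == labels).astype(np.float32)   # [1., 0., 1., 0.]
weighted_accuracy = (is_correct * weights).sum() / weights.sum()
print(weighted_accuracy)   # 3.0 / 4.5 ≈ 0.667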
Example #13
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License
def cosine_distance(
    predictions, labels=None, dim=None, weights=1.0, scope=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'
    dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)

    radial_diffs = math_ops.multiply(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs,
                                     reduction_indices=[dim,])
    return compute_weighted_loss(losses, weights, scope=scope)
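Because the inputs are assumed to be unit-normalized, the cosine distance reduces to 1 minus the element-wise product summed along `dim`. A small NumPy sketch with made-up unit vectors:

import numpy as np

predictions = np.array([[1.0, 0.0], [0.6, 0.8]])   # unit-normalized rows
labels = np.array([[0.0, 1.0], [0.6, 0.8]])

radial_diffs = predictions * labels
losses = 1 - radial_diffs.sum(axis=1)
print(losses)   # [1. 0.]  orthogonal pair -> 1, identical pair -> 0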
Example #14
Source File: loss_ops.py From auto-alt-text-lambda-api with MIT License
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope=scope)
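The element-wise loss computed here, before weighting, is binary cross entropy: -y*log(p + eps) - (1 - y)*log(1 - p + eps). A minimal NumPy sketch with made-up probabilities:

import numpy as np

predictions = np.array([0.9, 0.2, 0.6])
labels = np.array([1.0, 0.0, 1.0])
epsilon = 1e-7

losses = (-labels * np.log(predictions + epsilon)
          - (1 - labels) * np.log(1 - predictions + epsilon))
print(losses)   # ~[0.105 0.223 0.511]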
Example #15
Source File: evaluation_test.py From tf-slim with Apache License 2.0
def TestModel(inputs):
  scale = variables.Variable(1.0, trainable=False)

  # Scaling the outputs wont change the result...
  outputs = math_ops.multiply(inputs, scale)
  return math_ops.argmax(outputs, 1), scale
Example #16
Source File: core_test.py From auto-alt-text-lambda-api with MIT License
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
Example #17
Source File: target_column.py From auto-alt-text-lambda-api with MIT License
def _weighted_loss(self, loss, weight_tensor):
  """Returns cumulative weighted loss."""
  unweighted_loss = array_ops.reshape(loss, shape=(-1,))
  weighted_loss = math_ops.multiply(
      unweighted_loss, array_ops.reshape(weight_tensor, shape=(-1,)))
  return weighted_loss
Example #18
Source File: losses_ops.py From auto-alt-text-lambda-api with MIT License
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [batch_size, feature_size], linear transformation
      matrix.
    biases: Tensor, [batch_size], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.multiply(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
Example #19
Source File: metric_ops.py From auto-alt-text-lambda-api with MIT License
def _broadcast_weights(weights, values):
  """Broadcast `weights` to the same shape as `values`.

  This returns a version of `weights` following the same broadcast rules as
  `mul(weights, values)`. When computing a weighted average, use this function
  to broadcast `weights` before summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.

  Args:
    weights: `Tensor` whose rank is either 0, or the same rank as `values`,
      and must be broadcastable to `values` (i.e., all dimensions must be
      either `1`, or the same as the corresponding `values` dimension).
    values: `Tensor` of any shape.

  Returns:
    `weights` broadcast to `values` shape.
  """
  with ops.name_scope(None, 'broadcast_weights', (values, weights)) as scope:
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
      return weights
    with ops.control_dependencies((_assert_weights_rank(weights, values),)):
      return math_ops.multiply(
          weights, array_ops.ones_like(values), name=scope)
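Multiplying by ones simply materializes the broadcast so that the denominator of a weighted mean counts every element the weights touch. An illustrative NumPy equivalent (made-up values, not part of the original module):

import numpy as np

values = np.arange(6.0).reshape(2, 3)    # shape (2, 3)
weights = np.array([[0.5], [2.0]])       # shape (2, 1), broadcastable to (2, 3)

broadcast_weights = weights * np.ones_like(values)    # shape (2, 3)
weighted_mean = (weights * values).sum() / broadcast_weights.sum()
print(weighted_mean)   # 25.5 / 7.5 = 3.4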
Example #20
Source File: layers.py From tf-slim with Apache License 2.0
def scale_gradient(inputs, gradient_multiplier):
  """Identity operation, but with the gradient multiplied by a tensor.

  The TensorFlow gradient system will compute the gradient with respect to
  `inputs` as the product of the gradient with respect to the `output`
  multiplied by a specified `gradient_multiplier` tensor. If
  `gradient_multiplier` is equal to 1, then this results in the true gradient.
  Otherwise, it results in a scaled gradient.

  This can be useful for adjusting the relative learning rate of different
  parameter tensors when performing gradient descent, and because this
  rescaling can be inserted at arbitrary locations within a graph, is often
  more convenient to apply than simply rescaling the final computed gradients.

  Args:
    inputs: Tensor to be output.
    gradient_multiplier: Tensor by which to multiply the gradient with respect
      to `output` to compute the gradient with respect to `inputs`.  Its shape
      must be broadcastable to the shape of `inputs`.

  Returns:
    output Tensor, equal to `inputs`.
  """
  # Note: returning `(inputs, grad)` only makes sense because, in the original
  # library, this function is registered as a custom gradient; the grad
  # function rescales the incoming gradient by `gradient_multiplier`.
  def grad(dy):
    return [dy * gradient_multiplier, None]

  return inputs, grad
Example #21
Source File: nn_impl.py From auto-alt-text-lambda-api with MIT License
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance of based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing a the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize",
                      [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
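The formulas being applied are mean = shift + mean_ss / counts and variance = variance_ss / counts - (mean_ss / counts)^2, where the sufficient statistics are accumulated on shifted data. A quick NumPy check of the same identities against np.mean and np.var (illustrative values only):

import numpy as np

x = np.array([4.0, 7.0, 9.0, 12.0])
shift = 8.0                              # any value near the mean works
counts = x.size
mean_ss = (x - shift).sum()              # shifted sum
variance_ss = ((x - shift) ** 2).sum()   # shifted sum of squares

shifted_mean = mean_ss / counts
mean = shifted_mean + shift
variance = variance_ss / counts - shifted_mean ** 2

print(mean, np.mean(x))      # 8.0 8.0
print(variance, np.var(x))   # 8.5 8.5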
Example #22
Source File: losses_impl.py From auto-alt-text-lambda-api with MIT License
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
             loss_collection=ops.GraphKeys.LOSSES):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Example #23
Source File: losses_impl.py From auto-alt-text-lambda-api with MIT License
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` of the loss value.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Example #24
Source File: losses_impl.py From auto-alt-text-lambda-api with MIT License
def cosine_distance(
    labels, predictions, dim=None, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    labels: `Tensor` whose shape matches 'predictions'
    predictions: An arbitrary matrix.
    dim: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    radial_diffs = math_ops.multiply(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(dim,),
                                     keep_dims=True)
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Example #25
Source File: layers.py From tf-slim with Apache License 2.0
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
  """Project into the Poincare ball with norm <= 1.0 - epsilon.

  https://en.wikipedia.org/wiki/Poincare_ball_model

  Used in Poincare Embeddings for Learning Hierarchical Representations
  Maximilian Nickel, Douwe Kiela
  https://arxiv.org/pdf/1705.08039.pdf

  For a 1-D tensor with `axis = 0`, computes

      output = (x * (1 - epsilon)) / ||x||   if ||x|| > 1 - epsilon
      output = x                             otherwise

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `axis`.

  Args:
    x: A `Tensor`.
    axis: Axis along which to normalize. A scalar or a vector of integers.
    epsilon: A small deviation from the edge of the unit sphere for numerical
      stability.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, 'poincare_normalize', [x]) as name:
    x = ops.convert_to_tensor(x, name='x')
    square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    x_inv_norm = math_ops.rsqrt(square_sum)
    x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
    return math_ops.multiply(x, x_inv_norm, name=name)
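The minimum with 1 is what leaves vectors already inside the ball untouched while rescaling longer vectors to norm 1 - epsilon. An illustrative NumPy version for row vectors (a sketch with made-up values):

import numpy as np

epsilon = 1e-5
x = np.array([[3.0, 4.0],    # norm 5, gets projected onto the ball boundary
              [0.3, 0.4]])   # norm 0.5, left unchanged

norm = np.sqrt((x ** 2).sum(axis=1, keepdims=True))
x_inv_norm = np.minimum((1.0 - epsilon) / norm, 1.0)
projected = x * x_inv_norm
print(np.linalg.norm(projected, axis=1))   # ~[0.99999 0.5]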
Example #26
Source File: loss_ops.py From tf-slim with Apache License 2.0
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.cast(predictions, dtypes.float32)
    labels = math_ops.cast(labels, dtypes.float32)
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope=scope)
Example #27
Source File: loss_ops.py From tf-slim with Apache License 2.0
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a
      positive (resp. negative) binary prediction.
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
      Internally the {0,1} labels are converted to {-1,1} when calculating
      the hinge loss.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    An unweighted `Tensor` of same shape as `logits` and `labels` representing
    the loss values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.cast(labels, dtypes.float32)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Example #28
Source File: loss_ops.py From lambda-packs with MIT License
def cosine_distance(
    predictions, labels=None, dim=None, weights=1.0, scope=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'
    dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)

    radial_diffs = math_ops.multiply(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs,
                                     reduction_indices=[dim,])
    return compute_weighted_loss(losses, weights, scope=scope)
Example #29
Source File: layers.py From tensornets with MIT License
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
  """Project into the Poincare ball with norm <= 1.0 - epsilon.

  https://en.wikipedia.org/wiki/Poincare_ball_model

  Used in Poincare Embeddings for Learning Hierarchical Representations
  Maximilian Nickel, Douwe Kiela
  https://arxiv.org/pdf/1705.08039.pdf

  For a 1-D tensor with `axis = 0`, computes

      output = (x * (1 - epsilon)) / ||x||   if ||x|| > 1 - epsilon
      output = x                             otherwise

  For `x` with more dimensions, independently normalizes each 1-D slice along
  dimension `axis`.

  Args:
    x: A `Tensor`.
    axis: Axis along which to normalize. A scalar or a vector of integers.
    epsilon: A small deviation from the edge of the unit sphere for numerical
      stability.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, 'poincare_normalize', [x]) as name:
    x = ops.convert_to_tensor(x, name='x')
    square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    x_inv_norm = math_ops.rsqrt(square_sum)
    x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
    return math_ops.multiply(x, x_inv_norm, name=name)
Example #30
Source File: classification.py From auto-alt-text-lambda-api with MIT License
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or if dtype is not bool, integer, or
      string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)