Python tensorflow.compat.v1.metrics Examples

The following are 8 code examples of the tensorflow.compat.v1.metrics module, taken from open-source projects. The source file, project, and license for each example are noted above it.
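Functions in this module follow TF1's streaming-metric pattern: each call returns a (value, update_op) pair backed by local variables that accumulate statistics across batches. A minimal sketch of that pattern (the tensors and feed values here are illustrative, not taken from any of the projects below):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

labels = tf.placeholder(tf.int64, [None])
predictions = tf.placeholder(tf.int64, [None])

# Each tf.metrics function returns (value, update_op); the running counts
# live in local variables, which must be initialized before use.
accuracy, update_op = tf.metrics.accuracy(labels, predictions)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # Accumulate over batches by running the update op...
  sess.run(update_op, {labels: [1, 0, 1], predictions: [1, 1, 1]})
  sess.run(update_op, {labels: [0, 0], predictions: [0, 0]})
  # ...then read the aggregated value: 4 correct out of 5.
  print(sess.run(accuracy))  # 0.8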
Example #1
Source File: compat_test.py    From lingvo with Apache License 2.0
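# In this snippet, tf1 and tf2 are the tensorflow.compat.v1 and
# tensorflow.compat.v2 module aliases used in lingvo's compat tests.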
def testDoesNotModifyTF2(self):
    modules_not_overwritten = [
        (tf2.data, tf1.data),
        (tf2.graph_util, tf1.graph_util),
        (tf2.image, tf1.image),
        (tf2.initializers, tf1.initializers),
        (tf2.io, tf1.io),
        (tf2.losses, tf1.losses),
        (tf2.metrics, tf1.metrics),
        (tf2.nn, tf1.nn),
        (tf2.random, tf1.random),
        (tf2.saved_model, tf1.saved_model),
        (tf2.strings, tf1.strings),
        (tf2.summary, tf1.summary),
        (tf2.test, tf1.test),
        (tf2.train, tf1.train),
    ]
    for tf2_module, tf1_module in modules_not_overwritten:
      self.assertIsNot(tf2_module, tf1_module)
Example #2
Source File: base_model.py    From magenta with Apache License 2.0
def reconstruction_loss(self, x_input, x_target, x_length, z=None,
                          c_input=None):
    """Reconstruction loss calculation.

    Args:
      x_input: Batch of decoder input sequences for teacher forcing, sized
          `[batch_size, max(x_length), output_depth]`.
      x_target: Batch of expected output sequences to compute loss against,
          sized `[batch_size, max(x_length), output_depth]`.
      x_length: Length of input/output sequences, sized `[batch_size]`.
      z: (Optional) Latent vectors. Required if model is conditional. Sized
          `[n, z_size]`.
      c_input: (Optional) Batch of control sequences, sized
          `[batch_size, max(x_length), control_depth]`. Required if conditioning
          on control sequences.

    Returns:
      r_loss: The reconstruction loss for each sequence in the batch.
      metric_map: Map from metric name to tf.metrics return values for logging.
    """
    pass 
Example #3
Source File: model_utils.py    From language with Apache License 2.0
def hamming_loss(preds, targets, sign=False):
  """Implements hamming loss.

  Args:
    preds: Tensor of predicted values.
    targets: Tensor of target values.
    sign (bool): Set to True to take the sign of preds before computing the
      loss (use when targets are in {-1, 1}).

  Returns:
    A tf.metrics tuple containing the proportion of incorrect predictions and an
    update op for the metric.
  """
  if sign:
    preds = tf.sign(preds)
  equal = tf.equal(preds, tf.cast(targets, preds.dtype))
  proportion_correct, update_op = tf.metrics.mean(tf.cast(equal, tf.float32))
  return 1 - proportion_correct, update_op 
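Because hamming_loss returns a standard (value, update_op) pair, it plugs into the usual TF1 evaluation loop. A hypothetical usage sketch:

# Hypothetical usage: stream the hamming loss over evaluation batches.
preds = tf.constant([1.0, -1.0, 1.0])
targets = tf.constant([1, 1, 1])
loss, update_op = hamming_loss(preds, targets, sign=True)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(loss))  # one of three predictions is wrong: ~0.333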
Example #4
Source File: base_model.py    From magenta with Apache License 2.0
def eval(self, input_sequence, output_sequence, sequence_length,
           control_sequence=None):
    """Evaluate on the given sequences, returning metric update ops.

    Args:
      input_sequence: The sequence to be fed to the encoder.
      output_sequence: The sequence expected from the decoder.
      sequence_length: The length of the given sequences (which must be
        identical).
      control_sequence: (Optional) sequence on which to condition the decoder.

    Returns:
      metric_update_ops: tf.metrics update ops.
    """
    metric_map, scalars_to_summarize = self._compute_model_loss(
        input_sequence, output_sequence, sequence_length, control_sequence)

    for n, t in scalars_to_summarize.items():
      metric_map[n] = tf.metrics.mean(t)

    metrics_to_values, metrics_to_updates = (
        tf_slim.metrics.aggregate_metric_map(metric_map))

    for metric_name, metric_value in metrics_to_values.items():
      tf.summary.scalar(metric_name, metric_value)

    return list(metrics_to_updates.values()) 
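Here tf_slim.metrics.aggregate_metric_map takes the {name: (value, update_op)} dictionary built above and splits it into two dictionaries, {name: value} and {name: update_op}. That is what lets this method attach the value tensors to scalar summaries while returning only the update ops for the evaluation loop to run.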
Example #5
Source File: lstm_models.py    From magenta with Apache License 2.0
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    """Core loss calculation method for flattened outputs.

    Args:
      flat_x_target: The flattened ground truth vectors, sized
        `[sum(x_length), self._output_depth]`.
      flat_rnn_output: The flattened output from all timesteps of the RNN,
        sized `[sum(x_length), rnn_output_size]`.

    Returns:
      r_loss: The unreduced reconstruction losses, sized `[sum(x_length)]`.
      metric_map: A map of metric names to tuples, each of which contain the
        pair of (value_tensor, update_op) from a tf.metrics streaming metric.
    """
    pass 
Example #6
Source File: lstm_models.py    From magenta with Apache License 2.0
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    flat_logits = flat_rnn_output
    flat_truth = tf.squeeze(flat_x_target, axis=1)
    flat_predictions = tf.squeeze(flat_logits >= 0, axis=1)
    r_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=flat_x_target, logits=flat_logits)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.accuracy(flat_truth, flat_predictions),
    }
    return r_loss, metric_map 
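Note that thresholding the logits at zero (flat_logits >= 0) is equivalent to thresholding the sigmoid probability at 0.5, so the accuracy metric uses the decision rule implied by the sigmoid cross-entropy loss.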
Example #7
Source File: lstm_models.py    From magenta with Apache License 2.0
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    flat_logits = flat_rnn_output
    flat_truth = tf.argmax(flat_x_target, axis=1)
    flat_predictions = tf.argmax(flat_logits, axis=1)
    r_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=flat_x_target, logits=flat_logits)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.accuracy(flat_truth, flat_predictions),
        'metrics/mean_per_class_accuracy':
            tf.metrics.mean_per_class_accuracy(
                flat_truth, flat_predictions, int(flat_x_target.shape[-1])),
    }
    return r_loss, metric_map 
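Unlike tf.metrics.accuracy, tf.metrics.mean_per_class_accuracy also needs the number of classes, which is recovered here from the depth of the one-hot targets (flat_x_target.shape[-1]).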
Example #8
Source File: lstm_models.py    From magenta with Apache License 2.0
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    # flat_x_target is by default shape (1,27), [on/offs... vels...offsets...]
    # split into 3 equal length vectors
    target_hits, target_velocities, target_offsets = tf.split(
        flat_x_target, 3, axis=1)

    output_hits, output_velocities, output_offsets = self._activate_outputs(
        flat_rnn_output)

    hits_loss = tf.reduce_sum(tf.losses.log_loss(
        labels=target_hits, predictions=output_hits,
        reduction=tf.losses.Reduction.NONE), axis=1)

    velocities_loss = tf.reduce_sum(tf.losses.mean_squared_error(
        target_velocities, output_velocities,
        reduction=tf.losses.Reduction.NONE), axis=1)

    offsets_loss = tf.reduce_sum(tf.losses.mean_squared_error(
        target_offsets, output_offsets,
        reduction=tf.losses.Reduction.NONE), axis=1)

    loss = hits_loss + velocities_loss + offsets_loss

    metric_map = {
        'metrics/hits_loss':
            tf.metrics.mean(hits_loss),
        'metrics/velocities_loss':
            tf.metrics.mean(velocities_loss),
        'metrics/offsets_loss':
            tf.metrics.mean(offsets_loss)
    }

    return loss, metric_map
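Each entry in metric_map is again a (value, update_op) pair from tf.metrics.mean, so the hits, velocities, and offsets terms can be monitored separately once the map is aggregated by the eval method shown in Example #4. With the default (1, 27) targets, each of the three slices has width 9.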