Python tensorflow.python.summary.summary.scalar() Examples

The following are 30 code examples of tensorflow.python.summary.summary.scalar(), drawn from open-source projects; the source file and originating project for each example are noted above it. You may also want to check out all available functions and classes of the module tensorflow.python.summary.summary.
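For orientation: summary.scalar(name, tensor) builds an op that serializes a single scalar value into a `Summary` protocol buffer, which is typically written to an event file and rendered by TensorBoard. Below is a minimal, self-contained sketch (TF1-style graph mode; the log directory is an arbitrary choice, not part of any example):

import tensorflow.compat.v1 as tf
from tensorflow.python.summary import summary

tf.disable_eager_execution()

loss = tf.placeholder(tf.float32, shape=(), name='loss')
loss_summary = summary.scalar('loss', loss)  # string tensor holding a serialized Summary

with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/scalar_demo', sess.graph)
  for step in range(3):
    serialized = sess.run(loss_summary, feed_dict={loss: 1.0 / (step + 1)})
    writer.add_summary(serialized, global_step=step)
  writer.close()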
Example #1
Source File: summaries.py    From lambda-packs with MIT License
def _add_scalar_summary(tensor, tag=None):
  """Add a scalar summary operation for the tensor.

  Args:
    tensor: The tensor to summarize.
    tag: The tag to use; if None, the tensor's op name is used.

  Returns:
    The created scalar summary.

  Raises:
    ValueError: If the tag is already in use or the rank is not 0.
  """
  tensor.get_shape().assert_has_rank(0)
  tag = tag or '%s_summary' % tensor.op.name
  return summary.scalar(tag, tensor) 
Example #2
Source File: learning_test.py    From tf-slim with Apache License 2.0
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      summary_op = summary.merge_all()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, summary_op=summary_op) 
Example #3
Source File: summaries.py    From tf-slim with Apache License 2.0
def _add_scalar_summary(tensor, tag=None):
  """Add a scalar summary operation for the tensor.

  Args:
    tensor: The tensor to summarize.
    tag: The tag to use; if None, the tensor's op name is used.

  Returns:
    The created scalar summary.

  Raises:
    ValueError: If the tag is already in use or the rank is not 0.
  """
  tensor.get_shape().assert_has_rank(0)
  tag = tag or '%s_summary' % tensor.op.name
  return summary.scalar(tag, tensor) 
Example #4
Source File: training.py    From tf-slim with Apache License 2.0
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '_gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '_gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
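For context, here is a minimal sketch of how add_gradients_summaries is typically wired into a training graph; the toy loss, variable, and optimizer below are assumptions for illustration, not part of the original source:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([[1.0, 2.0]])
w = tf.get_variable('w', shape=(2, 1))
total_loss = tf.reduce_sum(tf.matmul(x, w))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(total_loss)
add_gradients_summaries(grads_and_vars)  # one histogram and one norm scalar per gradient
train_op = optimizer.apply_gradients(grads_and_vars)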
Example #5
Source File: learning.py    From tf-slim with Apache License 2.0
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #6
Source File: learning.py    From ctw-baseline with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #7
Source File: summaries.py    From tensornets with MIT License
def _add_scalar_summary(tensor, tag=None):
  """Add a scalar summary operation for the tensor.

  Args:
    tensor: The tensor to summarize.
    tag: The tag to use; if None, the tensor's op name is used.

  Returns:
    The created scalar summary.

  Raises:
    ValueError: If the tag is already in use or the rank is not 0.
  """
  tensor.get_shape().assert_has_rank(0)
  tag = tag or '%s_summary' % tensor.op.name
  return summary.scalar(tag, tensor) 
Example #8
Source File: learning.py    From lambda-packs with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #9
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_zero_fraction_summary(tensor, name=None, prefix=None,
                              print_summary=False):
  """Adds a summary for the percentage of zero values in the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  name = _get_summary_name(tensor, name, prefix, 'Fraction_of_Zero_Values')
  tensor = nn.zero_fraction(tensor)
  return add_scalar_summary(tensor, name, print_summary=print_summary) 
Example #10
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds a scalar summary for the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  collections = [] if print_summary else None
  summary_name = _get_summary_name(tensor, name, prefix)

  # If print_summary, then we need to make sure that this call doesn't add the
  # non-printing op to the collection. We'll add it to the collection later.
  op = summary.scalar(
      name=summary_name, tensor=tensor, collections=collections)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op 
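Note the design choice above: passing collections=[] keeps the silent summary op out of every graph collection, so when print_summary is set only the Print-wrapped op lands in GraphKeys.SUMMARIES, and summary.merge_all() collects the printing version. A sketch of a call (the tensor and names are assumed for illustration):

loss = tf.constant(0.25, name='loss')
op = add_scalar_summary(loss, name='loss', prefix='train', print_summary=True)
merged = summary.merge_all()  # contains the Print-wrapped op, not the silent one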
Example #11
Source File: learning.py    From CVTron with Apache License 2.0
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.
  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #12
Source File: learning.py    From CVTron with Apache License 2.0
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.
  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #13
Source File: learning.py    From CVTron with Apache License 2.0
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.
  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #14
Source File: head.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """Returns `EstimatorSpec` that a model_fn can return.

    Please note that all args must be passed via name.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` to be used by the head.
      labels: Labels `Tensor`, or `dict` of same.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
          to optimize the model with the loss. This is used in TRAIN mode and
          must not be None. None is allowed in other modes. If you want to
          optimize loss yourself you can pass `no_op_train_fn` and then use
          EstimatorSpec.loss to compute and apply gradients.

    Returns:
      `EstimatorSpec`.
    """
    raise NotImplementedError('Calling an abstract method.') 
Example #15
Source File: training.py    From lambda-packs with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '_gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '_gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #16
Source File: learning_test.py    From tf-slim with Apache License 2.0
def testTrainWithTrace(self):
    logdir = tempfile.mkdtemp('tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % (trace_step - 1)
      trace_filename_legacy = 'tf_trace-%d.json' % trace_step

      trace_paths = [os.path.join(logdir, f) for f in
                     (trace_filename, trace_filename_legacy)]
      # Note: with resource variables the traces are created at 0/100/200;
      # with legacy variables they are created at 1/101/201.
      self.assertTrue(any(os.path.isfile(path) for path in trace_paths),
                      trace_paths) 
Example #17
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_histogram_summary(tensor, name=None, prefix=None):
  """Adds a histogram summary for the given tensor.

  Args:
    tensor: A variable or op tensor.
    name: The optional name for the summary.
    prefix: An optional prefix for the summary names.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  return summary.histogram(
      _get_summary_name(tensor, name, prefix), tensor) 
Example #18
Source File: evaluation_test.py    From tf-slim with Apache License 2.0
def testSummariesAreFlushedToDisk(self):
    checkpoint_dir = tempfile.mkdtemp('summaries_are_flushed')
    logdir = tempfile.mkdtemp('summaries_are_flushed_eval')
    if gfile.Exists(logdir):
      gfile.DeleteRecursively(logdir)

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Create the model (which can be restored).
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    names_to_values = {'bread': 3.4, 'cheese': 4.5, 'tomato': 2.0}

    for k in names_to_values:
      v = names_to_values[k]
      summary_lib.scalar(k, v)

    evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(log_dir=logdir),
        ],
        max_number_of_evaluations=1)

    self._verify_events(logdir, names_to_values) 
Example #19
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_zero_fraction_summaries(tensors, prefix=None):
  """Adds a scalar zero-fraction summary for each of the given tensors.

  Args:
    tensors: a list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_zero_fraction_summary(tensor, prefix=prefix))
  return summary_ops 
Example #20
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _centered_bias(logits_dimension, head_name=None):
  """Returns `logits`, optionally with centered bias applied.

  Args:
    logits_dimension: Last dimension of `logits`. Must be >= 1.
    head_name: Optional name of the head.

  Returns:
    Centered bias `Variable`.

  Raises:
    ValueError: if `logits_dimension` is invalid.
  """
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  centered_bias = variable_scope.get_variable(
      name="centered_bias_weight",
      shape=(logits_dimension,),
      initializer=init_ops.zeros_initializer(),
      trainable=True)
  for dim in range(logits_dimension):
    if head_name:
      summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
                     centered_bias[dim])
    else:
      summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
  return centered_bias 
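A sketch of how a head implementation might consume this helper; the logits shape and the addition below are assumptions based on how centered-bias heads typically apply the returned variable:

logits = tf.zeros([32, 3])                       # hypothetical [batch, logits_dimension] logits
centered_bias = _centered_bias(3, head_name='my_head')
logits = logits + centered_bias                  # the bias broadcasts across the batch dimension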
Example #21
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def head_ops(self,
               features,
               labels,
               mode,
               train_op_fn,
               logits=None,
               logits_input=None,
               scope=None):
    """Returns ops for a model_fn.

    Args:
      features: input dict.
      labels: labels dict or tensor.
      mode: estimator's ModeKeys
      train_op_fn: function that takes a scalar loss and returns an op to
          optimize with the loss.
      logits: logits to be used for the head.
      logits_input: tensor to build logits from.
      scope: Optional scope for variable_scope.

    Returns:
      `ModelFnOps`.

    Raises:
      ValueError: if mode is not recognized.
    """
    raise NotImplementedError("Calling an abstract method.") 
Example #22
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_histogram_summaries(tensors, prefix=None):
  """Adds a histogram summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_histogram_summary(tensor, prefix=prefix))
  return summary_ops 
Example #23
Source File: composable_model.py    From auto-alt-text-lambda-api with MIT License
def _add_hidden_layer_summary(self, value, tag):
    # TODO(zakaria): Move this code to tf.learn and add test.
    summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value))
    summary.histogram("%s/activation" % tag, value) 
Example #24
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  return -1.0 * math_ops.reduce_sum(
      math_ops.reduce_min(
          array_ops.gather(pairwise_distances, centroid_ids), axis=0)) 
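In formula form: the code gathers the centroid rows of the distance matrix D, takes each point's minimum distance over the centroid set C, and negates the sum:

\[
E(C) = -\sum_{j} \min_{i \in C} D_{i,j}
\]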
Example #25
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages embeddings to be close to each other for samples
    with the same label, and to be at least `margin` apart for samples
    with different labels.
  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      binary labels indicating positive vs negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Get per pair distances
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.square(embeddings_anchor - embeddings_positive), 1))

  # Add contrastive loss for the siamese network.
  #   label here is {0,1} for neg, pos.
  return math_ops.reduce_mean(
      math_ops.to_float(labels) * math_ops.square(distances) +
      (1. - math_ops.to_float(labels)) *
      math_ops.square(math_ops.maximum(margin - distances, 0.)),
      name='contrastive_loss') 
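In formula form: with binary label y_i and Euclidean distance d_i between the i-th anchor/positive pair, the code above computes the Hadsell-Chopra-LeCun contrastive loss

\[
\mathcal{L} = \frac{1}{N} \sum_{i=1}^{N} \Big( y_i \, d_i^{2} + (1 - y_i) \, \max(m - d_i,\; 0)^{2} \Big),
\qquad d_i = \lVert a_i - p_i \rVert_2 ,
\]

where m is the margin.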
Example #26
Source File: gmm_ops.py    From auto-alt-text-lambda-api with MIT License
def _define_loglikelihood_operation(self):
    """Defines the total log-likelihood of current iteration."""
    self._ll_op = []
    for prior_probs in self._prior_probs:
      self._ll_op.append(math_ops.reduce_sum(math_ops.log(prior_probs)))
    summary.scalar('ll', math_ops.reduce_sum(self._ll_op)) 
Example #27
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainWithTrace(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % trace_step
      self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename))) 
Example #28
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  return -1.0 * math_ops.reduce_sum(
      math_ops.reduce_min(
          array_ops.gather(pairwise_distances, centroid_ids), axis=0)) 
Example #29
Source File: summaries.py    From tf-slim with Apache License 2.0
def add_image_summaries(tensors, prefix=None):
  """Adds an image summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_image_summary(tensor, prefix=prefix))
  return summary_ops 
Example #30
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def compute_clustering_score(labels, predictions, margin_type):
  """Computes the clustering score via sklearn.metrics functions.

  There are various ways to compute the clustering score. Intuitively,
  we want to measure the agreement of two clustering assignments (labels vs
  predictions) ignoring permutations, and output a score from zero to one,
  where values close to one indicate significant agreement.
  This code supports following scoring functions:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted Rand index
    vmeasure: v-measure
    const: indicator checking whether the two clusterings are the same.
  See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
    for the detailed descriptions.
  Args:
    labels: 1-D Tensor. ground truth cluster assignment.
    predictions: 1-D Tensor. predicted cluster assignment.
    margin_type: Type of structured margin to use. Default is nmi.
  Returns:
    clustering_score: dtypes.float32 scalar.
      The possible valid values are from zero to one.
      Zero means the worst clustering and one means the perfect clustering.
  Raises:
    ValueError: margin_type is not recognized.
  """
  margin_type_to_func = {
      'nmi': _compute_nmi_score,
      'ami': _compute_ami_score,
      'ari': _compute_ari_score,
      'vmeasure': _compute_vmeasure_score,
      'const': _compute_zeroone_score
  }

  if margin_type not in margin_type_to_func:
    raise ValueError('Unrecognized margin_type: %s' % margin_type)
  clustering_score_fn = margin_type_to_func[margin_type]
  return array_ops.squeeze(clustering_score_fn(labels, predictions))
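For context, the _compute_* helpers in the dispatch table are defined elsewhere in the same source file (in the original contrib code they wrap the corresponding sklearn.metrics scorers in a py_func). A sketch of a call, assuming those helpers are in scope:

labels = tf.constant([0, 0, 1, 1])
predictions = tf.constant([1, 1, 0, 0])
score = compute_clustering_score(labels, predictions, 'nmi')
# NMI is permutation-invariant, so this evaluates to 1.0 (perfect agreement).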