Python tensorflow.python.ops.math_ops.reduce_max() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.reduce_max(), collected from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow.python.ops.math_ops module.
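
For orientation, here is a minimal sketch of what reduce_max computes, using the public tf.reduce_max alias and assuming TensorFlow 2.x eager execution. Note that the examples below span two API generations: older releases spell the keyword arguments keep_dims and reduction_indices, while newer releases use keepdims and axis.

import tensorflow as tf

x = tf.constant([[1., 5., 3.],
                 [4., 2., 6.]])

print(tf.reduce_max(x))                         # 6.0, the global maximum
print(tf.reduce_max(x, axis=1))                 # [5. 6.], row-wise maxima
print(tf.reduce_max(x, axis=1, keepdims=True))  # [[5.] [6.]], rank preserved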
Example #1
Source File: backend.py    From lambda-packs with MIT License
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to find maximum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  return math_ops.reduce_max(x, reduction_indices=axis, keep_dims=keepdims) 
Example #2
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
  masked_minimums = math_ops.reduce_min(
      math_ops.multiply(
          data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
  return masked_minimums 
Example #3
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
  masked_maximums = math_ops.reduce_max(
      math_ops.multiply(
          data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
  return masked_maximums 
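
The shift-by-extremum trick in the two helpers above keeps masked-out entries from winning the reduction without resorting to infinities. A small NumPy sketch of masked_maximum, with hypothetical values:

import numpy as np

data = np.array([[1., 7., 3.],
                 [9., 2., 5.]])
mask = np.array([[1., 0., 1.],
                 [0., 1., 1.]])

# Shift each row so every element is <= 0, zero out the masked-off
# entries, then the max of what remains is the largest unmasked entry.
axis_min = data.min(axis=1, keepdims=True)
masked_max = ((data - axis_min) * mask).max(axis=1, keepdims=True) + axis_min
print(masked_max)  # [[3.], [5.]]: the masked 7. and 9. never win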
Example #4
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
  masked_minimums = math_ops.reduce_min(
      math_ops.multiply(data - axis_maximums, mask), dim,
      keepdims=True) + axis_maximums
  return masked_minimums 
Example #5
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
  masked_maximums = math_ops.reduce_max(
      math_ops.multiply(data - axis_minimums, mask), dim,
      keepdims=True) + axis_minimums
  return masked_maximums 
Example #6
Source File: algebra_ops.py    From NJUNMT-tf with Apache License 2.0
def advanced_softmax(logits, mask=None):
    """ Computes softmax function manually.

    Avoids numeric overflow.

    Args:
        logits: A Tensor. The softmax will apply on the last dimension of it.
        mask: A Tensor with the same shape as `logits`.

    Returns: The softmax results.
    """
    num_shapes = logits.get_shape().ndims
    if mask is not None:
        scores_exp = math_ops.exp(logits - math_ops.reduce_max(logits, axis=num_shapes - 1, keepdims=True)) * mask
    else:
        scores_exp = math_ops.exp(logits - math_ops.reduce_max(logits, axis=num_shapes - 1, keepdims=True))
    scores_sum = math_ops.reduce_sum(scores_exp, axis=num_shapes - 1, keepdims=True)
    x_sm = scores_exp / scores_sum
    return x_sm 
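
Subtracting the row maximum before exponentiating is the standard numerical-stability trick: it leaves the softmax unchanged mathematically but keeps the intermediate exponentials in range. A NumPy sketch with hypothetical logits:

import numpy as np

logits = np.array([1000., 1001., 1002.])

# Naive softmax overflows: np.exp(1000.) is inf, so the result is all nan.
naive = np.exp(logits) / np.exp(logits).sum()

# Shifted softmax: exp(x - m) / sum(exp(x - m)) == exp(x) / sum(exp(x)).
stable = np.exp(logits - logits.max())
stable /= stable.sum()
print(stable)  # [0.09003057 0.24472847 0.66524096]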
Example #7
Source File: crf.py    From tensorflow-nlp-examples with MIT License
def __call__(self, inputs, state, scope=None):
    """Build the CrfDecodeForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous step's
            score values.
      scope: Unused variable scope of this cell.

    Returns:
      backpointers: [batch_size, num_tags], containing backpointers.
      new_state: [batch_size, num_tags], containing new score values.
    """
    # For simplicity, in shape comments, denote:
    # 'batch_size' by 'B', 'max_seq_len' by 'T', 'num_tags' by 'O' (output).
    state = array_ops.expand_dims(state, 2)  # [B, O, 1]

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension.
    # [B, O, 1] + [1, O, O] -> [B, O, O]
    transition_scores = state + self._transition_params  # [B, O, O]
    new_state = inputs + math_ops.reduce_max(transition_scores, [1])  # [B, O]
    backpointers = math_ops.argmax(transition_scores, 1)
    backpointers = math_ops.cast(backpointers, dtype=dtypes.int32)  # [B, O]
    return backpointers, new_state
Example #8
Source File: crf.py    From keras-crf-layer with MIT License
def __call__(self, inputs, state, scope=None):
    """Build the CrfDecodeForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous step's
            score values.
      scope: Unused variable scope of this cell.

    Returns:
      backpointers: [batch_size, num_tags], containing backpointers.
      new_state: [batch_size, num_tags], containing new score values.
    """
    # For simplicity, in shape comments, denote:
    # 'batch_size' by 'B', 'max_seq_len' by 'T', 'num_tags' by 'O' (output).
    state = array_ops.expand_dims(state, 2)  # [B, O, 1]

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension.
    # [B, O, 1] + [1, O, O] -> [B, O, O]
    transition_scores = state + self._transition_params  # [B, O, O]
    new_state = inputs + math_ops.reduce_max(transition_scores, [1])  # [B, O]
    backpointers = math_ops.argmax(transition_scores, 1)
    backpointers = math_ops.cast(backpointers, dtype=dtypes.int32)  # [B, O]
    return backpointers, new_state
Example #9
Source File: crf.py    From tensorflow_nlp with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """Build the CrfDecodeForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous step's
            score values.
      scope: Unused variable scope of this cell.

    Returns:
      backpointers: [batch_size, num_tags], containing backpointers.
      new_state: [batch_size, num_tags], containing new score values.
    """
    # For simplicity, in shape comments, denote:
    # 'batch_size' by 'B', 'max_seq_len' by 'T', 'num_tags' by 'O' (output).
    state = array_ops.expand_dims(state, 2)                         # [B, O, 1]

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension.
    # [B, O, 1] + [1, O, O] -> [B, O, O]
    transition_scores = state + self._transition_params             # [B, O, O]
    new_state = inputs + math_ops.reduce_max(transition_scores, [1])  # [B, O]
    backpointers = math_ops.argmax(transition_scores, 1)
    backpointers = math_ops.cast(backpointers, dtype=dtypes.int32)    # [B, O]
    return backpointers, new_state 
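
Examples #7 through #9 are the same Viterbi forward step: reduce_max propagates the best score into each next tag while argmax records which predecessor produced it. A minimal NumPy sketch of one step, with hypothetical numbers:

import numpy as np

state = np.array([[0.5, 1.0, 0.2]])    # previous scores, [B, O] with B=1, O=3
inputs = np.array([[0.1, 0.3, 0.9]])   # unary potentials, [B, O]
trans = np.array([[0.0, 0.2, 0.1],     # transition params, [O, O]
                  [0.5, 0.0, 0.3],
                  [0.1, 0.4, 0.0]])

# [B, O, 1] + [1, O, O] -> [B, O, O]; entry [b, i, j] scores
# "previous tag i followed by next tag j".
transition_scores = state[:, :, None] + trans[None, :, :]

new_state = inputs + transition_scores.max(axis=1)   # best score per next tag
backpointers = transition_scores.argmax(axis=1)      # winning previous tag
print(new_state)     # [[1.6 1.3 2.2]]
print(backpointers)  # [[1 1 1]]: tag 1 is the best predecessor everywhere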
Example #10
Source File: entropy_models.py    From pcc_geo_cnn with MIT License
def visualize(self):
    """Multi-channel visualization of densities as images.

    Creates and returns an image summary visualizing the current probability
    density estimates. The image contains one row for each channel. Within each
    row, the pixel intensities are proportional to probability values, and each
    row is centered on the median of the corresponding distribution.

    Returns:
      The created image summary.
    """
    with ops.name_scope(self._name_scope()):
      image = self._pmf
      image *= 255 / math_ops.reduce_max(image, axis=1, keepdims=True)
      image = math_ops.cast(image + .5, dtypes.uint8)
      image = image[None, :, :, None]
    return summary.image("pmf", image, max_outputs=1) 
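
Here the row-wise reduce_max rescales each channel's PMF so its peak maps to full pixel intensity. A small NumPy sketch of that normalization, with hypothetical values:

import numpy as np

pmf = np.array([[0.1, 0.4, 0.5],
                [0.2, 0.2, 0.6]])

# Divide each row by its own maximum so every distribution peaks at 255,
# then add .5 and truncate to round, as in the snippet above.
image = pmf * (255 / pmf.max(axis=1, keepdims=True))
image = (image + .5).astype(np.uint8)
print(image)  # [[ 51 204 255]
              #  [ 85  85 255]]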
Example #11
Source File: maxout.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape().as_list()
    num_channels = shape[self.axis]
    if num_channels % self.num_units:
      raise ValueError('number of features({}) is not '
               'a multiple of num_units({})'
               .format(num_channels, self.num_units))
    shape[self.axis] = -1
    shape += [num_channels // self.num_units]

    # Dealing with batches with arbitrary sizes
    for i in range(len(shape)):
      if shape[i] is None:
        shape[i] = gen_array_ops.shape(inputs)[i]
    outputs = math_ops.reduce_max(gen_array_ops.reshape(inputs, shape), -1, keep_dims=False)

    return outputs 
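
The reshape-then-reduce_max pattern implements maxout: consecutive channels are grouped and only the strongest activation in each group survives (the same pattern appears in Example #27 below). A minimal NumPy sketch, with a hypothetical [1, 12] input and num_units=4:

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(1, 12)  # 12 channels
num_units = 4                                       # groups of 12 // 4 = 3

# [1, 12] -> [1, 4, 3], then take the max within each group of 3.
out = x.reshape(1, num_units, 12 // num_units).max(axis=-1)
print(out)  # [[ 2.  5.  8. 11.]]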
Example #12
Source File: linear_operator.py    From lambda-packs with MIT License
def _assert_non_singular(self):
    """Private default implementation of _assert_non_singular."""
    logging.warn(
        "Using (possibly slow) default implementation of assert_non_singular."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      return self.assert_positive_definite()
    else:
      singular_values = linalg_ops.svd(
          self._get_cached_dense_matrix(), compute_uv=False)
      # TODO(langmore) Add .eig and .cond as methods.
      cond = (math_ops.reduce_max(singular_values, axis=-1) /
              math_ops.reduce_min(singular_values, axis=-1))
      return check_ops.assert_less(
          cond,
          self._max_condition_number_to_be_non_singular(),
          message="Singular matrix up to precision epsilon.")
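
The reduce_max/reduce_min pair over the singular values computes the condition number, the ratio of the largest to the smallest singular value. A small NumPy illustration with a nearly singular matrix (hypothetical values):

import numpy as np

a = np.diag([1.0, 1e-15])               # nearly singular 2x2 matrix
s = np.linalg.svd(a, compute_uv=False)  # singular values [1e0, 1e-15]
print(s.max() / s.min())                # condition number, ~1e15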
Example #13
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_reduce_max(data, keep_dims=None):
    """ One iteration of reduce_max """
    return _test_reduce(math_ops.reduce_max, data, keep_dims)

Example #14
Source File: rnn_beam_search_decoder.py    From OpenSeq2Seq with Apache License 2.0
def finalize(self, outputs, final_state, sequence_lengths):
    """Finalize and return the predicted_ids.

    Args:
      outputs: An instance of BeamSearchDecoderOutput.
      final_state: An instance of BeamSearchDecoderState. Passed through to the
        output.
      sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
        The sequence lengths determined for each beam during decode.
        **NOTE** These are ignored; the updated sequence lengths are stored in
        `final_state.lengths`.

    Returns:
      outputs: An instance of `FinalBeamSearchDecoderOutput` where the
        predicted_ids are the result of calling _gather_tree.
      final_state: The same input instance of `BeamSearchDecoderState`.
    """
    del sequence_lengths
    # Get max_sequence_length across all beams for each batch.
    max_sequence_lengths = tf.cast(
        math_ops.reduce_max(final_state.lengths, axis=1), tf.int32)
    predicted_ids = beam_search_ops.gather_tree(
        outputs.predicted_ids,
        outputs.parent_ids,
        max_sequence_lengths=max_sequence_lengths,
        end_token=self._end_token)
    outputs = FinalBeamSearchDecoderOutput(
        beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
    return outputs, final_state 
Example #15
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to find maximum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  return math_ops.reduce_max(x, axis=axis, keep_dims=keepdims) 
Example #16
Source File: sparse_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def sparse_reduce_max_sparse(sp_input, axis=None, keep_dims=False,
                             reduction_axes=None):
  """Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis

  Returns:
    The reduced SparseTensor.
  """
  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_max_sparse(
          sp_input.indices, sp_input.values,
          sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis,
                                                        reduction_axes),
          keep_dims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) 
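
In current TensorFlow (2.x) the public counterpart of this internal op is tf.sparse.reduce_max, which returns a dense tensor by default and takes an output_is_sparse flag for the sparse-output variant shown above. A minimal sketch, assuming eager execution:

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                            values=[1., 3., 2.],
                            dense_shape=[2, 3])

# Row-wise max over the stored values only.
print(tf.sparse.reduce_max(sp, axis=1))  # [3. 2.]

# SparseTensor result, analogous to sparse_reduce_max_sparse above.
sp_max = tf.sparse.reduce_max(sp, axis=1, output_is_sparse=True)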
Example #17
Source File: ops_test.py    From keras-lambda with MIT License
def test_name(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_max', result_lt.name) 
Example #18
Source File: ops_test.py    From keras-lambda with MIT License
def test(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_max(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt) 
Example #19
Source File: monte_carlo.py    From keras-lambda with MIT License
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_max(values, reduction_indices=[0]) 
Example #20
Source File: contrib.keras.python.keras.layers.convolutional_recurrent.py    From AttentionConvLSTM with MIT License
def attention(self, x, dw, pw):
    z = K.separable_conv2d(
        K.tanh(x),
        dw,
        pw,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    att = math_ops.exp(z)/math_ops.reduce_sum(math_ops.exp(z), [1, 2], keep_dims=True)
    att = att/math_ops.reduce_max(att, [1, 2], keep_dims=True)
    return att 
Example #21
Source File: contrib.keras.python.keras.layers.convolutional_recurrent.py    From AttentionConvLSTM with MIT License
def attention(self, x, dw, pw):
    z = K.separable_conv2d(
        K.tanh(x),
        dw,
        pw,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    att = math_ops.exp(z)/math_ops.reduce_sum(math_ops.exp(z), [1, 2], keep_dims=True)
    att = att/math_ops.reduce_max(att, [1, 2], keep_dims=True)
    return att 
Example #22
Source File: monte_carlo.py    From deep_image_model with Apache License 2.0
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_max(values, reduction_indices=[0]) 
Example #23
Source File: linear_operator.py    From lambda-packs with MIT License
def _max_condition_number_to_be_non_singular(self):
    """Return the maximum condition number that we consider nonsingular."""
    with ops.name_scope("max_nonsingular_condition_number"):
      dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
      eps = math_ops.cast(
          math_ops.reduce_max([
              100.,
              math_ops.cast(self.range_dimension_tensor(), self.dtype),
              math_ops.cast(self.domain_dimension_tensor(), self.dtype)
          ]), self.dtype) * dtype_eps
      return 1. / eps 
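
The arithmetic behind this cutoff is straightforward: machine epsilon, scaled by the (clamped) matrix dimension, then inverted. A NumPy sketch for a float64 operator of dimension at most 100:

import numpy as np

eps = np.finfo(np.float64).eps  # ~2.22e-16
max_cond = 1. / (100. * eps)    # ~4.5e13; condition numbers beyond this
print(max_cond)                 # are treated as numerically singular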
Example #24
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test_name(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_max', result_lt.name) 
Example #25
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_max(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt) 
Example #26
Source File: monte_carlo.py    From auto-alt-text-lambda-api with MIT License
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_max(values, reduction_indices=[0]) 
Example #27
Source File: ops.py    From dynamic-coattention-network with MIT License
def maxout(inputs,
           num_units,
           axis=None,
           outputs_collections=None,
           scope=None):
  """Adds a maxout op which is a max pooling performed in filter/channel
  dimension. This can also be used after fully-connected layers to reduce
  number of features.
  Args:
    inputs: A Tensor on which maxout will be performed
    num_units: Specifies how many features will remain after max pooling at the
      channel dimension. This must be multiple of number of channels.
    axis: The dimension where max pooling will be performed. Default is the
      last dimension.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.
  Returns:
    A `Tensor` representing the results of the pooling operation.
  Raises:
    ValueError: if num_units is not multiple of number of features.
    """
  with ops.name_scope(scope, 'MaxOut', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape().as_list()
    if axis is None:
      # Assume that channel is the last dimension
      axis = -1
    num_channels = shape[axis]
    if num_channels % num_units:
      raise ValueError('number of features({}) is not '
                       'a multiple of num_units({})'
              .format(num_channels, num_units))
    shape[axis] = -1
    shape += [num_channels // num_units]
    outputs = math_ops.reduce_max(gen_array_ops.reshape(inputs, shape), -1,
                                  keep_dims=False)
    return utils.collect_named_outputs(outputs_collections, sc, outputs) 
Example #28
Source File: tfexample_decoder.py    From tf-slim with Apache License 2.0
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a num_boxes tensor.

    If check_consistency is True: raises runtime error in Tensorflow when the
    consistency is violated across tensors.

    Args:
      keys_to_tensors: A mapping of TF-Example keys to parsed tensors.

    Returns:
      [time] tf.Tensor containing the number of boxes per frame.

    Raises:
      ValueError: If any of the keyed tensors is not sparse or exactly 2
        dimensional.
    """
    def _compute_num_boxes(tensor):
      """Compute num_boxes from a single 2D tensor."""
      if not isinstance(tensor, sparse_tensor.SparseTensor):
        raise ValueError('tensor must be of type tf.SparseTensor.')
      indices = tensor.indices
      dense_shape = tensor.dense_shape
      box_ids = indices[:, 1]
      box_ids = sparse_tensor.SparseTensor(
          indices=indices, values=box_ids, dense_shape=dense_shape)
      box_ids = sparse_ops.sparse_tensor_to_dense(box_ids, default_value=-1)
      # In the event that the parsed tensor is empty (perhaps due to a negative
      # example), we pad box_ids so that the resulting number of boxes is 0.
      num_boxes = math_ops.reduce_max(
          array_ops.pad(box_ids + 1, [[0, 0], [0, 1]]), axis=1)
      return num_boxes

    num_boxes = _compute_num_boxes(keys_to_tensors[self._keys[0]])
    asserts = []
    if self._check_consistency:
      for i in range(1, len(self._keys)):
        cur_num_boxes = _compute_num_boxes(keys_to_tensors[self._keys[i]])
        asserts.append(check_ops.assert_equal(num_boxes, cur_num_boxes))

    with ops.control_dependencies(asserts):
      return array_ops.identity(num_boxes) 
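
The pad-then-reduce_max trick above turns sparse box indices into a per-frame count while still yielding 0 for frames with no boxes. A NumPy sketch with hypothetical ids:

import numpy as np

box_ids = np.array([[0, 1, 2],
                    [-1, -1, -1]])  # second frame parsed no boxes

# Shift ids to 1-based, pad a trailing zero column so an empty row
# still has something to take the max over, then reduce.
padded = np.pad(box_ids + 1, [[0, 0], [0, 1]])
print(padded.max(axis=1))  # [3 0]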
Example #29
Source File: metrics.py    From ULTRA with Apache License 2.0
def mean_reciprocal_rank(labels, predictions, weights=None, name=None):
    """Computes mean reciprocal rank (MRR).

    Args:
      labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
        relevant example.
      predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
        the ranking score of the corresponding example.
      weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
        former case is per-example and the latter case is per-list.
      name: A string used as the name for this metric.

    Returns:
      A metric for the weighted mean reciprocal rank of the batch.
    """
    with ops.name_scope(name, 'mean_reciprocal_rank',
                        (labels, predictions, weights)):
        _, list_size = array_ops.unstack(array_ops.shape(predictions))
        labels, predictions, weights, topn = _prepare_and_validate_params(
            labels, predictions, weights, list_size)
        sorted_labels, = utils.sort_by_scores(predictions, [labels], topn=topn)
        # Relevance = 1.0 when labels >= 1.0 to accommodate graded relevance.
        relevance = math_ops.to_float(
            math_ops.greater_equal(
                sorted_labels, 1.0))
        reciprocal_rank = 1.0 / math_ops.to_float(math_ops.range(1, topn + 1))
        # MRR has a shape of [batch_size, 1]
        mrr = math_ops.reduce_max(
            relevance * reciprocal_rank, axis=1, keepdims=True)
        return math_ops.reduce_mean(
            mrr * array_ops.ones_like(weights) * weights) 
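
The reduce_max here picks out the reciprocal rank of the first relevant item in each sorted list. A worked NumPy example with hypothetical labels:

import numpy as np

# One list of 4 items already sorted by score; label >= 1 means relevant.
sorted_labels = np.array([[0., 0., 2., 1.]])
relevance = (sorted_labels >= 1.0).astype(float)  # [[0. 0. 1. 1.]]
reciprocal_rank = 1.0 / np.arange(1, 5)           # [1. 0.5 0.333 0.25]

# The max selects the first (highest-ranked) relevant item.
mrr = (relevance * reciprocal_rank).max(axis=1, keepdims=True)
print(mrr)  # [[0.33333333]]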
Example #30
Source File: crf.py    From tensorflow-nlp-examples with MIT License
def crf_decode(potentials, transition_params, sequence_length):
    """Decode the highest scoring sequence of tags in TensorFlow.

    This is a function for tensor.

    Args:
      potentials: A [batch_size, max_seq_len, num_tags] tensor, matrix of
        unary potentials.
      transition_params: A [num_tags, num_tags] tensor, matrix of
        binary potentials.
      sequence_length: A [batch_size] tensor, containing sequence lengths.

    Returns:
      decode_tags: A [batch_size, max_seq_len] tensor, with dtype tf.int32.
        Contains the highest scoring tag indices.
      best_score: A [batch_size] tensor, containing the score of decode_tags.
    """
    # For simplicity, in shape comments, denote:
    # 'batch_size' by 'B', 'max_seq_len' by 'T', 'num_tags' by 'O' (output).
    num_tags = potentials.get_shape()[2].value

    # Computes forward decoding. Get last score and backpointers.
    crf_fwd_cell = CrfDecodeForwardRnnCell(transition_params)
    initial_state = array_ops.slice(potentials, [0, 0, 0], [-1, 1, -1])
    initial_state = array_ops.squeeze(initial_state, axis=[1])  # [B, O]
    inputs = array_ops.slice(potentials, [0, 1, 0], [-1, -1, -1])  # [B, T-1, O]
    backpointers, last_score = rnn.dynamic_rnn(
        crf_fwd_cell,
        inputs=inputs,
        sequence_length=sequence_length - 1,
        initial_state=initial_state,
        time_major=False,
        dtype=dtypes.int32)  # [B, T-1, O], [B, O]
    backpointers = gen_array_ops.reverse_sequence(
        backpointers, sequence_length - 1, seq_dim=1)  # [B, T-1, O]

    # Computes backward decoding. Extract tag indices from backpointers.
    crf_bwd_cell = CrfDecodeBackwardRnnCell(num_tags)
    initial_state = math_ops.cast(
        math_ops.argmax(last_score, axis=1), dtype=dtypes.int32)  # [B]
    initial_state = array_ops.expand_dims(initial_state, axis=-1)  # [B, 1]
    decode_tags, _ = rnn.dynamic_rnn(
        crf_bwd_cell,
        inputs=backpointers,
        sequence_length=sequence_length - 1,
        initial_state=initial_state,
        time_major=False,
        dtype=dtypes.int32)  # [B, T-1, 1]
    decode_tags = array_ops.squeeze(decode_tags, axis=[2])  # [B, T-1]
    decode_tags = array_ops.concat([initial_state, decode_tags], axis=1)  # [B, T]
    decode_tags = gen_array_ops.reverse_sequence(
        decode_tags, sequence_length, seq_dim=1)  # [B, T]

    best_score = math_ops.reduce_max(last_score, axis=1)  # [B]
    return decode_tags, best_score