Python tensorflow.python.ops.math_ops.cumsum() Examples

The following are 21 code examples of tensorflow.python.ops.math_ops.cumsum(), drawn from open-source projects. The source file, originating project, and license are listed above each example. You may also want to check out all other available functions and classes of the module tensorflow.python.ops.math_ops.
Example #1
Source File: copy_attention_wrapper.py    From question-generation with MIT License
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(math_ops.cumsum(
        math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
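The identity this snippet relies on is easy to check: for strictly positive inputs, exp(cumsum(log(x))) reproduces tf.math.cumprod(x). Below is a minimal sketch of that check, assuming TensorFlow 2.x and the public tf.math API (the sample values are illustrative):

import tensorflow as tf

x = tf.constant([0.9, 1e-20, 0.5, 0.3])             # small but strictly positive entries
direct = tf.math.cumprod(x)                          # can underflow for long products
logspace = tf.exp(tf.math.cumsum(tf.math.log(x)))    # the safe_cumprod formulation
print(direct.numpy())
print(logspace.numpy())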
Example #2
Source File: attention_wrapper.py    From CommonSenseMultiHopQA with MIT License
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(math_ops.cumsum(
        math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Example #3
Source File: attention_wrapper.py    From OpenSeq2Seq with Apache License 2.0
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(
        math_ops.cumsum(
            math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs
        )
    ) 
Example #4
Source File: attention_wrapper.py    From tf-var-attention with MIT License
def safe_cumprod(x, *args, **kwargs):
    """Computes cumprod of x in logspace using cumsum to avoid underflow.

    The cumprod function and its gradient can result in numerical instabilities
    when its argument has very small and/or zero values.  As long as the argument
    is all positive, we can instead compute the cumulative product as
    exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

    Args:
      x: Tensor to take the cumulative product of.
      *args: Passed on to cumsum; these are identical to those in cumprod.
      **kwargs: Passed on to cumsum; these are identical to those in cumprod.
    Returns:
      Cumulative product of x.
    """
    with ops.name_scope(None, "SafeCumprod", [x]):
        x = ops.convert_to_tensor(x, name="x")
        tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
        return math_ops.exp(math_ops.cumsum(
            math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Example #5
Source File: attention_wrapper.py    From QGforQA with MIT License
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(math_ops.cumsum(
        math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Example #6
Source File: attention_wrapper_mod.py    From NQG_ASs2s with MIT License
def safe_cumprod(x, *args, **kwargs):
  """Computes cumprod of x in logspace using cumsum to avoid underflow.

  The cumprod function and its gradient can result in numerical instabilities
  when its argument has very small and/or zero values.  As long as the argument
  is all positive, we can instead compute the cumulative product as
  exp(cumsum(log(x))).  This function can be called identically to tf.cumprod.

  Args:
    x: Tensor to take the cumulative product of.
    *args: Passed on to cumsum; these are identical to those in cumprod.
    **kwargs: Passed on to cumsum; these are identical to those in cumprod.
  Returns:
    Cumulative product of x.
  """
  with ops.name_scope(None, "SafeCumprod", [x]):
    x = ops.convert_to_tensor(x, name="x")
    tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
    return math_ops.exp(math_ops.cumsum(
        math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) 
Example #7
Source File: math_grad.py    From lambda-packs with MIT License
def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(
      prod * grad, axis, exclusive=exclusive, reverse=not reverse)
  return [out / x, None] 
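The closed-form gradient above can be sanity-checked against autodiff. A minimal sketch, assuming TensorFlow 2.x eager execution and tf.GradientTape; the inputs are arbitrary nonzero values, since the formula divides by x:

import tensorflow as tf

x = tf.constant([1.5, 2.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.cumprod(x)
auto_grad = tape.gradient(y, x)  # upstream gradient defaults to ones

# Same quantity via the registered-gradient formula (exclusive=False, reverse=False):
prod = tf.math.cumprod(x)
manual_grad = tf.math.cumsum(prod * tf.ones_like(x), reverse=True) / x
print(auto_grad.numpy(), manual_grad.numpy())  # both [9. 6. 3.]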
Example #8
Source File: math_grad.py    From keras-lambda with MIT License
def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(
      prod * grad, axis, exclusive=exclusive, reverse=not reverse)
  return [out / x, None] 
Example #9
Source File: math_grad.py    From keras-lambda with MIT License
def _CumsumGrad(op, grad):
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [
      math_ops.cumsum(
          grad, axis, exclusive=exclusive, reverse=not reverse), None
  ] 
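Since cumsum is linear, its gradient is just the upstream gradient accumulated in the opposite direction, which is what the `reverse=not reverse` flip implements. A small check, again assuming TensorFlow 2.x eager execution:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
upstream = tf.constant([0.1, 0.2, 0.3])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.cumsum(x)
auto_grad = tape.gradient(y, x, output_gradients=upstream)
manual_grad = tf.math.cumsum(upstream, reverse=True)  # the formula above
print(auto_grad.numpy(), manual_grad.numpy())  # both approximately [0.6, 0.5, 0.3]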
Example #10
Source File: math_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(
      prod * grad, axis, exclusive=exclusive, reverse=not reverse)
  return [out / x, None] 
Example #11
Source File: math_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _CumsumGrad(op, grad):
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [
      math_ops.cumsum(
          grad, axis, exclusive=exclusive, reverse=not reverse), None
  ] 
Example #12
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the sum.

  Returns:
      A tensor of the cumulative sum of values of `x` along `axis`.
  """
  return math_ops.cumsum(x, axis=axis) 
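The same wrapper is exposed publicly as tf.keras.backend.cumsum, so a quick usage sketch (assuming TensorFlow 2.x; values are illustrative):

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
print(tf.keras.backend.cumsum(x, axis=0).numpy())  # [[1. 2. 3.], [5. 7. 9.]]
print(tf.keras.backend.cumsum(x, axis=1).numpy())  # [[1. 3. 6.], [4. 9. 15.]]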
Example #13
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(prod * grad, axis, exclusive=exclusive,
                        reverse=not reverse)
  return [out / x, None] 
Example #14
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _CumsumGrad(op, grad):
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [math_ops.cumsum(grad, axis, exclusive=exclusive,
                          reverse=not reverse), None] 
Example #15
Source File: histogram_ops.py    From tf-slim with Apache License 2.0
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumsum of a 1D tensor with defined shape by padding and convolving."""
  # Assumes tensor shape is fully defined.
  return math_ops.cumsum(tensor)[:len_tensor] 
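In histogram code like this, a 1-D cumsum typically turns per-bucket counts into running totals; a minimal illustration of that pattern (the bucket counts below are made up):

import tensorflow as tf

bucket_counts = tf.constant([4, 1, 0, 3, 2])
running_total = tf.math.cumsum(bucket_counts)
print(running_total.numpy())  # [ 4  5  5  8 10]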
Example #16
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(
      prod * grad, axis, exclusive=exclusive, reverse=not reverse)
  return [out / x, None] 
Example #17
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _CumsumGrad(op, grad):
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [
      math_ops.cumsum(
          grad, axis, exclusive=exclusive, reverse=not reverse), None
  ] 
Example #18
Source File: backend.py    From lambda-packs with MIT License
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the sum.

  Returns:
      A tensor of the cumulative sum of values of `x` along `axis`.
  """
  axis = _normalize_axis(axis, ndim(x))
  return math_ops.cumsum(x, axis=axis) 
Example #19
Source File: math_grad.py    From lambda-packs with MIT License
def _CumsumGrad(op, grad):
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [
      math_ops.cumsum(
          grad, axis, exclusive=exclusive, reverse=not reverse), None
  ] 
Example #20
Source File: pointer_ops.py    From text with Apache License 2.0
def _broadcast_ragged_sources_for_overlap(source_start, source_limit,
                                          target_splits):
  """Repeats source indices for each target item in the same batch.

  Args:
    source_start: `<int>[batch_size, (source_size)]`
    source_limit: `<int>[batch_size, (source_size)]`
    target_splits: `<int64>[batch_size, (target_size+1)]`

  Returns:
    `<int>[batch_size, (source_size), (target_size)]`.
    A tuple of tensors `(tiled_source_start, tiled_source_limit)` where:

    * `tiled_source_start[b, s, t] = source_start[b, s]`
    * `tiled_source_limit[b, s, t] = source_limit[b, s]`
  """
  source_splits = source_start.row_splits
  target_rowlens = target_splits[1:] - target_splits[:-1]
  source_batch_ids = segment_id_ops.row_splits_to_segment_ids(source_splits)

  # <int64>[sum(source_size[b] for b in range(batch_size))]
  # source_repeats[i] is the number of target spans in the batch that contains
  # source span i.  We need to add a new ragged dimension that repeats each
  # source span this number of times.
  source_repeats = ragged_gather_ops.gather(target_rowlens, source_batch_ids)

  # <int64>[sum(source_size[b] for b in range(batch_size)) + 1]
  # The row_splits tensor for the inner ragged dimension of the result tensors.
  inner_splits = array_ops.concat([[0], math_ops.cumsum(source_repeats)],
                                  axis=0)

  # <int64>[sum(source_size[b] * target_size[b] for b in range(batch_size))]
  # Indices for gathering source indices.
  source_indices = segment_id_ops.row_splits_to_segment_ids(inner_splits)

  source_start = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_start.values, source_indices),
      [source_splits, inner_splits])
  source_limit = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_limit.values, source_indices),
      [source_splits, inner_splits])

  return source_start, source_limit 
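The `concat([[0], cumsum(lengths)])` pattern above is the standard way to turn per-row lengths into a `row_splits` vector for a RaggedTensor. A minimal sketch of that identity using the public ragged API (illustrative data):

import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
lengths = rt.row_lengths()                                   # [2, 1, 3]
zero = tf.zeros([1], dtype=lengths.dtype)
splits = tf.concat([zero, tf.math.cumsum(lengths)], axis=0)
print(splits.numpy())         # [0 2 3 6]
print(rt.row_splits.numpy())  # [0 2 3 6]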
Example #21
Source File: sparsemax.py    From lambda-packs with MIT License
def sparsemax(logits, name=None):
  """Computes sparsemax activations [1].

  For each batch `i` and class `j` we have
    sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

  with ops.name_scope(name, "sparsemax", [logits]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    obs = array_ops.shape(logits)[0]
    dims = array_ops.shape(logits)[1]

    z = logits - math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]

    # sort z
    z_sorted, _ = nn.top_k(z, k=dims)

    # calculate k(z)
    z_cumsum = math_ops.cumsum(z_sorted, axis=1)
    k = math_ops.range(
        1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
    z_check = 1 + k * z_sorted > z_cumsum
    # because the z_check vector is always [1,1,...1,0,0,...0] finding the
    # (index + 1) of the last `1` is the same as just summing the number of 1.
    k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)

    # calculate tau(z)
    indices = array_ops.stack([math_ops.range(0, obs), k_z - 1], axis=1)
    tau_sum = array_ops.gather_nd(z_cumsum, indices)
    tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)

    # calculate p
    return math_ops.maximum(
        math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
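For intuition on how the cumsum drives the support-size and threshold computation, here is a small NumPy re-derivation for a single 1-D logits vector (sparsemax_np is a hypothetical helper, not part of the module above):

import numpy as np

def sparsemax_np(z):
  # Reference sparsemax for one 1-D logits vector.
  z_sorted = np.sort(z)[::-1]                 # sort descending
  z_cumsum = np.cumsum(z_sorted)
  k = np.arange(1, len(z) + 1)
  support = 1 + k * z_sorted > z_cumsum       # [True, ..., True, False, ...]
  k_z = support.sum()                         # size of the support
  tau = (z_cumsum[k_z - 1] - 1) / k_z
  return np.maximum(z - tau, 0.0)

print(sparsemax_np(np.array([1.0, 0.8, 0.1])))  # [0.6 0.4 0. ] -- sums to 1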