Python tensorflow.python.ops.math_ops.log1p() Examples

The following are 28 code examples of tensorflow.python.ops.math_ops.log1p(). Each example links to its original project and source file, noted above the code. You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops, or try the search function.
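As a quick reminder of why `log1p(x)` is preferred over `log(1 + x)`, here is a small standalone NumPy sketch (not taken from any of the projects below): when `|x|` is much smaller than machine epsilon, `1 + x` rounds to `1`, so the naive form loses all precision.

```
import numpy as np

x = 1e-18
print(np.log(1.0 + x))  # 0.0 -- 1.0 + x rounds to 1.0, precision lost
print(np.log1p(x))      # 1e-18 -- accurate, since log1p(x) ~ x for |x| << 1
```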
Example #1
Source File: laplace.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples))) 
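For reference, the same inverse-CDF trick can be sketched in plain NumPy with hypothetical parameters `loc = 0.0`, `scale = 1.0`; `np.nextafter(-1.0, 0.0)` nudges the lower bound one ulp above -1 so that `log1p(-|u|)` never evaluates `log(0)`.

```
import numpy as np

loc, scale = 0.0, 1.0  # hypothetical parameters, for illustration only
u = np.random.uniform(np.nextafter(-1.0, 0.0), 1.0, size=100_000)
samples = loc - scale * np.sign(u) * np.log1p(-np.abs(u))
print(samples.mean(), samples.var())  # roughly 0.0 and 2 * scale**2
```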
Example #2
Source File: logistic.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    uniform = random_ops.random_uniform(
        shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
    return sampled * self.scale + self.loc 
Example #3
Source File: geometric.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        array_ops.concat([[n], array_ops.shape(self._probs)], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)

    return math_ops.floor(
        math_ops.log(sampled) / math_ops.log1p(-self.probs)) 
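The same inverse-transform sampler can be checked in plain NumPy with a hypothetical success probability `p = 0.3`; the result counts the failures before the first success, whose mean is `(1 - p) / p`.

```
import numpy as np

p = 0.3  # hypothetical success probability
u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=100_000)
counts = np.floor(np.log(u) / np.log1p(-p))
print(counts.mean())  # roughly (1 - p) / p ~= 2.33
```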
Example #4
Source File: student_t.py    From auto-alt-text-lambda-api with MIT License
def _log_unnormalized_prob(self, x):
    y = (x - self.mu) / self.sigma  # Abs(sigma) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Example #5
Source File: bijector.py    From keras-lambda with MIT License
def _forward_log_det_jacobian(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(x)
    if self.power == 0.:
      return math_ops.reduce_sum(x, reduction_indices=event_dims)
    return (1. / self.power - 1.) * math_ops.reduce_sum(
        math_ops.log1p(x * self.power),
        reduction_indices=event_dims) 
Example #6
Source File: bijector.py    From keras-lambda with MIT License
def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      return math_ops.exp(x)
    # TODO(jvdillon): If large x accuracy is an issue, consider using
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power) 
Example #7
Source File: student_t.py    From keras-lambda with MIT License
def _log_unnormalized_prob(self, x):
    y = (x - self.mu) / self.sigma  # Abs(sigma) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Example #8
Source File: student_t.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _log_unnormalized_prob(self, x):
    y = (x - self.loc) / self.scale  # Abs(scale) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Example #9
Source File: distribution.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _call_log_survival_function(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_survival_function(value, **kwargs)
      except NotImplementedError as original_exception:
        try:
          return math_ops.log1p(-self.cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception 
Example #10
Source File: beta.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    return ((self.concentration1 - 1.) * math_ops.log(x)
            + (self.concentration0 - 1.) * math_ops.log1p(-x)) 
Example #11
Source File: metrics.py    From ULTRA with Apache License 2.0
def discounted_cumulative_gain(labels,
                               predictions,
                               weights=None,
                               topn=None,
                               name=None):
    """Computes discounted cumulative gain (DCG).

    Args:
      labels: A `Tensor` of the same shape as `predictions`.
      predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
        the ranking score of the corresponding example.
      weights: A `Tensor` of the same shape as `predictions` or [batch_size, 1]. The
        former case is per-example and the latter case is per-list.
      topn: A cutoff for how many examples to consider for this metric.
      name: A string used as the name for this metric.

    Returns:
      A metric for the weighted discounted cumulative gain of the batch.
    """
    with ops.name_scope(name, 'discounted_cumulative_gain',
                        (labels, predictions, weights)):
        labels, predictions, weights, topn = _prepare_and_validate_params(
            labels, predictions, weights, topn)
        sorted_labels, sorted_weights = utils.sort_by_scores(
            predictions, [labels, weights], topn=topn)
        dcg = _discounted_cumulative_gain(sorted_labels,
                                          sorted_weights) * math_ops.log1p(1.0)
        per_list_weights = _per_example_weights_to_per_list_weights(
            weights=weights,
            relevance=math_ops.pow(2.0, math_ops.to_float(labels)) - 1.0)
        return math_ops.reduce_mean(
            _safe_div(dcg, per_list_weights) * per_list_weights) 
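Note that `math_ops.log1p(1.0)` is simply `ln 2`. Assuming `_discounted_cumulative_gain` (not shown here) applies a `1 / log1p(rank)` discount, multiplying by `ln 2` turns it into the conventional `1 / log2(1 + rank)` DCG discount, as this small NumPy sketch with hypothetical relevance labels illustrates.

```
import numpy as np

labels = np.array([3.0, 2.0, 0.0])        # hypothetical relevance of ranked items
ranks = np.arange(1, len(labels) + 1)
gains = 2.0 ** labels - 1.0
dcg_natural_log = np.sum(gains / np.log1p(ranks))
dcg = dcg_natural_log * np.log1p(1.0)     # == np.sum(gains / np.log2(1 + ranks))
print(dcg)
```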
Example #12
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss_alpha(labels=[], logits=[], pos_weights=[], gamma=2., clips=[], name='focal_loss'):
    """
    Add focal loss weigths to the wigthted sigmoid cross entropy
    :return:
    """
    batchsize = labels.get_shape().as_list()[0]
    n_classes = labels.get_shape().as_list()[1]

    with tf.variable_scope(name) as vs:
        # first get a sigmoid to determine the focal loss weights:
        sigmoid_logits = tf.nn.sigmoid(logits)
        # determine the focal loss weights:
        labels = math_ops.to_float(labels)
        sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
        preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits, 1. - sigmoid_logits)
        focal_weights = (math_ops.subtract(1., preds)) ** gamma
        print(focal_weights)

        # clip the weights at E-3 and E3
        up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
        low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
        focal_weights = array_ops.where(math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
        focal_weights = array_ops.where(math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
        log_weight = 1. + (pos_weights - 1.) * labels

        # now put them into a weighted sigmoid cross entropy:
        loss = math_ops.multiply(math_ops.add((1. - labels) * logits,
                         log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits))),
                                 focal_weights, name='sc_entropy')
        return loss 
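The inner expression `log1p(exp(-|x|)) + relu(-x)` is the numerically stable form of `log(1 + exp(-x))` used by weighted sigmoid cross entropy; a naive evaluation overflows `exp()` for large negative logits. A standalone NumPy comparison:

```
import numpy as np

def stable_log1p_exp_neg(x):
    # log(1 + exp(-x)) computed without overflow: relu(-x) + log1p(exp(-|x|))
    return np.maximum(-x, 0.0) + np.log1p(np.exp(-np.abs(x)))

x = np.array([-1000.0, -10.0, 0.0, 10.0, 1000.0])
print(stable_log1p_exp_neg(x))   # finite everywhere
print(np.log(1.0 + np.exp(-x)))  # overflows to inf at x = -1000
```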
Example #13
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss(labels=[], logits=[], pos_weights=[], gamma=2., clips=[], name='focal_loss'):
    """
    Add focal loss weigths to the wigthted sigmoid cross entropy
    :return:
    """
    batchsize = labels.get_shape().as_list()[0]
    n_classes = labels.get_shape().as_list()[1]

    with tf.variable_scope(name) as vs:
        # first get a sigmoid to determine the focal loss weights:
        sigmoid_logits = tf.nn.sigmoid(logits)
        # determine the focal loss weights:
        labels = math_ops.to_float(labels)
        sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
        preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits, 1. - sigmoid_logits)
        focal_weights = (math_ops.subtract(1., preds)) ** gamma
        print(focal_weights)

        # clip the weights at E-3 and E3
        up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
        low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
        focal_weights = array_ops.where(math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
        focal_weights = array_ops.where(math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
        log_weight = 1. + (pos_weights - 1.) * labels

        # now put them into a weighted sigmoid cross entropy:
        loss = math_ops.multiply(math_ops.add((1. - labels) * logits,
                         log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits))),
                                 focal_weights, name='sc_entropy')
        return loss 
Example #14
Source File: bijector.py    From auto-alt-text-lambda-api with MIT License
def _forward_log_det_jacobian(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(x)
    if self.power == 0.:
      return math_ops.reduce_sum(x, reduction_indices=event_dims)
    return (1. / self.power - 1.) * math_ops.reduce_sum(
        math_ops.log1p(x * self.power),
        reduction_indices=event_dims) 
Example #15
Source File: bijector.py    From auto-alt-text-lambda-api with MIT License
def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      return math_ops.exp(x)
    # TODO(jvdillon): If large x accuracy is an issue, consider using
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power) 
Example #16
Source File: negative_binomial.py    From lambda-packs with MIT License
def _log_unnormalized_prob(self, positive_counts):
    if self.validate_args:
      positive_counts = distribution_util.embed_check_nonnegative_discrete(
          positive_counts, check_integer=True)
    return self.total_count * math_ops.log1p(
        -self.probs) + positive_counts * math_ops.log(self.probs) 
Example #17
Source File: binomial.py    From lambda-packs with MIT License
def _log_unnormalized_prob(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    return (counts * math_ops.log(self.probs) +
            (self.total_count - counts) * math_ops.log1p(-self.probs)) 
Example #18
Source File: sigmoid_impl.py    From lambda-packs with MIT License
def _inverse_log_det_jacobian(self, y):
    return -math_ops.log(y) - math_ops.log1p(-y) 
Example #19
Source File: sigmoid_impl.py    From lambda-packs with MIT License
def _inverse(self, y):
    return math_ops.log(y) - math_ops.log1p(-y) 
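`log(y) - log1p(-y)` is the logit function, the exact inverse of the sigmoid; a quick standalone NumPy round-trip check:

```
import numpy as np

y = np.array([0.1, 0.5, 0.9])
logits = np.log(y) - np.log1p(-y)
print(1.0 / (1.0 + np.exp(-logits)))  # recovers [0.1, 0.5, 0.9]
```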
Example #20
Source File: power_transform_impl.py    From lambda-packs with MIT License
def _forward(self, x):
    x = self._maybe_assert_valid_x(x)
    if self.power == 0.:
      return math_ops.exp(x)
    # If large x accuracy is an issue, consider using:
    # (1. + x * self.power)**(1. / self.power) when x >> 1.
    return math_ops.exp(math_ops.log1p(x * self.power) / self.power) 
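Algebraically, `exp(log1p(x * c) / c)` equals the Box-Cox map `(1 + c * x)**(1 / c)`, and it converges to `exp(x)` as the power `c` approaches 0, which is why the `power == 0.` branch returns `exp(x)`. A small NumPy sketch with hypothetical powers:

```
import numpy as np

x = 0.3
for c in [1.0, 0.1, 1e-6]:                 # hypothetical powers
    print(c, np.exp(np.log1p(x * c) / c))  # approaches exp(x) as c -> 0
print("exp(x) =", np.exp(x))               # ~1.349859, the c -> 0 limit
```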
Example #21
Source File: geometric.py    From lambda-packs with MIT License
def _log_prob(self, counts):
    if self.validate_args:
      counts = distribution_util.embed_check_nonnegative_discrete(
          counts, check_integer=True)
    counts *= array_ops.ones_like(self.probs)
    probs = self.probs * array_ops.ones_like(counts)

    safe_domain = array_ops.where(
        math_ops.equal(counts, 0.),
        array_ops.zeros_like(probs),
        probs)
    return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs) 
Example #22
Source File: geometric.py    From lambda-packs with MIT License
def _cdf(self, counts):
    if self.validate_args:
      # We set `check_integer=False` since the CDF is defined on whole real
      # line.
      counts = math_ops.floor(
          distribution_util.embed_check_nonnegative_discrete(
              counts, check_integer=False))
    counts *= array_ops.ones_like(self.probs)
    return array_ops.where(
        counts < 0.,
        array_ops.zeros_like(counts),
        -math_ops.expm1(
            (counts + 1) * math_ops.log1p(-self.probs))) 
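This evaluates the geometric CDF `1 - (1 - p)**(k + 1)` as `-expm1((k + 1) * log1p(-p))`, which avoids the cancellation the direct form suffers when `p` is tiny. A standalone NumPy check with hypothetical values:

```
import numpy as np

p, k = 1e-12, 5.0  # hypothetical probability and count
print(-np.expm1((k + 1) * np.log1p(-p)))  # ~6e-12, accurate
print(1.0 - (1.0 - p) ** (k + 1))         # loses digits to cancellation as p shrinks
```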
Example #23
Source File: student_t.py    From lambda-packs with MIT License
def _log_unnormalized_prob(self, x):
    y = (x - self.loc) / self.scale  # Abs(scale) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) 
Example #24
Source File: distribution.py    From lambda-packs with MIT License
def _call_log_survival_function(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_survival_function(value, **kwargs)
      except NotImplementedError as original_exception:
        try:
          return math_ops.log1p(-self.cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception 
Example #25
Source File: beta.py    From lambda-packs with MIT License
def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    return ((self.concentration1 - 1.) * math_ops.log(x)
            + (self.concentration0 - 1.) * math_ops.log1p(-x)) 
Example #26
Source File: util.py    From lambda-packs with MIT License
def get_logits_and_probs(logits=None,
                         probs=None,
                         multidimensional=False,
                         validate_args=False,
                         name="get_logits_and_probs"):
  """Converts logit to probabilities (or vice-versa), and returns both.

  Args:
    logits: Floating-point `Tensor` representing log-odds.
    probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`.
      If `True`, `logits` or `probs` is treated as a `[N1, N2, ..., k]`-shaped
      tensor whose last dimension represents the logits or probabilities of
      `shape[-1]` classes.
    validate_args: Python `bool`, default `False`. When `True`, either assert
      `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
      of `probs` sums to one.
    name: A name for this operation (optional).

  Returns:
    logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
      `1`, then the corresponding entry in the returned logit will be `-Inf` and
      `Inf` respectively.

  Raises:
    ValueError: if neither `probs` nor `logits` were passed in, or both were.
  """
  with ops.name_scope(name, values=[probs, logits]):
    if (probs is None) == (logits is None):
      raise ValueError("Must pass probs or logits, but not both.")

    if probs is None:
      logits = ops.convert_to_tensor(logits, name="logits")
      if multidimensional:
        return logits, nn.softmax(logits, name="probs")
      return logits, math_ops.sigmoid(logits, name="probs")

    probs = ops.convert_to_tensor(probs, name="probs")
    if validate_args:
      with ops.name_scope("validate_probs"):
        one = constant_op.constant(1., probs.dtype)
        dependencies = [check_ops.assert_non_negative(probs)]
        if multidimensional:
          dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,
                                        message="probs does not sum to 1.")]
        else:
          dependencies += [check_ops.assert_less_equal(
              probs, one, message="probs has components greater than 1.")]
        probs = control_flow_ops.with_dependencies(dependencies, probs)

    with ops.name_scope("logits"):
      if multidimensional:
        # We don't compute logits for the multidimensional case in a manner
        # consistent with the unidimensional case; instead we follow the TF
        # convention. Typically, you might expect to see
        # logits = log(probs) - log(probs[pivot]). A side effect of following
        # the TF approach is that the unidimensional case implicitly handles
        # the second dimension, while the multidimensional case keeps the
        # pivot dimension explicit.
        return math_ops.log(probs), probs
      return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs 
Example #27
Source File: special_math.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def log_cdf_laplace(x, name="log_cdf_laplace"):
  """Log Laplace distribution function.

  This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
  distribution function of the Laplace distribution, i.e.

  ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```

  For numerical accuracy, `L(x)` is computed in different ways depending on `x`,

  ```
  x <= 0:
    Log[L(x)] = Log[0.5] + x, which is exact

  0 < x:
    Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
  ```

  Args:
    x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="log_cdf_laplace").

  Returns:
    `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x.dtype` is not handled.
  """

  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
    lower_solution = -np.log(2.) + x

    # safe_exp_neg_x = exp{-x} for x > 0, but is
    # bounded above by 1, which avoids
    #   log[1 - 1] = -inf for x = log(1/2), AND
    #   exp{-x} --> inf, for x << -1
    safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))

    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
    # internally by log1p, rather than being done explicitly here.
    upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)

    return array_ops.where(x < 0., lower_solution, upper_solution) 
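A quick standalone NumPy check of the upper branch shows why `log1p` matters there: for large `x` the Laplace CDF `1 - 0.5 * exp(-x)` rounds to exactly 1.0, so a naive `log()` returns 0.0 and the tiny negative tail is lost.

```
import numpy as np

x = 40.0
print(np.log1p(-0.5 * np.exp(-x)))     # ~ -2.1e-18, accurate
print(np.log(1.0 - 0.5 * np.exp(-x)))  # 0.0, the tail is rounded away
```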
Example #28
Source File: special_math.py    From lambda-packs with MIT License
def log_cdf_laplace(x, name="log_cdf_laplace"):
  """Log Laplace distribution function.

  This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
  distribution function of the Laplace distribution, i.e.

  ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```

  For numerical accuracy, `L(x)` is computed in different ways depending on `x`,

  ```
  x <= 0:
    Log[L(x)] = Log[0.5] + x, which is exact

  0 < x:
    Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
  ```

  Args:
    x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="log_cdf_laplace").

  Returns:
    `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x.dtype` is not handled.
  """

  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
    lower_solution = -np.log(2.) + x

    # safe_exp_neg_x = exp{-x} for x > 0, but is
    # bounded above by 1, which avoids
    #   log[1 - 1] = -inf for x = log(1/2), AND
    #   exp{-x} --> inf, for x << -1
    safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))

    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
    # internally by log1p, rather than being done explicitly here.
    upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)

    return array_ops.where(x < 0., lower_solution, upper_solution)