Python tensorflow.python.ops.math_ops.digamma() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.digamma(). The original project and source file for each example are listed above it. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.math_ops.
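digamma(x) computes the digamma function psi(x), the elementwise derivative of the log-gamma function lgamma(x). The sketch below is a minimal usage illustration, not part of the examples; it assumes a TF 1.x graph-mode environment (consistent with the snippets that follow), where the same kernel also backs the public tf.digamma.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([0.5, 1.0, 2.0])
psi = math_ops.digamma(x)  # elementwise psi(x), the derivative of lgamma(x)
with tf.Session() as sess:
  print(sess.run(psi))     # approximately [-1.9635, -0.5772, 0.4228]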
Example #1
Source File: gamma.py    From lambda-packs with MIT License
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.)) 
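The expression above is the standard closed-form KL divergence between two Gamma distributions in the concentration (shape) and rate parameterization. As a hedged sanity check (assuming SciPy and NumPy are available; this helper is not part of the original file), the same formula can be re-evaluated outside TensorFlow:

from scipy.special import digamma, gammaln
import numpy as np

def kl_gamma_gamma_np(c0, r0, c1, r1):
  # Mirrors the TensorFlow expression above, term by term.
  return ((c0 - c1) * digamma(c0)
          + gammaln(c1) - gammaln(c0)
          + c1 * (np.log(r0) - np.log(r1))
          + c0 * (r1 / r0 - 1.))

print(kl_gamma_gamma_np(2., 3., 2., 3.))  # 0.0 for identical distributions
print(kl_gamma_gamma_np(2., 3., 4., 1.))  # strictly positive otherwise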
Example #2
Source File: gamma.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.)) 
Example #3
Source File: beta.py    From auto-alt-text-lambda-api with MIT License
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batched KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  inputs = [d1.a, d1.b, d1.a_b_sum, d2.a_b_sum]
  with ops.name_scope(name, "kl_beta_beta", inputs):
    # ln(B(a', b') / B(a, b))
    log_betas = (math_ops.lgamma(d2.a) + math_ops.lgamma(d2.b)
                - math_ops.lgamma(d2.a_b_sum) + math_ops.lgamma(d1.a_b_sum)
                - math_ops.lgamma(d1.a) - math_ops.lgamma(d1.b))
    # (a - a')*psi(a) + (b - b')*psi(b) + (a' - a + b' - b)*psi(a + b)
    digammas = ((d1.a - d2.a)*math_ops.digamma(d1.a)
              + (d1.b - d2.b)*math_ops.digamma(d1.b)
              + (d2.a_b_sum - d1.a_b_sum)*math_ops.digamma(d1.a_b_sum))
    return log_betas + digammas 
Example #4
Source File: gamma.py    From auto-alt-text-lambda-api with MIT License
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma",
                      values=[g0.alpha, g0.beta, g1.alpha, g1.beta]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return ((g0.alpha - g1.alpha) * math_ops.digamma(g0.alpha)
            + math_ops.lgamma(g1.alpha)
            - math_ops.lgamma(g0.alpha)
            + g1.alpha * math_ops.log(g0.beta)
            - g1.alpha * math_ops.log(g1.beta)
            + g0.alpha * (g1.beta / g0.beta - 1.)) 
Example #5
Source File: gamma.py    From keras-lambda with MIT License
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma",
                      values=[g0.alpha, g0.beta, g1.alpha, g1.beta]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return ((g0.alpha - g1.alpha) * math_ops.digamma(g0.alpha)
            + math_ops.lgamma(g1.alpha)
            - math_ops.lgamma(g0.alpha)
            + g1.alpha * math_ops.log(g0.beta)
            - g1.alpha * math_ops.log(g1.beta)
            + g0.alpha * (g1.beta / g0.beta - 1.)) 
Example #6
Source File: beta.py    From keras-lambda with MIT License
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batched KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  inputs = [d1.a, d1.b, d1.a_b_sum, d2.a_b_sum]
  with ops.name_scope(name, "kl_beta_beta", inputs):
    # ln(B(a', b') / B(a, b))
    log_betas = (math_ops.lgamma(d2.a) + math_ops.lgamma(d2.b)
                - math_ops.lgamma(d2.a_b_sum) + math_ops.lgamma(d1.a_b_sum)
                - math_ops.lgamma(d1.a) - math_ops.lgamma(d1.b))
    # (a - a')*psi(a) + (b - b')*psi(b) + (a' - a + b' - b)*psi(a + b)
    digammas = ((d1.a - d2.a)*math_ops.digamma(d1.a)
              + (d1.b - d2.b)*math_ops.digamma(d1.b)
              + (d2.a_b_sum - d1.a_b_sum)*math_ops.digamma(d1.a_b_sum))
    return log_betas + digammas 
Example #7
Source File: gamma.py    From lambda-packs with MIT License
def _entropy(self):
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration))) 
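This is the differential entropy of Gamma(concentration, rate). As a hedged cross-check (assuming SciPy is available; not part of the original file), scipy.stats.gamma should give the same value; note that SciPy uses a shape/scale parameterization, so scale = 1 / rate:

from scipy.stats import gamma

concentration, rate = 3.0, 2.0
# Equals: concentration - log(rate) + lgamma(concentration)
#         + (1 - concentration) * digamma(concentration)
print(gamma(a=concentration, scale=1.0 / rate).entropy())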
Example #8
Source File: student_t.py    From deep_image_model with Apache License 2.0
def _entropy(self):
    u = array_ops.expand_dims(self.df * self._ones(), -1)
    v = array_ops.expand_dims(self._ones(), -1)
    beta_arg = array_ops.concat(len(u.get_shape()) - 1, [u, v]) / 2
    half_df = 0.5 * self.df
    return ((0.5 + half_df) * (math_ops.digamma(0.5 + half_df) -
                               math_ops.digamma(half_df)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            math_ops.log(self.sigma)) 
Example #9
Source File: dirichlet.py    From deep_image_model with Apache License 2.0
def _entropy(self):
    entropy = special_math_ops.lbeta(self.alpha)
    entropy += math_ops.digamma(self.alpha_sum) * (
        self.alpha_sum - math_ops.cast(self.event_shape()[0], self.dtype))
    entropy += -math_ops.reduce_sum(
        (self.alpha - 1.) * math_ops.digamma(self.alpha),
        reduction_indices=[-1],
        keep_dims=False)
    return entropy 
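The result is the differential entropy of a Dirichlet(alpha) distribution, where alpha_sum is the total concentration and event_shape()[0] is the number of categories. A hedged comparison point (assuming SciPy) is scipy.stats.dirichlet, which implements the same closed form:

import numpy as np
from scipy.stats import dirichlet

alpha = np.array([1.5, 2.0, 3.0])
# Equals: lbeta(alpha) + digamma(sum(alpha)) * (sum(alpha) - len(alpha))
#         - sum((alpha - 1) * digamma(alpha))
print(dirichlet(alpha).entropy())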
Example #10
Source File: gamma.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _entropy(self):
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration))) 
Example #11
Source File: core_test.py    From keras-lambda with MIT License
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('log', None, math_ops.log, core.log),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes) 
Example #12
Source File: beta.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _entropy(self):
    return (
        self._log_normalization()
        - (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
        - (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
        + ((self.total_concentration - 2.) *
           math_ops.digamma(self.total_concentration))) 
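Here concentration1 and concentration0 are the usual Beta parameters a and b, and total_concentration is a + b. A hedged SciPy comparison (SciPy assumed available, not part of the original file):

from scipy.stats import beta

a, b = 2.0, 5.0  # concentration1, concentration0
# Equals: log B(a, b) - (a - 1) * digamma(a) - (b - 1) * digamma(b)
#         + (a + b - 2) * digamma(a + b)
print(beta(a, b).entropy())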
Example #13
Source File: beta.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  def delta(fn, is_property=True):
    fn1 = getattr(d1, fn)
    fn2 = getattr(d2, fn)
    return (fn2 - fn1) if is_property else (fn2() - fn1())
  with ops.name_scope(name, "kl_beta_beta", values=[
      d1.concentration1,
      d1.concentration0,
      d1.total_concentration,
      d2.concentration1,
      d2.concentration0,
      d2.total_concentration,
  ]):
    return (delta("_log_normalization", is_property=False)
            - math_ops.digamma(d1.concentration1) * delta("concentration1")
            - math_ops.digamma(d1.concentration0) * delta("concentration0")
            + (math_ops.digamma(d1.total_concentration)
               * delta("total_concentration"))) 
Example #14
Source File: student_t.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _entropy(self):
    v = array_ops.ones(self.batch_shape_tensor(),
                       dtype=self.dtype)[..., array_ops.newaxis]
    u = v * self.df[..., array_ops.newaxis]
    beta_arg = array_ops.concat([u, v], -1) / 2.
    return (math_ops.log(math_ops.abs(self.scale)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (math_ops.digamma(0.5 * (self.df + 1.)) -
             math_ops.digamma(0.5 * self.df))) 
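This is the entropy of a location-scale Student's t: the entropy of the standardized t with df degrees of freedom plus log|scale| (the location parameter does not affect entropy). As a hedged check (assuming SciPy), scipy.stats.t should agree:

from scipy.stats import t

df, scale = 5.0, 2.0
# Equals: 0.5 * (df + 1) * (digamma(0.5 * (df + 1)) - digamma(0.5 * df))
#         + 0.5 * log(df) + log B(df / 2, 1 / 2) + log(scale)
print(t(df, loc=0.0, scale=scale).entropy())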
Example #15
Source File: math_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x) 
Example #16
Source File: math_grad.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
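Together with _LgammaGrad above, this registers the identities d/dx lgamma(x) = digamma(x) and d/dx digamma(x) = polygamma(1, x) as gradients. A minimal graph-mode sketch (assuming a TF 1.x session, as in the other examples here) checks them numerically:

import tensorflow as tf

x = tf.constant([0.5, 1.5, 3.0])
dlgamma = tf.gradients(tf.lgamma(x), x)[0]    # should match tf.digamma(x)
ddigamma = tf.gradients(tf.digamma(x), x)[0]  # should match tf.polygamma(1, x)
with tf.Session() as sess:
  print(sess.run([dlgamma, tf.digamma(x)]))
  print(sess.run([ddigamma, tf.polygamma(1.0, x)]))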
Example #17
Source File: dirichlet.py    From keras-lambda with MIT License
def _entropy(self):
    entropy = special_math_ops.lbeta(self.alpha)
    entropy += math_ops.digamma(self.alpha_sum) * (
        self.alpha_sum - math_ops.cast(self.event_shape()[0], self.dtype))
    entropy += -math_ops.reduce_sum(
        (self.alpha - 1.) * math_ops.digamma(self.alpha),
        reduction_indices=[-1],
        keep_dims=False)
    return entropy 
Example #18
Source File: math_grad.py    From keras-lambda with MIT License
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x) 
Example #19
Source File: math_grad.py    From keras-lambda with MIT License
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
Example #20
Source File: gamma.py    From keras-lambda with MIT License
def _entropy(self):
    return (self.alpha -
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) +
            (1. - self.alpha) * math_ops.digamma(self.alpha)) 
Example #21
Source File: student_t.py    From keras-lambda with MIT License
def _entropy(self):
    v = array_ops.ones(self.batch_shape(), dtype=self.dtype)[..., None]
    u = v * self.df[..., None]
    beta_arg = array_ops.concat([u, v], -1) / 2.
    return (math_ops.log(math_ops.abs(self.sigma)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (math_ops.digamma(0.5 * (self.df + 1.)) -
             math_ops.digamma(0.5 * self.df))) 
Example #22
Source File: inverse_gamma.py    From keras-lambda with MIT License
def _entropy(self):
    return (self.alpha +
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) -
            (1. + self.alpha) * math_ops.digamma(self.alpha)) 
Example #23
Source File: gamma.py    From auto-alt-text-lambda-api with MIT License
def _entropy(self):
    return (self.alpha -
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) +
            (1. - self.alpha) * math_ops.digamma(self.alpha)) 
Example #24
Source File: beta.py    From lambda-packs with MIT License
def _entropy(self):
    return (
        self._log_normalization()
        - (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
        - (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
        + ((self.total_concentration - 2.) *
           math_ops.digamma(self.total_concentration))) 
Example #25
Source File: beta.py    From lambda-packs with MIT License
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  def delta(fn, is_property=True):
    fn1 = getattr(d1, fn)
    fn2 = getattr(d2, fn)
    return (fn2 - fn1) if is_property else (fn2() - fn1())
  with ops.name_scope(name, "kl_beta_beta", values=[
      d1.concentration1,
      d1.concentration0,
      d1.total_concentration,
      d2.concentration1,
      d2.concentration0,
      d2.total_concentration,
  ]):
    return (delta("_log_normalization", is_property=False)
            - math_ops.digamma(d1.concentration1) * delta("concentration1")
            - math_ops.digamma(d1.concentration0) * delta("concentration0")
            + (math_ops.digamma(d1.total_concentration)
               * delta("total_concentration"))) 
Example #26
Source File: student_t.py    From lambda-packs with MIT License
def _entropy(self):
    v = array_ops.ones(self.batch_shape_tensor(),
                       dtype=self.dtype)[..., array_ops.newaxis]
    u = v * self.df[..., array_ops.newaxis]
    beta_arg = array_ops.concat([u, v], -1) / 2.
    return (math_ops.log(math_ops.abs(self.scale)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (math_ops.digamma(0.5 * (self.df + 1.)) -
             math_ops.digamma(0.5 * self.df))) 
Example #27
Source File: math_grad.py    From lambda-packs with MIT License
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x) 
Example #28
Source File: math_grad.py    From lambda-packs with MIT License
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
Example #29
Source File: inverse_gamma.py    From lambda-packs with MIT License
def _entropy(self):
    return (self.concentration
            + math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            - ((1. + self.concentration) *
               math_ops.digamma(self.concentration))) 
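Relative to the Gamma entropy in Example #7, the sign of the log-rate term flips for the inverse Gamma. A hedged SciPy cross-check (assuming SciPy; scipy.stats.invgamma's scale argument corresponds to the rate/beta parameter used here):

from scipy.stats import invgamma

concentration, rate = 3.0, 2.0
# Equals: concentration + log(rate) + lgamma(concentration)
#         - (1 + concentration) * digamma(concentration)
print(invgamma(concentration, scale=rate).entropy())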
Example #30
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x)