Python scipy.special.log_ndtr() Examples

The following are 15 code examples of scipy.special.log_ndtr(), collected from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all available functions/classes of the module scipy.special.
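By way of orientation before the examples: scipy.special.log_ndtr(x) computes log(Phi(x)), the log of the standard normal CDF, in a way that stays accurate deep in the lower tail, where composing np.log with ndtr underflows. A minimal standalone sketch (not taken from any of the projects below) illustrates the difference:

import numpy as np
from scipy import special

x = np.array([-40.0, -10.0, 0.0])

with np.errstate(divide='ignore'):
    naive = np.log(special.ndtr(x))   # ndtr(-40) underflows to 0, so log gives -inf

stable = special.log_ndtr(x)          # evaluated directly in log space

print(naive)   # [ -inf       -53.231...  -0.693...]
print(stable)  # [-804.608... -53.231...  -0.693...]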
Example #1
Source File: special_math_test.py    From deep_image_model with Apache License 2.0
def _test_grid_log(self, dtype, grid_spec, error_spec):
    with self.test_session():
      grid = _make_grid(dtype, grid_spec)
      actual = sm.log_ndtr(grid).eval()

      # Basic tests.
      self.assertTrue(np.isfinite(actual).all())
      # On the grid, -inf < log_cdf(x) < 0.  In this case, we should be able
      # to use a huge grid because we have used tricks to escape numerical
      # difficulties.
      self.assertTrue((actual < 0).all())
      _check_strictly_increasing(actual)

      # Versus scipy.
      expected = special.log_ndtr(grid)
      # Scipy prematurely goes to zero at some places that we don't.  So don't
      # include these in the comparison.
      self.assertAllClose(expected.astype(np.float64)[expected < 0],
                          actual.astype(np.float64)[expected < 0],
                          rtol=error_spec.rtol, atol=error_spec.atol) 
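Outside of TensorFlow, the invariants this test asserts can be checked against scipy directly; here is a brief standalone sketch (mine, not part of the test file). Note that scipy's log_ndtr saturates at exactly 0 for large positive x, which is precisely why the comparison above is filtered with expected < 0:

import numpy as np
from scipy import special

grid = np.linspace(-200.0, 200.0, 4001)
vals = special.log_ndtr(grid)

assert np.isfinite(vals).all()       # finite even 200 standard deviations out
assert (vals <= 0).all()             # log of a probability is non-positive
assert (np.diff(vals) >= 0).all()    # non-decreasing; strict until it hits 0.0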
Example #2
Source File: rdp_accountant.py    From privacy with Apache License 2.0
def _log_erfc(x):
  """Compute log(erfc(x)) with high accuracy for large x."""
  try:
    return math.log(2) + special.log_ndtr(-x * 2**.5)
  except NameError:
    # If log_ndtr is not available, approximate as follows:
    r = special.erfc(x)
    if r == 0.0:
      # Using the Laurent series at infinity for the tail of the erfc function:
      #     erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
      # To verify in Mathematica:
      #     Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
      return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
              .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
    else:
      return math.log(r) 
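The identity behind the try branch is erfc(x) = 2 Phi(-x*sqrt(2)), so log(erfc(x)) = log(2) + log_ndtr(-x*sqrt(2)); this keeps the whole computation in log space even where erfc itself underflows. A quick standalone check (my sketch, not part of the project):

import math
from scipy import special

x = 30.0
print(special.erfc(x))                               # 0.0 -- underflows in double precision
print(math.log(2) + special.log_ndtr(-x * 2 ** .5))  # roughly -903.97, still finite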
Example #3
Source File: rdp_accountant.py    From models with Apache License 2.0
def _log_erfc(x):
  """Compute log(erfc(x)) with high accuracy for large x."""
  try:
    return math.log(2) + special.log_ndtr(-x * 2**.5)
  except NameError:
    # If log_ndtr is not available, approximate as follows:
    r = special.erfc(x)
    if r == 0.0:
      # Using the Laurent series at infinity for the tail of the erfc function:
      #     erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
      # To verify in Mathematica:
      #     Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
      return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
              .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
    else:
      return math.log(r) 
Example #4
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _norm_logcdf(x):
    return sc.log_ndtr(x) 
Example #5
Source File: test_log_ndtr.py    From chainer with MIT License
def _log_ndtr_cpu(x, dtype):
    from scipy import special
    return special.log_ndtr(x).astype(dtype) 
Example #6
Source File: log_ndtr.py    From chainer with MIT License
def label(self):
        return 'log_ndtr' 
Example #7
Source File: log_ndtr.py    From chainer with MIT License
def forward_cpu(self, x):
        global _log_ndtr_cpu
        if _log_ndtr_cpu is None:
            try:
                from scipy import special
                _log_ndtr_cpu = special.log_ndtr
            except ImportError:
                raise ImportError('SciPy is not available. Forward computation'
                                  ' of log_ndtr cannot be done.')

        self.retain_inputs((0,))
        return utils.force_array(_log_ndtr_cpu(x[0]), dtype=x[0].dtype), 
Example #8
Source File: log_ndtr.py    From chainer with MIT License
def log_ndtr(x):
    """Logarithm of cumulative distribution function of normal distribution.

    .. note::
       Forward computation on CPU cannot be done if
       `SciPy <https://www.scipy.org/>`_ is not available.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.

    Returns:
        ~chainer.Variable: Output variable.
    """
    return LogNdtr().apply((x,))[0] 
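For completeness, a minimal usage sketch of the function above, assuming it is exposed as chainer.functions.log_ndtr (as in the Chainer source tree this file comes from):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([-5.0, 0.0, 5.0], dtype=np.float32))
y = F.log_ndtr(x)              # CPU forward pass delegates to scipy.special.log_ndtr
y.grad = np.ones_like(y.array)
y.backward()                   # d/dx log_ndtr(x) = pdf(x) / ndtr(x)
print(y.array, x.grad)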
Example #9
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _norm_logcdf(x):
    return sc.log_ndtr(x) 
Example #10
Source File: test_mpmath.py    From GraphicDesignPatternByPython with MIT License
def test_log_ndtr(self):
        assert_mpmath_equal(sc.log_ndtr,
                            exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))),
                            [Arg()], n=600, dps=300) 
Example #11
Source File: test_mpmath.py    From GraphicDesignPatternByPython with MIT License
def test_log_ndtr_complex(self):
        assert_mpmath_equal(sc.log_ndtr,
                            exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)),
                            [ComplexArg(a=complex(-10000, -100),
                                        b=complex(10000, 100))], n=200, dps=300) 
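Both mpmath references rely on the identity Phi(z) = erfc(-z/sqrt(2))/2, evaluated at high precision. A condensed standalone version of the same cross-check (my sketch):

import mpmath
from scipy import special

mpmath.mp.dps = 50                          # 50 decimal digits of working precision
z = -25.0
reference = mpmath.log(mpmath.ncdf(z))      # high-precision log of the normal CDF
print(float(reference))                     # roughly -316.64
print(special.log_ndtr(z))                  # agrees to double precision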
Example #12
Source File: special_math_test.py    From deep_image_model with Apache License 2.0
def _test_grad_finite(self, dtype):
    with self.test_session():
      x = tf.Variable([-100., 0., 100.], dtype=dtype)
      output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))
      grad_output = tf.gradients(output, x)
      tf.global_variables_initializer().run()
      self.assert_all_true(np.isfinite(output.eval()))
      self.assert_all_true(np.isfinite(grad_output[0].eval())) 
Example #13
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _norm_logcdf(x):
    return sc.log_ndtr(x) 
Example #14
Source File: privacy_analysis.py    From pytorch-dp with Apache License 2.0
def _log_erfc(x):
    """Compute log(erfc(x)) with high accuracy for large x."""
    return math.log(2) + special.log_ndtr(-x * 2 ** 0.5) 
Example #15
Source File: special_math_test.py    From deep_image_model with Apache License 2.0
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
    raw_grid = _make_grid(dtype, grid_spec)
    grid = tf.convert_to_tensor(raw_grid)
    with self.test_session():
      fn = sm.log_ndtr if self._use_log else sm.ndtr

      # If there are N points in the grid,
      # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
      # the ith output point w.r.t. the jth grid point.  We only expect the
      # diagonal to be nonzero.
      # TODO(b/31131137): Replace tf.test.compute_gradient with our own custom
      # gradient evaluation to ensure we correctly handle small function delta.
      grad_eval, _ = tf.test.compute_gradient(
          grid, grid_spec.shape, fn(grid), grid_spec.shape)
      grad_eval = np.diag(grad_eval)

      # Check for NaN separately in order to get informative failures.
      self.assert_all_false(np.isnan(grad_eval))
      self.assert_all_true(grad_eval > 0.)
      self.assert_all_true(np.isfinite(grad_eval))

      # Do the same checks but explicitly compute the gradient.
      # (We did this because we're not sure if we trust
      # tf.test.compute_gradient.)
      grad_eval = tf.gradients(fn(grid), grid)[0].eval()
      self.assert_all_false(np.isnan(grad_eval))
      if self._use_log:
        g = np.reshape(grad_eval, [-1])
        half = int(np.ceil(len(g) / 2))  # slice indices must be ints, not floats
        self.assert_all_true(g[:half] > 0.)
        self.assert_all_true(g[half:] >= 0.)
      else:
        # The ndtr gradient will only be non-zero in the range [-14, 14] for
        # float32 and [-38, 38] for float64.
        self.assert_all_true(grad_eval >= 0.)
      self.assert_all_true(np.isfinite(grad_eval))

      # Versus scipy.
      expected = stats.norm.pdf(raw_grid)
      if self._use_log:
        expected /= special.ndtr(raw_grid)
        expected[np.isnan(expected)] = 0.
      # Scipy prematurely goes to zero at some places that we don't, and the
      # expected gradient here is non-negative, so compare only where scipy's
      # expected value is strictly positive (a mask of `expected < 0` would
      # select nothing).
      self.assertAllClose(expected.astype(np.float64)[expected > 0],
                          grad_eval.astype(np.float64)[expected > 0],
                          rtol=error_spec.rtol, atol=error_spec.atol)
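The expected gradient in this test is the ratio pdf(x)/ndtr(x) for log_ndtr (for ndtr itself, just the pdf). A standalone finite-difference check of that derivative, independent of TensorFlow (my sketch):

import numpy as np
from scipy import special, stats

x = np.linspace(-8.0, 8.0, 9)
h = 1e-6

# Central finite difference of log_ndtr ...
numeric = (special.log_ndtr(x + h) - special.log_ndtr(x - h)) / (2 * h)

# ... against the analytic derivative pdf(x) / cdf(x).
analytic = stats.norm.pdf(x) / special.ndtr(x)

assert np.allclose(numeric, analytic, rtol=1e-4)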