Python scipy.special.loggamma() Examples
The following are 24 code examples of scipy.special.loggamma().
You may also want to check out all available functions/classes of the module scipy.special.
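Before the project examples, here is a minimal, self-contained sketch of what scipy.special.loggamma() computes; the input values are chosen only for illustration. For positive real arguments the result matches scipy.special.gammaln(), and for complex arguments the function returns the principal branch of the log-gamma function, so exponentiating it recovers scipy.special.gamma().

import numpy as np
from scipy.special import gamma, gammaln, loggamma

# For positive real arguments, loggamma agrees with gammaln.
print(np.allclose(loggamma(5.0), gammaln(5.0)))    # True; both equal log(4!) = log(24)

# For complex arguments, loggamma returns the principal branch of log-Gamma,
# so exponentiating it recovers gamma(z).
z = 3.5 + 2.0j
print(np.allclose(np.exp(loggamma(z)), gamma(z)))  # True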
Example #1
Source File: vmf_hypvae.py From vmf_vae_nlp with MIT License | 6 votes |
def KL_davidson(k, d):
    vmf_entropy = k * ive(d / 2, k) / ive((d / 2) - 1, k) + \
                  (d / 2 - 1) * np.log(k) \
                  - (d / 2) * np.log(2 * np.pi) - np.log(iv(d / 2 - 1, k))

    hyu_ent = np.log(2) + (d / 2) * np.log(np.pi) - sp.loggamma(d / 2)

    kl = vmf_entropy + hyu_ent

    return kl

#
# first = k * bessel(d / 2, k) / bessel(d / 2 - 1, k)
# second = (d / 2 - 1) * torch.log(k) - torch.log(bessel(d / 2 - 1, k))
# const = torch.tensor(
#     np.log(3.1415926) * d / 2 + np.log(2) - sp.loggamma(d / 2).real - (d / 2) * np.log(2 * 3.1415926)).to(
#     devic
# for kappa in range(10, 150, 20):
#     for d in range(50, 150, 50):
#         print("Davidson:{}\t\tGuu:{}".format(KL_davidson(kappa, d), KL_guu(kappa, d)))
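The snippet above is shown without its module-level imports. The short driver below is a sketch, assuming import numpy as np, import scipy.special as sp, and from scipy.special import iv, ive at the top of the same module as KL_davidson; the kappa/d grid simply mirrors the commented-out comparison loop and is illustrative only.

import numpy as np
import scipy.special as sp
from scipy.special import iv, ive

# Evaluate the Davidson KL term over a small grid of concentration kappa
# and dimension d (same grid as the commented-out loop above).
for kappa in range(10, 150, 20):
    for d in range(50, 150, 50):
        print("kappa={}, d={}, KL_davidson={:.4f}".format(kappa, d, KL_davidson(kappa, d)))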
Example #2
Source File: models.py From GSTools with GNU Lesser General Public License v3.0 | 6 votes |
def spectral_density(self, k):  # noqa: D102
    k = np.array(k, dtype=np.double)
    # for nu > 20 we just use an approximation of the gaussian model
    if self.nu > 20.0:
        return (
            (self.len_scale / np.sqrt(np.pi)) ** self.dim
            * np.exp(-((k * self.len_scale) ** 2))
            * (
                1
                + (
                    ((k * self.len_scale) ** 2 - self.dim / 2.0) ** 2
                    - self.dim / 2.0
                )
                / self.nu
            )
        )
    return (self.len_scale / np.sqrt(np.pi)) ** self.dim * np.exp(
        -(self.nu + self.dim / 2.0)
        * np.log(1.0 + (k * self.len_scale) ** 2 / self.nu)
        + sps.loggamma(self.nu + self.dim / 2.0)
        - sps.loggamma(self.nu)
        - self.dim * np.log(np.sqrt(self.nu))
    )
Example #3
Source File: test_mpmath.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_loggamma_taylor():
    # Test around the zeros at z = 1, 2.
    r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10)
    theta = np.linspace(0, 2*np.pi, 20)
    r, theta = np.meshgrid(r, theta)
    dz = r*np.exp(1j*theta)
    z = np.r_[1 + dz, 2 + dz].flatten()

    dataset = []
    for z0 in z:
        dataset.append((z0, complex(mpmath.loggamma(z0))))
    dataset = np.array(dataset)

    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()


# ------------------------------------------------------------------------------
# rgamma
# ------------------------------------------------------------------------------
Example #4
Source File: test_mpmath.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_loggamma_taylor_transition():
    # Make sure there isn't a big jump in accuracy when we move from
    # using the Taylor series to using the recurrence relation.
    r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1])
    theta = np.linspace(0, 2*np.pi, 20)
    r, theta = np.meshgrid(r, theta)
    dz = r*np.exp(1j*theta)
    z = np.r_[1 + dz, 2 + dz].flatten()

    dataset = []
    for z0 in z:
        dataset.append((z0, complex(mpmath.loggamma(z0))))
    dataset = np.array(dataset)

    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
Example #5
Source File: test_loggamma.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_real_dispatch():
    x = np.logspace(-10, 10) + 0.5
    dataset = np.vstack((x, gammaln(x))).T
    FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
    assert_(loggamma(0) == np.inf)
    assert_(np.isnan(loggamma(-1)))
Example #6
Source File: core.py From tensorqtl with BSD 3-Clause "New" or "Revised" License | 5 votes |
def beta_log_likelihood(x, shape1, shape2):
    """negative log-likelihood of beta distribution"""
    logbeta = loggamma(shape1) + loggamma(shape2) - loggamma(shape1+shape2)
    return (1.0-shape1)*np.sum(np.log(x)) + (1.0-shape2)*np.sum(np.log(1.0-x)) + len(x)*logbeta
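As a quick sanity check, the sketch below evaluates beta_log_likelihood on a few made-up points and compares it against scipy.stats.beta. The data and shape parameters are purely illustrative, and the snippet assumes import numpy as np and from scipy.special import loggamma are in scope for the function above.

import numpy as np
from scipy import stats
from scipy.special import loggamma

x = np.array([0.2, 0.4, 0.6, 0.8])
shape1, shape2 = 2.0, 3.0

nll = beta_log_likelihood(x, shape1, shape2)
# The value should equal the negative sum of the per-point beta log-densities.
print(np.allclose(nll, -stats.beta.logpdf(x, shape1, shape2).sum()))  # True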
Example #7
Source File: transform.py From empymod with Apache License 2.0 | 5 votes |
def get_fftlog_input(rmin, rmax, n, q, mu):
    r"""Return parameters required for FFTLog."""
    # Central point log10(r_c) of periodic interval
    logrc = (rmin + rmax)/2

    # Central index (1/2 integral if n is even)
    nc = (n + 1)/2.

    # Log spacing of points
    dlogr = (rmax - rmin)/n
    dlnr = dlogr*np.log(10.)

    # Get low-ringing kr
    y = 1j*np.pi/(2.0*dlnr)
    zp = special.loggamma((mu + 1.0 + q)/2.0 + y)
    zm = special.loggamma((mu + 1.0 - q)/2.0 + y)
    arg = np.log(2.0)/dlnr + (zp.imag + zm.imag)/np.pi
    kr = np.exp((arg - np.round(arg))*dlnr)

    # Calculate required input x-values (freq); angular freq -> freq
    freq = 10**(logrc + (np.arange(1, n+1) - nc)*dlogr)/(2*np.pi)

    # Calculate tcalc with adjusted kr
    logkc = np.log10(kr) - logrc
    tcalc = 10**(logkc + (np.arange(1, n+1) - nc)*dlogr)

    # rk = r_c/k_r; adjust for Fourier transform scaling
    rk = 10**(logrc - logkc)*np.pi/2

    return freq, tcalc, dlnr, kr, rk
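A short usage sketch for the function above; the parameter values are illustrative only, and the snippet assumes import numpy as np and from scipy import special as in the original module. The two loggamma calls enter through the phase (zp.imag + zm.imag), which selects a "low-ringing" kr close to 1.

import numpy as np
from scipy import special

# Illustrative FFTLog setup: log10-interval [-2, 2], 64 samples, bias q = 0,
# order mu = 0.5 (sine transform).
freq, tcalc, dlnr, kr, rk = get_fftlog_input(rmin=-2.0, rmax=2.0, n=64, q=0.0, mu=0.5)
print(kr)  # low-ringing kr, close to 1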
Example #8
Source File: models.py From GSTools with GNU Lesser General Public License v3.0 | 5 votes |
def correlation(self, r):
    r"""Matérn correlation function.

    .. math::
       \rho(r) =
       \frac{2^{1-\nu}}{\Gamma\left(\nu\right)} \cdot
       \left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)^{\nu} \cdot
       \mathrm{K}_{\nu}\left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)
    """
    r = np.array(np.abs(r), dtype=np.double)
    # for nu > 20 we just use the gaussian model
    if self.nu > 20.0:
        return np.exp(-((r / self.len_scale) ** 2) / 4)
    # calculate by log-transformation to prevent numerical errors
    r_gz = r[r > 0.0]
    res = np.ones_like(r)
    # with np.errstate(over="ignore", invalid="ignore"):
    res[r > 0.0] = np.exp(
        (1.0 - self.nu) * np.log(2)
        - sps.loggamma(self.nu)
        + self.nu * np.log(np.sqrt(self.nu) * r_gz / self.len_scale)
    ) * sps.kv(self.nu, np.sqrt(self.nu) * r_gz / self.len_scale)
    # if nu >> 1 we get errors for the farfield, there 0 is approached
    res[np.logical_not(np.isfinite(res))] = 0.0
    # covariance is positive
    res = np.maximum(res, 0.0)
    return res
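The method above avoids overflow for large nu by moving the 2^(1-nu)/Gamma(nu) prefactor into log-space with loggamma. The standalone function below is a simplified sketch of that same trick; it is not the GSTools API, and the function name and signature are made up for illustration.

import numpy as np
from scipy import special as sps

def matern_correlation(r, nu, len_scale):
    # evaluate the Matern prefactor in log-space to avoid overflow for large nu
    r = np.array(np.abs(r), dtype=np.double)
    res = np.ones_like(r)
    r_gz = r[r > 0.0]
    res[r > 0.0] = np.exp(
        (1.0 - nu) * np.log(2)
        - sps.loggamma(nu)
        + nu * np.log(np.sqrt(nu) * r_gz / len_scale)
    ) * sps.kv(nu, np.sqrt(nu) * r_gz / len_scale)
    # non-finite far-field values are clipped to 0, as in the method above
    res[np.logical_not(np.isfinite(res))] = 0.0
    return np.maximum(res, 0.0)

print(matern_correlation(np.linspace(0.0, 3.0, 4), nu=1.5, len_scale=1.0))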
Example #9
Source File: test_mpmath.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_loggamma(self):
    def mpmath_loggamma(z):
        try:
            res = mpmath.loggamma(z)
        except ValueError:
            res = complex(np.nan, np.nan)
        return res

    assert_mpmath_equal(sc.loggamma, mpmath_loggamma, [ComplexArg()],
                        nan_ok=False, distinguish_nan_and_inf=False,
                        rtol=5e-14)
Example #10
Source File: test_mpmath.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_gammaln(self):
    # The real part of loggamma is log(|gamma(z)|).
    def f(z):
        return mpmath.loggamma(z).real

    assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()])
Example #11
Source File: test_mpmath.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_beta():
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              np.arange(-10, 11, 1),
              np.arange(-10, 11, 1) + 0.5,
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 400
        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10,
                          ignore_inf_sign=True)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec


# ------------------------------------------------------------------------------
# loggamma
# ------------------------------------------------------------------------------
Example #12
Source File: test_sf_error.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_errprint():
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`errprint` is deprecated!")
        flag = sc.errprint(True)

    try:
        assert_(isinstance(flag, bool))
        with pytest.warns(sc.SpecialFunctionWarning):
            sc.loggamma(0)
    finally:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "`errprint` is deprecated!")
            sc.errprint(flag)
Example #13
Source File: test_loggamma.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_gh_6536():
    z = loggamma(complex(-3.4, +0.0))
    zbar = loggamma(complex(-3.4, -0.0))
    assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
Example #14
Source File: scipy.py From vnpy_crypto with MIT License | 5 votes |
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).
    For example,

    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b): return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan, nan, 21., 32.])

    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.
    """
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        else:
            fillvalue = np.nan
    else:
        if f2 is not None:
            raise ValueError("Only one of (fillvalue, f2) can be given.")

    arrays = np.broadcast_arrays(*arrays)
    temp = tuple(np.extract(cond, arr) for arr in arrays)
    tcode = np.mintypecode([a.dtype.char for a in arrays])
    out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
    np.place(out, cond, f(*temp))
    if f2 is not None:
        temp = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*temp))

    return out


# Work around for complex changes in gammaln in 1.0.0. loggamma introduced in 0.18.
Example #15
Source File: test_loggamma.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_complex_dispatch_realpart():
    # Test that the real parts of loggamma and gammaln agree on the
    # real axis.
    x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
    dataset = np.vstack((x, gammaln(x))).T

    def f(z):
        z = np.array(z, dtype='complex128')
        return loggamma(z).real

    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
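Several of the tests above (test_real_dispatch, test_gammaln, test_complex_dispatch_realpart) pin down how loggamma and gammaln relate on the real axis. The sketch below spot-checks that behaviour at one arbitrary negative input: gammaln returns log(|Gamma(x)|), the real-valued loggamma returns nan for negative x, and a complex input is needed to get the principal value, whose real part matches gammaln.

import numpy as np
from scipy.special import gammaln, loggamma

x = -2.5
print(gammaln(x))               # finite: log(|Gamma(-2.5)|)
print(loggamma(x))              # nan: real-valued loggamma is nan for negative x
print(loggamma(complex(x, 0)))  # complex principal value; its real part equals gammaln(x)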
Example #16
Source File: test_loggamma.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_identities2():
    # test the identity loggamma(z + 1) = log(z) + loggamma(z)
    x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    y = x.copy()
    x, y = np.meshgrid(x, y)
    z = (x + 1J*y).flatten()
    dataset = np.vstack((z, np.log(z) + loggamma(z))).T

    def f(z):
        return loggamma(z + 1)

    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
Example #17
Source File: test_loggamma.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_identities1():
    # test the identity exp(loggamma(z)) = gamma(z)
    x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    y = x.copy()
    x, y = np.meshgrid(x, y)
    z = (x + 1J*y).flatten()
    dataset = np.vstack((z, gamma(z))).T

    def f(z):
        return np.exp(loggamma(z))

    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
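The two identity tests above can also be spot-checked without the FuncData harness; the sketch below just verifies both identities at a single, arbitrarily chosen complex point.

import numpy as np
from scipy.special import gamma, loggamma

z = 9.5 + 0.5j
print(np.allclose(np.exp(loggamma(z)), gamma(z)))             # exp(loggamma(z)) = gamma(z)
print(np.allclose(loggamma(z + 1), np.log(z) + loggamma(z)))  # loggamma(z + 1) = log(z) + loggamma(z)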
Example #18
Source File: vmf_unif.py From vmf_vae_nlp with MIT License | 5 votes |
def _vmf_kld(k, d):
    tmp = (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
           + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
           - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
    return tmp
Example #19
Source File: vmf_batch.py From vmf_vae_nlp with MIT License | 5 votes |
def _vmf_kld_davidson(k, d):
    """
    This should be the correct KLD.
    Empirically we find that _vmf_kld (as in the Guu paper) only deviates
    a little (<2%) in most cases we use.
    """
    tmp = k * sp.iv(d / 2, k) / sp.iv(d / 2 - 1, k) + (d / 2 - 1) * torch.log(k) - torch.log(
        sp.iv(d / 2 - 1, k)) + np.log(np.pi) * d / 2 + np.log(2) - sp.loggamma(d / 2).real - (d / 2) * np.log(
        2 * np.pi)
    if tmp != tmp:
        exit()
    return np.array([tmp])
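Since the version above expects kappa as a torch tensor, the sketch below re-expresses the same Davidson formula with plain NumPy scalars for quick spot checks. The kappa and d used in the call are arbitrary, and the helper name vmf_kld_davidson_np is made up for this example.

import numpy as np
import scipy.special as sp

def vmf_kld_davidson_np(k, d):
    # same expression as _vmf_kld_davidson above, with torch.log replaced by np.log
    return (k * sp.iv(d / 2, k) / sp.iv(d / 2 - 1, k)
            + (d / 2 - 1) * np.log(k)
            - np.log(sp.iv(d / 2 - 1, k))
            + np.log(np.pi) * d / 2 + np.log(2)
            - sp.loggamma(d / 2).real
            - (d / 2) * np.log(2 * np.pi))

print(vmf_kld_davidson_np(k=50.0, d=100))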
Example #20
Source File: vmf_batch.py From vmf_vae_nlp with MIT License | 5 votes |
def _vmf_kld(k, d):
    tmp = (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
           + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
           - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
    if tmp != tmp:
        exit()
    return np.array([tmp])
Example #21
Source File: vmf_only.py From vmf_vae_nlp with MIT License | 5 votes |
def _vmf_kld(k, d):
    tmp = (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
           + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
           - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
    if tmp != tmp:
        exit()
    return np.array([tmp])
Example #22
Source File: archived_vmf.py From vmf_vae_nlp with MIT License | 5 votes |
def _vmfKL(k, d):
    return k * ((sp.iv(d / 2.0 + 1.0, k) \
                 + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
           + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
           - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2
Example #23
Source File: vmf_hypvae.py From vmf_vae_nlp with MIT License | 5 votes |
def KL_guu(k, d):
    kld = k * ((sp.iv(d / 2.0 + 1.0, k) \
                + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
          + d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
          - sp.loggamma(d / 2 + 1) - d * np.log(2) / 2

    return kld
Example #24
Source File: vmf_hypvae.py From vmf_vae_nlp with MIT License | 5 votes |
def compute_KLD(self, tup, batch_sz):
    kappa = tup['kappa']
    d = self.lat_dim
    rt_bag = []
    # const = torch.log(torch.tensor(3.1415926)) * d / 2 + torch.log(torch.tensor(2.0)) \
    #         - torch.tensor(sp.loggamma(d / 2).real) - (d / 2) * torch.log(torch.tensor(2 * 3.1415926))
    const = torch.tensor(
        np.log(np.pi) * d / 2 + np.log(2) - sp.loggamma(d / 2).real - (d / 2) * np.log(2 * np.pi)).to(
        device)
    d = torch.tensor([d], dtype=torch.float).to(device)
    batchsz = kappa.size()[0]
    rt_tensor = torch.zeros(batchsz)
    for k_idx in range(batchsz):
        k = kappa[k_idx]
        # print(k)
        # print(k)
        # print(d)
        first = k * bessel_iv(d / 2, k) / bessel_iv(d / 2 - 1, k)
        second = (d / 2 - 1) * torch.log(k) - torch.log(bessel_iv(d / 2 - 1, k))
        combin = first + second + const
        rt_tensor[k_idx] = combin
        # rt_bag.append(combin)
    return rt_tensor.to(device)
    # return torch.tensor(rt_bag,requires_grad=True).to(device)