Python scipy.special.gammaln() Examples
The following are 28 code examples of scipy.special.gammaln(), collected from open-source projects. Each example is listed with its original project, source file, and license. You may also want to check out all available functions and classes of the module scipy.special.
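Before the project examples, a minimal standalone sketch (not taken from any of the projects below) of what gammaln computes: the natural log of the absolute value of the gamma function. Since gammaln(n + 1) == log(n!), it keeps factorial-based quantities such as binomial coefficients in a numerically safe range. The helper name log_binom is hypothetical.

import numpy as np
from scipy.special import gammaln

def log_binom(n, k):
    # log of the binomial coefficient C(n, k) via log-gamma
    return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)

print(np.exp(log_binom(10, 3)))   # ~120.0
print(log_binom(1000, 500))       # ~689.5; 1000! itself overflows float64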
Example #1
Source File: arima_process.py From vnpy_crypto with MIT License
def lpol_fima(d, n=20):
    """MA representation of fractional integration

    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ma : array
        coefficients of lag polynomial
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    j = np.arange(n)
    return np.exp(gammaln(d + j) - gammaln(j + 1) - gammaln(d))

# moved from sandbox.tsa.try_fi
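A quick hand-check of this example (my own sketch, not part of the original file): the returned weights are the binomial-series coefficients (d)_j / j! of (1-L)^{-d}, so the first few values for d = 0.4 can be verified directly.

import numpy as np
from scipy.special import gammaln

def lpol_fima(d, n=20):
    j = np.arange(n)
    return np.exp(gammaln(d + j) - gammaln(j + 1) - gammaln(d))

# ma[j] = Gamma(d + j) / (Gamma(j + 1) * Gamma(d)) = (d)_j / j!
# For d = 0.4: 1, 0.4, (0.4*1.4)/2 = 0.28, (0.4*1.4*2.4)/6 = 0.224, ...
print(lpol_fima(0.4, n=4))   # [1.    0.4   0.28  0.224]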
Example #2
Source File: test_bayesian_mixture.py From Mastering-Elasticsearch-7.0 with MIT License
def test_log_wishart_norm():
    rng = np.random.RandomState(0)

    n_components, n_features = 5, 2
    degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
    log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))

    expected_norm = np.empty(5)
    for k, (degrees_of_freedom_k, log_det_k) in enumerate(
            zip(degrees_of_freedom, log_det_precisions_chol)):
        expected_norm[k] = -(
            degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
            np.sum(gammaln(.5 * (degrees_of_freedom_k -
                                 np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)

    assert_almost_equal(expected_norm, predicted_norm)
Example #3
Source File: weights.py From pyhawkes with MIT License
def meanfieldupdate_p(self, stepsize=1.0):
    """
    Update p given the network parameters and the current variational
    parameters of the weight distributions.
    :return:
    """
    logit_p = self.network.expected_log_p() - self.network.expected_log_notp()
    logit_p += self.network.kappa * self.network.expected_log_v() - gammaln(self.network.kappa)
    logit_p += gammaln(self.mf_kappa_1) - self.mf_kappa_1 * np.log(self.mf_v_1)
    logit_p += gammaln(self.kappa_0) - self.kappa_0 * np.log(self.nu_0)
    logit_p += self.mf_kappa_0 * np.log(self.mf_v_0) - gammaln(self.mf_kappa_0)

    # p_hat = logistic(logit_p)
    # self.mf_p = (1.0 - stepsize) * self.mf_p + stepsize * p_hat

    logit_p_hat = (1 - stepsize) * logit(self.mf_p) + \
                  stepsize * logit_p
    # self.mf_p = logistic(logit_p_hat)
    self.mf_p = np.clip(logistic(logit_p_hat), 1e-8, 1 - 1e-8)
Example #4
Source File: bayesian_mixture.py From Mastering-Elasticsearch-7.0 with MIT License
def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
    """Compute the log of the Wishart distribution normalization term.

    Parameters
    ----------
    degrees_of_freedom : array-like, shape (n_components,)
        The number of degrees of freedom on the covariance Wishart
        distributions.

    log_det_precisions_chol : array-like, shape (n_components,)
        The determinant of the precision matrix for each component.

    n_features : int
        The number of features.

    Returns
    -------
    log_wishart_norm : array-like, shape (n_components,)
        The log normalization of the Wishart distribution.
    """
    # To simplify the computation we have removed the np.log(np.pi) term
    return -(degrees_of_freedom * log_det_precisions_chol +
             degrees_of_freedom * n_features * .5 * math.log(2.) +
             np.sum(gammaln(.5 * (degrees_of_freedom -
                                  np.arange(n_features)[:, np.newaxis])), 0))
Example #5
Source File: test_multivariate.py From zhusuan with MIT License
def test_value(self):
    with self.session(use_gpu=True):
        def _test_value(given, temperature, logits):
            given = np.array(given, np.float32)
            logits = np.array(logits, np.float32)
            n = logits.shape[-1]
            t = temperature
            target_log_p = gammaln(n) + (n - 1) * np.log(t) + \
                (logits - t * given).sum(axis=-1) - \
                n * np.log(np.exp(logits - t * given).sum(axis=-1))

            con = ExpConcrete(temperature, logits=logits)
            log_p = con.log_prob(given)
            self.assertAllClose(log_p.eval(), target_log_p)
            p = con.prob(given)
            self.assertAllClose(p.eval(), np.exp(target_log_p))

        _test_value([np.log(0.25), np.log(0.25), np.log(0.5)],
                    0.1, [1., 1., 1.2])
        _test_value([[np.log(0.25), np.log(0.25), np.log(0.5)],
                     [np.log(0.1), np.log(0.5), np.log(0.4)]],
                    0.5, [[1., 1., 1.], [.5, .5, .4]])
Example #6
Source File: test_multivariate.py From zhusuan with MIT License
def test_value(self):
    with self.session(use_gpu=True):
        def _test_value(given, temperature, logits):
            given = np.array(given, np.float32)
            logits = np.array(logits, np.float32)
            n = logits.shape[-1]
            t = temperature
            target_log_p = gammaln(n) + (n - 1) * np.log(t) + \
                (logits - (t + 1) * np.log(given)).sum(axis=-1) - \
                n * np.log(np.exp(logits - t * np.log(given)).sum(axis=-1))

            con = Concrete(temperature, logits=logits)
            log_p = con.log_prob(given)
            self.assertAllClose(log_p.eval(), target_log_p)
            p = con.prob(given)
            self.assertAllClose(p.eval(), np.exp(target_log_p))

        _test_value([0.25, 0.25, 0.5], 0.1, [1., 1., 1.2])
        _test_value([[0.25, 0.25, 0.5], [0.1, 0.5, 0.4]],
                    0.5, [[1., 1., 1.], [.5, .5, .4]])
Example #7
Source File: test_special.py From mars with Apache License 2.0
def testGammaln(self):
    raw = np.random.rand(10, 8, 5)
    t = tensor(raw, chunk_size=3)

    r = gammaln(t)
    expect = scipy_gammaln(raw)

    self.assertEqual(r.shape, raw.shape)
    self.assertEqual(r.dtype, expect.dtype)

    r = r.tiles()
    t = get_tiled(t)

    self.assertEqual(r.nsplits, t.nsplits)
    for c in r.chunks:
        self.assertIsInstance(c.op, TensorGammaln)
        self.assertEqual(c.index, c.inputs[0].index)
        self.assertEqual(c.shape, c.inputs[0].shape)
Example #8
Source File: hdpmodel.py From topical_word_embeddings with MIT License
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
    gamma = np.ones(len(alpha))
    expElogtheta = np.exp(dirichlet_expectation(gamma))
    betad = beta[:, doc_word_ids]
    phinorm = np.dot(expElogtheta, betad) + 1e-100
    counts = np.array(doc_word_counts)
    for _ in xrange(max_iter):
        lastgamma = gamma
        gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = np.exp(Elogtheta)
        phinorm = np.dot(expElogtheta, betad) + 1e-100
        meanchange = np.mean(abs(gamma - lastgamma))
        if (meanchange < meanchangethresh):
            break

    likelihood = np.sum(counts * np.log(phinorm))
    likelihood += np.sum((alpha - gamma) * Elogtheta)
    likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
    likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))

    return (likelihood, gamma)
Example #9
Source File: _continuous_distns.py From lambda-packs with MIT License
def _pdf(self, x, df, nc):
    # nct.pdf(x, df, nc) =
    #                             df**(df/2) * gamma(df+1)
    #     ------------------------------------------------------
    #     2**df * exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
    n = df * 1.0
    nc = nc * 1.0
    x2 = x * x
    ncx2 = nc * nc * x2
    fac1 = n + x2
    trm1 = n / 2. * np.log(n) + sc.gammaln(n + 1)
    trm1 -= n * np.log(2) + nc * nc / 2. + (n / 2.) * np.log(fac1) + sc.gammaln(n / 2.)
    Px = np.exp(trm1)
    valF = ncx2 / (2 * fac1)
    trm1 = np.sqrt(2) * nc * x * sc.hyp1f1(n / 2 + 1, 1.5, valF)
    trm1 /= np.asarray(fac1 * sc.gamma((n + 1) / 2))
    trm2 = sc.hyp1f1((n + 1) / 2, 0.5, valF)
    trm2 /= np.asarray(np.sqrt(fac1) * sc.gamma(n / 2 + 1))
    Px *= trm1 + trm2
    return Px
Example #10
Source File: _multivariate.py From lambda-packs with MIT License
def _lnB(alpha):
    r"""
    Internal helper function to compute the log of the useful quotient

    .. math::

        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
                         {\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}

    Parameters
    ----------
    %(_dirichlet_doc_default_callparams)s

    Returns
    -------
    B : scalar
        Helper quotient, internal use only
    """
    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
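A sanity check of this helper (my own sketch, not part of the SciPy source): _lnB is the log normalizing constant of the Dirichlet density, so the log-pdf it implies can be compared against scipy.stats.dirichlet.logpdf.

import numpy as np
from scipy.special import gammaln
from scipy.stats import dirichlet

def _lnB(alpha):
    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))

alpha = np.array([2.0, 3.0, 4.0])
x = np.array([0.2, 0.3, 0.5])            # a point on the simplex

# Dirichlet log-pdf: -ln B(alpha) + sum_i (alpha_i - 1) ln x_i
lp = -_lnB(alpha) + np.sum((alpha - 1) * np.log(x))
print(np.isclose(lp, dirichlet.logpdf(x, alpha)))   # True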
Example #11
Source File: test_squeeze_operation.py From strawberryfields with Apache License 2.0
def matrix_elem(n, r, m):
    """Matrix element corresponding to squeezed density matrix[n, m]"""
    eps = 1e-10

    if n % 2 != m % 2:
        return 0.0

    if r == 0.0:
        # delta function (complex() instead of np.complex, which was
        # removed in NumPy 1.24)
        return complex(n == m)

    k = np.arange(m % 2, min([m, n]) + 1, 2)
    res = np.sum(
        (-1) ** ((n - k) / 2)
        * np.exp(
            (lg(m + 1) + lg(n + 1)) / 2
            - lg(k + 1)
            - lg((m - k) / 2 + 1)
            - lg((n - k) / 2 + 1)
        )
        * (np.sinh(r) / 2 + eps) ** ((n + m - 2 * k) / 2)
        / (np.cosh(r) ** ((n + m + 1) / 2))
    )

    return res
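A brief check of this matrix element (my own sketch, with the function redefined so the snippet is self-contained): for n = m = 0 the sum collapses to the single k = 0 term and reduces to the known vacuum overlap of a squeezed state, 1/sqrt(cosh r).

import numpy as np
from scipy.special import gammaln as lg   # alias used in the test module

def matrix_elem(n, r, m):
    eps = 1e-10
    if n % 2 != m % 2:
        return 0.0
    if r == 0.0:
        return complex(n == m)   # delta function
    k = np.arange(m % 2, min([m, n]) + 1, 2)
    return np.sum(
        (-1) ** ((n - k) / 2)
        * np.exp((lg(m + 1) + lg(n + 1)) / 2 - lg(k + 1)
                 - lg((m - k) / 2 + 1) - lg((n - k) / 2 + 1))
        * (np.sinh(r) / 2 + eps) ** ((n + m - 2 * k) / 2)
        / (np.cosh(r) ** ((n + m + 1) / 2))
    )

r = 0.5
print(np.isclose(matrix_elem(0, r, 0), 1 / np.sqrt(np.cosh(r))))   # True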
Example #12
Source File: weights.py From pyhawkes with MIT License
def log_likelihood(self, x):
    """
    Compute the log likelihood of the given A and W
    :param x: an (A,W) tuple
    :return:
    """
    A, W = x
    assert isinstance(A, np.ndarray) and A.shape == (self.K, self.K), \
        "A must be a KxK adjacency matrix"
    assert isinstance(W, np.ndarray) and W.shape == (self.K, self.K), \
        "W must be a KxK weight matrix"

    # LL of A
    rho = np.clip(self.network.P, 1e-32, 1 - 1e-32)
    ll = (A * np.log(rho) + (1 - A) * np.log(1 - rho)).sum()
    ll = np.nan_to_num(ll)

    # Get the shape and scale parameters from the network model
    kappa = self.network.kappa
    v = self.network.V

    # Add the LL of the gamma weights
    lp_W = kappa * np.log(v) - gammaln(kappa) + \
           (kappa - 1) * np.log(W) - v * W
    ll += (A * lp_W).sum()

    return ll
Example #13
Source File: test_basic.py From Computable with MIT License
def test_gammaln(self):
    gamln = special.gammaln(3)
    lngam = log(special.gamma(3))
    assert_almost_equal(gamln, lngam, 8)
Example #14
Source File: test_basic.py From Computable with MIT License
def test_gammaln(self):
    cephes.gammaln(10)
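Both of these smoke tests lean on the identity gammaln(x) == log(gamma(x)), which only holds while gamma(x) is representable. A short sketch (mine, not from the test suite) of why gammaln is the safer choice for large arguments:

import numpy as np
from scipy import special

x = 200.0
print(special.gamma(x))     # inf -- gamma overflows float64 above x ~ 171.6
print(special.gammaln(x))   # ~857.93, still finite and accurate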
Example #15
Source File: test_bayesian_mixture.py From Mastering-Elasticsearch-7.0 with MIT License
def test_log_dirichlet_norm():
    rng = np.random.RandomState(0)

    weight_concentration = rng.rand(2)
    expected_norm = (gammaln(np.sum(weight_concentration)) -
                     np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)

    assert_almost_equal(expected_norm, predicted_norm)
Example #16
Source File: tools.py From vnpy_crypto with MIT License
def ts_lls(y, params, df):
    '''t loglikelihood given observations and mean mu and variance sigma2 = 1

    Parameters
    ----------
    y : array, 1d
        normally distributed random variable
    params : array, (nobs, 2)
        array of mean, variance (mu, sigma2) with observations in rows
    df : integer
        degrees of freedom of the t distribution

    Returns
    -------
    lls : array
        contribution to loglikelihood for each observation

    Notes
    -----
    parameterized for garch
    normalized/rescaled so that sigma2 is the variance

    >>> df = 10; sigma = 1.
    >>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
    (array(0.0), array(1.0))
    >>> sigma = np.sqrt(2.)
    >>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
    (array(0.0), array(2.0))
    '''
    print(y, params, df)
    mu, sigma2 = params.T
    df = df * 1.0
    #lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
    #lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
    lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log(df*np.pi)
    lls -= (df+1.)/2. * np.log(1. + (y-mu)**2/df/sigma2) + 0.5 * np.log(sigma2)
    return lls
Example #17
Source File: tools.py From vnpy_crypto with MIT License
def tstd_pdf(x, df):
    '''pdf for standardized (not standard) t distribution, variance is one
    '''
    r = np.array(df * 1.0)
    Px = np.exp(special.gammaln((r + 1) / 2.) -
                special.gammaln(r / 2.)) / np.sqrt((r - 2) * pi)
    Px /= (1 + (x**2) / (r - 2))**((r + 1) / 2.)
    return Px
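A cross-check of this density (my own sketch): a standardized t with unit variance is an ordinary Student t(df) rescaled by sqrt((df-2)/df), so tstd_pdf should agree with scipy.stats.t evaluated with that scale.

import numpy as np
from scipy import special, stats

def tstd_pdf(x, df):
    r = np.array(df * 1.0)
    Px = np.exp(special.gammaln((r + 1) / 2.) -
                special.gammaln(r / 2.)) / np.sqrt((r - 2) * np.pi)
    Px /= (1 + (x**2) / (r - 2))**((r + 1) / 2.)
    return Px

x, df = np.linspace(-3, 3, 7), 10
scale = np.sqrt((df - 2.) / df)   # shrinks t(df) to unit variance
print(np.allclose(tstd_pdf(x, df),
                  stats.t.pdf(x, df, loc=0., scale=scale)))   # True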
Example #18
Source File: tools.py From vnpy_crypto with MIT License
def tstd_lls(y, params, df):
    '''t loglikelihood given observations and mean mu and variance sigma2 = 1

    Parameters
    ----------
    y : array, 1d
        normally distributed random variable
    params : array, (nobs, 2)
        array of mean, variance (mu, sigma2) with observations in rows
    df : integer
        degrees of freedom of the t distribution

    Returns
    -------
    lls : array
        contribution to loglikelihood for each observation

    Notes
    -----
    parameterized for garch
    '''
    mu, sigma2 = params.T
    df = df * 1.0
    lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
    lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2)/sigma2) + 0.5 * np.log(sigma2)
    return lls
Example #19
Source File: estimators.py From vnpy_crypto with MIT License
def fitbinned(distfn, freq, binedges, start, fixed=None):
    '''estimate parameters of distribution function for binned data using MLE

    Parameters
    ----------
    distfn : distribution instance
        needs to have cdf method, as in scipy.stats
    freq : array, 1d
        frequency count, e.g. obtained by histogram
    binedges : array, 1d
        binedges including lower and upper bound
    start : tuple or array_like ?
        starting values, needs to have correct length

    Returns
    -------
    paramest : array
        estimated parameters

    Notes
    -----
    todo: add fixed parameter option

    added factorial
    '''
    if fixed is not None:
        raise NotImplementedError
    nobs = np.sum(freq)
    lnnobsfact = special.gammaln(nobs + 1)

    def nloglike(params):
        '''negative loglikelihood function of binned data

        corresponds to multinomial
        '''
        prob = np.diff(distfn.cdf(binedges, *params))
        return -(lnnobsfact + np.sum(freq * np.log(prob) -
                                     special.gammaln(freq + 1)))

    return optimize.fmin(nloglike, start)
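A usage sketch (mine, with the unused `fixed` option dropped for brevity): fitting a normal distribution to histogram counts should roughly recover the true location and scale. The exact estimates depend on the binning and on scipy.optimize.fmin's convergence.

import numpy as np
from scipy import optimize, special, stats

def fitbinned(distfn, freq, binedges, start):
    nobs = np.sum(freq)
    lnnobsfact = special.gammaln(nobs + 1)

    def nloglike(params):
        # multinomial negative log-likelihood of the binned counts
        prob = np.diff(distfn.cdf(binedges, *params))
        return -(lnnobsfact + np.sum(freq * np.log(prob) -
                                     special.gammaln(freq + 1)))

    return optimize.fmin(nloglike, start)

rng = np.random.default_rng(0)
sample = rng.normal(loc=1.0, scale=2.0, size=2000)
freq, binedges = np.histogram(sample, bins=20)
print(fitbinned(stats.norm, freq, binedges, start=(0., 1.)))  # roughly [1., 2.]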
Example #20
Source File: weights.py From pyhawkes with MIT License
def log_likelihood(self, x):
    """
    Compute the log likelihood of the given A and W
    :param x: an (A,W) tuple
    :return:
    """
    A, W = x
    assert isinstance(A, np.ndarray) and A.shape == (self.K, self.K), \
        "A must be a KxK adjacency matrix"
    assert isinstance(W, np.ndarray) and W.shape == (self.K, self.K), \
        "W must be a KxK weight matrix"

    # LL of A
    rho = self.network.P
    lp_A = (A * np.log(rho) + (1 - A) * np.log(1 - rho))

    # Get the shape and scale parameters from the network model
    kappa = self.network.kappa
    v = self.network.V

    # Add the LL of the gamma weights
    # lp_W = np.zeros((self.K, self.K))
    # lp_W = A * (kappa * np.log(v) - gammaln(kappa)
    #             + (kappa-1) * np.log(W) - v * W)
    lp_W0 = (self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
             + (self.kappa_0 - 1) * np.log(W) - self.nu_0 * W)[A == 0]
    lp_W1 = (kappa * np.log(v) - gammaln(kappa)
             + (kappa - 1) * np.log(W) - v * W)[A == 1]
    # lp_W = A * (kappa * np.log(v) - gammaln(kappa)
    #             + (kappa-1) * np.log(W) - v * W) + \
    #        (1-A) * (self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
    #                 + (self.kappa_0-1) * np.log(W) - self.nu_0 * W)

    ll = lp_A.sum() + lp_W0.sum() + lp_W1.sum()
    return ll
Example #21
Source File: multivariate.py From vnpy_crypto with MIT License
def chi_logpdf(x, df):
    tmp = (df - 1.) * np_log(x) + (-x * x * 0.5) - (df * 0.5 - 1) * np_log(2.0) \
          - sps_gammaln(df * 0.5)
    return tmp
Example #22
Source File: multivariate.py From vnpy_crypto with MIT License
def chi_pdf(x, df):
    tmp = (df - 1.) * np_log(x) + (-x * x * 0.5) - (df * 0.5 - 1) * np_log(2.0) \
          - sps_gammaln(df * 0.5)
    return np_exp(tmp)
    #return x**(df-1.)*np_exp(-x*x*0.5)/(2.0)**(df*0.5-1)/sps_gamma(df*0.5)
Example #23
Source File: family.py From vnpy_crypto with MIT License
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
    r"""
    The log-likelihood function for each observation in terms of the fitted
    mean response for the Poisson distribution.

    Parameters
    ----------
    endog : array
        Usually the endogenous response variable.
    mu : array
        Usually but not always the fitted mean response variable.
    var_weights : array-like
        1d array of variance (analytic) weights. The default is 1.
    scale : float
        The scale parameter. The default is 1.

    Returns
    -------
    ll_i : float
        The value of the loglikelihood evaluated at
        (endog, mu, var_weights, scale) as defined below.

    Notes
    -----
    .. math::

        ll_i = var\_weights_i / scale * (endog_i * \ln(\mu_i) - \mu_i -
               \ln \Gamma(endog_i + 1))
    """
    return var_weights / scale * (endog * np.log(mu) - mu -
                                  special.gammaln(endog + 1))
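With unit var_weights and scale, this expression is exactly the Poisson log-pmf, which can be confirmed against scipy.stats (a self-contained sketch of the method above, with `self` dropped):

import numpy as np
from scipy import special, stats

def loglike_obs(endog, mu, var_weights=1., scale=1.):
    return var_weights / scale * (endog * np.log(mu) - mu -
                                  special.gammaln(endog + 1))

y = np.arange(6)
mu = np.full(6, 2.5)
print(np.allclose(loglike_obs(y, mu), stats.poisson.logpmf(y, mu)))   # True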
Example #24
Source File: _tweedie_compound_poisson.py From vnpy_crypto with MIT License
def _logWj(y, j, p, phi):
    alpha = _alpha(p)
    logz = (-alpha * np.log(y) + alpha * np.log(p - 1) -
            (1 - alpha) * np.log(phi) - np.log(2 - p))
    return (j * logz - gammaln(1 + j) - gammaln(-alpha * j))
Example #25
Source File: scipy.py From vnpy_crypto with MIT License
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).
    For example,

    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan,  nan,  21.,  32.])

    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.
    """
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        else:
            fillvalue = np.nan
    else:
        if f2 is not None:
            raise ValueError("Only one of (fillvalue, f2) can be given.")

    arrays = np.broadcast_arrays(*arrays)
    temp = tuple(np.extract(cond, arr) for arr in arrays)
    tcode = np.mintypecode([a.dtype.char for a in arrays])
    out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
    np.place(out, cond, f(*temp))
    if f2 is not None:
        temp = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*temp))

    return out

# Workaround for changes to complex handling in gammaln in scipy 1.0.0;
# loggamma was introduced in scipy 0.18.
Example #26
Source File: arima_process.py From vnpy_crypto with MIT License
def lpol_fiar(d, n=20):
    """AR representation of fractional integration

    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ar : array
        coefficients of lag polynomial

    Notes
    -----
    first coefficient is 1, negative signs except for first term,
    ar(L)*x_t
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    j = np.arange(n)
    ar = - np.exp(gammaln(-d + j) - gammaln(j + 1) - gammaln(-d))
    ar[0] = 1
    return ar

# moved from sandbox.tsa.try_fi
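Since (1-L)^d (1-L)^{-d} = 1, the AR weights from lpol_fiar and the MA weights from lpol_fima (Example #1) should convolve to the identity polynomial. A self-contained sketch of that check:

import numpy as np
from scipy.special import gammaln

def lpol_fiar(d, n=20):
    j = np.arange(n)
    ar = -np.exp(gammaln(-d + j) - gammaln(j + 1) - gammaln(-d))
    ar[0] = 1
    return ar

def lpol_fima(d, n=20):
    j = np.arange(n)
    return np.exp(gammaln(d + j) - gammaln(j + 1) - gammaln(d))

d, n = 0.4, 20
prod = np.convolve(lpol_fiar(d, n), lpol_fima(d, n))[:n]
print(np.allclose(prod, np.eye(1, n)[0]))   # True: the lag polynomials invert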
Example #27
Source File: weights.py From pyhawkes with MIT License
def _resample_A(self, ss):
    """
    Resample A from the marginal distribution after integrating out W
    :param ss:
    :return:
    """
    p = self.network.P
    v = self.network.V

    kappa0_post = self.kappa_0 + ss[0, :, :]
    v0_post = self.nu_0 + ss[1, :, :]

    kappa1_post = self.network.kappa + ss[0, :, :]
    v1_post = v + ss[1, :, :]

    # Compute the marginal likelihood of A=1 and of A=0
    # The result of the integral is a ratio of gamma distribution
    # normalizing constants
    lp0 = self.kappa_0 * np.log(self.nu_0) - gammaln(self.kappa_0)
    lp0 += gammaln(kappa0_post) - kappa0_post * np.log(v0_post)

    lp1 = self.network.kappa * np.log(v) - gammaln(self.network.kappa)
    lp1 += gammaln(kappa1_post) - kappa1_post * np.log(v1_post)

    # Add the prior and normalize
    lp0 = lp0 + np.log(1.0 - p)
    lp1 = lp1 + np.log(p)
    Z = logsumexp(np.concatenate((lp0[:, :, None], lp1[:, :, None]),
                                 axis=2), axis=2)

    # ln p(A=1) = ln (exp(lp1) / (exp(lp0) + exp(lp1)))
    #           = lp1 - ln(exp(lp0) + exp(lp1))
    #           = lp1 - Z
    self.A = np.log(np.random.rand(self.K, self.K)) < lp1 - Z
Example #28
Source File: discrete.py From vnpy_crypto with MIT License
def _logpmf(self, x, mu, w):
    return _lazywhere(x != 0, (x, mu, w),
                      (lambda x, mu, w: np.log(1. - w) + x * np.log(mu) -
                          gammaln(x + 1.) - mu),
                      np.log(w + (1. - w) * np.exp(-mu)))
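The zero-inflated Poisson pmf implied by this method should sum to one over the support. A standalone sketch (with the _lazywhere dispatch shown earlier replaced by np.where, which is safe here because both branches are finite):

import numpy as np
from scipy.special import gammaln

def zip_logpmf(x, mu, w):
    # P(X = x) = (1 - w) * Poisson(mu)        for x > 0,
    #            w + (1 - w) * exp(-mu)       for x = 0
    poisson_part = np.log(1. - w) + x * np.log(mu) - gammaln(x + 1.) - mu
    zero_part = np.log(w + (1. - w) * np.exp(-mu))
    return np.where(x != 0, poisson_part, zero_part)

x = np.arange(200)
pmf = np.exp(zip_logpmf(x, mu=3.0, w=0.2))
print(np.isclose(pmf.sum(), 1.0))   # True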