Python tensorflow.digamma() Examples
The following are 6 code examples of tensorflow.digamma().
You can go to the original project or source file by following the link above each example.
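tf.digamma computes the digamma function psi(x) = d/dx log Gamma(x) elementwise. A minimal call sketch, assuming TensorFlow 1.x as in the examples below (TensorFlow 2.x exposes the same op as tf.math.digamma):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 10.0])
psi = tf.digamma(x)  # elementwise digamma: psi(x) = d/dx log(Gamma(x))

with tf.Session() as sess:
    # psi(1) = -0.5772... (the negative Euler-Mascheroni constant)
    print(sess.run(psi))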
Example #1
Source File: cenNormalRankOneAlgorithms.py, from decompose (MIT License)
def fitGamma(cls, tau):
    alpha = 0.5/(tf.log(tf.reduce_mean(tau))
                 + 1e-6  # added due to numerical instability
                 - tf.reduce_mean(tf.log(tau)))
    for i in range(20):
        alpha = (1. / (1./alpha
                       + (tf.reduce_mean(tf.log(tau))
                          - tf.log(tf.reduce_mean(tau))
                          + tf.log(alpha)
                          - tf.digamma(alpha))
                       / (alpha**2*(1./alpha
                                    - tf.polygamma(tf.ones_like(alpha),
                                                   alpha)))))
    beta = alpha/tf.reduce_mean(tau)
    return(alpha, beta)
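This is the standard Newton iteration on 1/alpha for the maximum-likelihood shape of a Gamma distribution; tf.polygamma with order tf.ones_like(alpha) is the trigamma function psi'(alpha), and beta is the matching rate parameter. The same iteration can be reproduced outside the graph with SciPy as a sanity check (a sketch, not code from decompose):

import numpy as np
from scipy.special import digamma, polygamma

tau = np.random.gamma(shape=3.0, scale=2.0, size=100_000)
s = np.log(tau.mean()) - np.log(tau).mean()
alpha = 0.5 / s  # same initialization as above
for _ in range(20):
    num = np.log(tau).mean() - np.log(tau.mean()) + np.log(alpha) - digamma(alpha)
    alpha = 1.0 / (1.0/alpha + num / (alpha**2 * (1.0/alpha - polygamma(1, alpha))))
beta = alpha / tau.mean()  # rate, so the fitted mean alpha/beta matches tau.mean()
print(alpha, beta)         # close to (3.0, 0.5) for this sample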
Example #2
Source File: gaussMMVAE_collapsed.py, from mixture_density_VAEs (MIT License)
def compute_kumar2beta_kld(a, b, alpha, beta):
    # tf.mul and xrange from the original source are written as tf.multiply
    # and range here (TF >= 1.0, Python 3)
    # precompute some terms
    ab = tf.multiply(a, b)
    a_inv = tf.pow(a, -1)
    b_inv = tf.pow(b, -1)
    # compute Taylor expansion for E[log (1-v)] term
    kl = tf.multiply(tf.pow(1 + ab, -1), beta_fn(a_inv, b))
    for idx in range(10):
        kl += tf.multiply(tf.pow(idx + 2 + ab, -1),
                          beta_fn(tf.multiply(idx + 2., a_inv), b))
    kl = tf.multiply(tf.multiply(beta - 1, b), kl)
    kl += tf.multiply(tf.div(a - alpha, a), -0.57721 - tf.digamma(b) - b_inv)
    # add normalization constants
    kl += tf.log(ab) + tf.log(beta_fn(alpha, beta))
    # final term
    kl += tf.div(-(b - 1), b)
    return kl
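This function evaluates the closed-form KL divergence from a Kumaraswamy(a, b) to a Beta(alpha, beta) distribution: the 10-term loop is a truncated Taylor series for the E[log(1-v)] term, and -0.57721 is the negative Euler-Mascheroni constant. The helper beta_fn is defined elsewhere in the project; a plausible implementation via the log-gamma function (an assumption, not the project's verbatim code):

def beta_fn(a, b):
    # Beta function B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b),
    # computed in log space for numerical stability
    return tf.exp(tf.lgamma(a) + tf.lgamma(b) - tf.lgamma(a + b))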
Example #3
Source File: tAlgorithms.py, from decompose (MIT License)
def nuStep(cls, nu, n, delta, p=1.):
    three = tf.constant(3., dtype=nu.dtype)
    for i in range(2):
        w = (nu+p)/(nu+delta)
        fp = (-tf.digamma(nu/2) + tf.log(nu/2)
              + 1./n*tf.reduce_sum(tf.log((nu+p)/(nu+delta)) - w, axis=0)
              + 1 + tf.digamma((p+nu)/2) - tf.log((p+nu)/2))
        fpp = (tf.polygamma(three, nu/2)/2.
               + 1./nu
               + tf.polygamma(three, (p+nu)/2)/2.
               - 1./(nu+p)
               + 1./n*tf.reduce_sum((delta-p)/(nu+delta)**2*(w-1), axis=0))
        nu = nu + fp/fpp
    return(nu)
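This is a Newton-style refinement nu <- nu + fp/fpp of the degrees of freedom nu when fitting a Student-t distribution, where fp collects the digamma terms of the log-likelihood gradient in nu. A hypothetical call sketch (the host class name and data are assumptions, not from decompose):

import numpy as np
import tensorflow as tf

n = 1000
rng = np.random.RandomState(0)
# squared standardized residuals, one column per factor (p = 1)
delta = tf.constant(rng.standard_t(df=5.0, size=(n, 3))**2, dtype=tf.float64)
nu0 = tf.constant([4.0, 4.0, 4.0], dtype=tf.float64)
nu = TAlgorithms.nuStep(nu0, n, delta)  # hypothetical host class
with tf.Session() as sess:
    print(sess.run(nu))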
Example #4
Source File: ops.py, from tfdeploy (MIT License)
def test_Digamma(self):
    t = tf.digamma(self.random(4, 3))
    self.check(t)
Example #5
Source File: tensorflow.py, from deepx (MIT License)
def digamma(self, a):
    return tf.digamma(a)
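This one-liner is part of a backend-abstraction layer: model code calls the backend's digamma and stays agnostic to the underlying framework. The same pattern in miniature with a NumPy/SciPy backend (a sketch of the idea, not deepx's actual API):

from scipy.special import digamma as np_digamma

class NumpyBackend(object):
    def digamma(self, a):
        # drop-in counterpart of the TensorFlow-backed method above
        return np_digamma(a)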
Example #6
Source File: pcrl.py, from cornac (Apache License 2.0)
def loss(self, C, X_g, X_, alpha, beta, z, E, Zik, Tk):
    const_term = C * tf.log(1e-10 + X_) - X_
    const_term = tf.reduce_sum(const_term, 1)
    loss1 = C * tf.log(1e-10 + X_g) - X_g
    loss1 = tf.reduce_sum(loss1, 1)
    loss2 = self.log_q(z, alpha + self.B, beta)
    loss2 = const_term * tf.reduce_sum(loss2, 1)
    # loss3 = -log_r(E, alpha, beta)
    loss3 = -self.log_r(E, alpha + self.B, beta)
    loss3 = const_term * tf.reduce_sum(loss3, 1)
    # the sum of KL terms of all generator's weights (up to constant terms)
    kl_w = 0.0
    if not self.w_determinist:
        for l in range(0, self.L + 1):
            kl_w += tf.reduce_sum(
                -0.5 * tf.reduce_sum(tf.square(self.generator_params[l]), 1)
            )
    # KL divergence term
    kl_term = (
        (alpha - self.aa - Zik) * tf.digamma(alpha)
        - tf.lgamma(alpha)
        + (self.aa + Zik) * tf.log(beta)
        + alpha * (Tk + self.bb - beta) / beta
    )
    kl_term = -tf.reduce_sum(kl_term, 1)
    return (
        -tf.reduce_mean(loss1 + loss2 + loss3 + kl_term)
        + kl_w / self.aux_data.shape[0]
    )

# fitting PCRL to observed data
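In this loss the digamma comes from the Gamma distribution's expected log: for theta ~ Gamma(alpha, beta) in the rate parameterization, E[log theta] = digamma(alpha) - log(beta), which is the quantity a KL term between Gamma distributions needs. A quick Monte Carlo check of that identity (an illustration, not code from cornac):

import numpy as np
from scipy.special import digamma

alpha, beta = 2.5, 1.7
theta = np.random.gamma(shape=alpha, scale=1.0/beta, size=1_000_000)
print(np.log(theta).mean())           # Monte Carlo estimate of E[log theta]
print(digamma(alpha) - np.log(beta))  # closed form via the digamma function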