Python chainer.functions.gaussian() Examples
The following are 15 code examples of chainer.functions.gaussian(), drawn from open-source projects; the source file and license are noted with each example. You may also want to check out all the available functions and classes of the chainer.functions module, or try the search function.
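For orientation, here is a minimal, hedged sketch of what chainer.functions.gaussian does: it draws a reparameterized sample z = mean + exp(ln_var / 2) * eps with eps ~ N(0, I), so the sample stays differentiable with respect to both arguments. The shapes below are arbitrary.

import numpy as np
import chainer
import chainer.functions as F

# F.gaussian(mean, ln_var) expects the *log variance* as its second argument
# and returns a differentiable sample from N(mean, exp(ln_var)).
mean = chainer.Variable(np.zeros((2, 3), dtype=np.float32))
ln_var = chainer.Variable(np.zeros((2, 3), dtype=np.float32))  # variance = exp(0) = 1

z = F.gaussian(mean, ln_var)
print(z.shape)  # (2, 3)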
Example #1
Source File: model.py From chainer-gqn with MIT License
def generate_image(self, v, r):
    xp = cuda.get_array_module(v)
    batch_size = v.shape[0]
    h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape[:2] + (1, 1))

    for t in range(self.num_layers):
        generation_core = self.get_generation_core(t)

        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)
        z_t = cf.gaussian(mean_z_p, ln_var_z_p)

        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)

        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen

    mean_x = self.map_u_x(u_t)
    return mean_x.data
Example #2
Source File: net.py From tensorboardX with MIT License
def get_loss_func(self, C=1.0, k=1):
    """Get loss function of VAE.

    The loss value is equal to ELBO (Evidence Lower Bound)
    multiplied by -1.

    Args:
        C (int): Usually this is 1.0. Can be changed to control the
            second term of ELBO bound, which works as regularization.
        k (int): Number of Monte Carlo samples used in encoded vector.
    """
    def lf(x):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # reconstruction loss
        rec_loss = 0
        for l in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                / (k * batchsize)
        self.rec_loss = rec_loss
        self.loss = self.rec_loss + \
            C * gaussian_kl_divergence(mu, ln_var) / batchsize
        return self.loss
    return lf
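For context, a small self-contained sketch of the reparameterized sampling plus KL penalty that the loss above combines. The TinyEncoder link and all shapes here are illustrative assumptions, not part of net.py.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class TinyEncoder(chainer.Chain):
    """Toy encoder producing the mean and log-variance of q(z|x)."""
    def __init__(self, n_in=8, n_latent=2):
        super(TinyEncoder, self).__init__()
        with self.init_scope():
            self.mu = L.Linear(n_in, n_latent)
            self.ln_var = L.Linear(n_in, n_latent)

    def __call__(self, x):
        return self.mu(x), self.ln_var(x)

x = np.random.rand(4, 8).astype(np.float32)
mu, ln_var = TinyEncoder()(x)
z = F.gaussian(mu, ln_var)                          # one Monte Carlo sample of q(z|x)
kl = F.gaussian_kl_divergence(mu, ln_var) / len(x)  # KL(q(z|x) || N(0, I)) per example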
Example #3
Source File: test_gaussian.py From chainer with MIT License
def test_forward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    m = chainer.Variable(m_data)
    v = chainer.Variable(v_data)

    # Call forward without eps and retrieve it
    n1, eps = functions.gaussian(m, v, return_eps=True)

    self.assertIsInstance(eps, backend_config.xp.ndarray)
    self.assertEqual(n1.dtype, self.dtype)
    self.assertEqual(n1.shape, m.shape)
    self.assertEqual(eps.dtype, self.dtype)
    self.assertEqual(eps.shape, m.shape)

    # Call again with retrieved eps
    n2 = functions.gaussian(m, v, eps=eps)
    self.assertEqual(n2.dtype, self.dtype)
    self.assertEqual(n2.shape, m.shape)

    testing.assert_allclose(n1.array, n2.array)
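The eps and return_eps keyword arguments exercised by this test (available in recent Chainer versions) make the sampling reproducible: the noise retrieved from one call can be fed back to obtain the same output. A minimal sketch with arbitrary shapes:

import numpy as np
import chainer.functions as F

mean = np.zeros((2, 3), dtype=np.float32)
ln_var = np.zeros((2, 3), dtype=np.float32)

z1, eps = F.gaussian(mean, ln_var, return_eps=True)  # also return the drawn noise
z2 = F.gaussian(mean, ln_var, eps=eps)               # reuse it -> identical sample
assert np.allclose(z1.array, z2.array)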
Example #4
Source File: test_gaussian.py From chainer with MIT License
def test_double_backward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    y_grad = backend_config.get_array(self.gy)
    m_grad_grad, v_grad_grad = (
        backend_config.get_array((self.ggm, self.ggv)))
    eps = backend_config.get_array(
        numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))

    def f(m, v):
        # In case numerical gradient computation is held in more precise
        # dtype than that of backward computation, cast the eps to reuse
        # before the numerical computation.
        eps_ = eps.astype(m.dtype)
        return functions.gaussian(m, v, eps=eps_)

    gradient_check.check_double_backward(
        f, (m_data, v_data), y_grad, (m_grad_grad, v_grad_grad),
        **self.check_double_backward_options)
Example #5
Source File: model.py From chainer-gqn with MIT License
def sample(self, h):
    mean, ln_var = self.compute_parameter(h)
    return cf.gaussian(mean, ln_var)
Example #6
Source File: model.py From chainer-gqn with MIT License
def sample_z_and_x_params_from_posterior(self, x, v, r):
    batch_size = x.shape[0]
    xp = cuda.get_array_module(x)
    h_t_gen, c_t_gen, u_t, h_t_enc, c_t_enc = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape + (1, 1))

    z_t_params_array = []

    for t in range(self.num_layers):
        inference_core = self.get_inference_core(t)
        generation_core = self.get_generation_core(t)

        h_next_enc, c_next_enc = inference_core(h_t_gen, h_t_enc, c_t_enc,
                                                x, v, r, u_t)

        mean_z_q, ln_var_z_q = self.z_posterior_distribution.compute_parameter(
            h_t_enc)
        z_t = cf.gaussian(mean_z_q, ln_var_z_q)

        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)

        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)

        z_t_params_array.append((mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p))

        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen
        h_t_enc = h_next_enc
        c_t_enc = c_next_enc

    mean_x = self.map_u_x(u_t)
    return z_t_params_array, mean_x
Example #7
Source File: model.py From chainer-gqn with MIT License
def generate_canvas_states(self, v, r, xp):
    batch_size = v.shape[0]
    h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape[:2] + (1, 1))

    u_t_array = []

    for t in range(self.num_layers):
        generation_core = self.get_generation_core(t)

        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)
        z_t = cf.gaussian(mean_z_p, ln_var_z_p)

        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)

        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen

        u_t_array.append(u_t)

    return u_t_array
Example #8
Source File: distribution.py From chainerrl with MIT License
def sample(self):
    return F.gaussian(self.mean, self.ln_var)
Example #9
Source File: distribution.py From chainerrl with MIT License
def sample_with_log_prob(self):
    x = F.gaussian(self.mean, self.ln_var)
    normal_log_prob = _eltwise_gaussian_log_likelihood(
        x, self.mean, self.var, self.ln_var)
    log_probs = normal_log_prob - _tanh_forward_log_det_jacobian(x)
    y = F.tanh(x)
    return y, F.sum(log_probs, axis=1)
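The two helpers above are internal to chainerrl; the math they implement is the standard change-of-variables correction for a tanh-squashed Gaussian sample y = tanh(x). A hedged numpy restatement of that correction (not chainerrl code):

import numpy as np

def tanh_gaussian_log_prob(x, mean, ln_var):
    # log N(x; mean, exp(ln_var)) evaluated elementwise...
    var = np.exp(ln_var)
    normal_log_prob = -0.5 * (np.log(2 * np.pi) + ln_var + (x - mean) ** 2 / var)
    # ...minus log|d tanh(x)/dx| = log(1 - tanh(x)**2), with a small constant
    # added for numerical stability when |tanh(x)| is close to 1
    log_det_jacobian = np.log(1.0 - np.tanh(x) ** 2 + 1e-12)
    return np.sum(normal_log_prob - log_det_jacobian, axis=1)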
Example #10
Source File: distribution.py From chainerrl with MIT License
def sample(self):
    # Caution: If you would like to apply `log_prob` later, use
    # `sample_with_log_prob` instead for stability, especially when
    # tanh(x) can be close to -1 or 1.
    y = F.tanh(F.gaussian(self.mean, self.ln_var))
    return y
Example #11
Source File: decoder_cells.py From knmt with GNU General Public License v3.0
def advance_one_step(self, previous_states, prev_y):
    if self.noise_on_prev_word:
        current_mb_size = prev_y.data.shape[0]
        assert self.mb_size is None or current_mb_size <= self.mb_size
        prev_y = prev_y * F.gaussian(
            Variable(self.noise_mean[:current_mb_size]),
            Variable(self.noise_lnvar[:current_mb_size]))
    new_states, concatenated, attn = self.advance_state(previous_states, prev_y)
    logits = self.compute_logits(new_states, concatenated, attn)
    return new_states, logits, attn
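A hedged, standalone sketch of the multiplicative embedding noise used above: each previous-word embedding is scaled elementwise by a sample from a fixed Gaussian. The mean-one, small-variance buffers here are illustrative stand-ins for noise_mean and noise_lnvar, which decoder_cells.py constructs elsewhere.

import numpy as np
import chainer
import chainer.functions as F

mb_size, emb_dim = 4, 8
prev_y = chainer.Variable(np.random.randn(mb_size, emb_dim).astype(np.float32))

noise_mean = np.ones((mb_size, emb_dim), dtype=np.float32)                 # centered at 1
noise_lnvar = np.full((mb_size, emb_dim), np.log(0.1), dtype=np.float32)   # log variance

noisy_prev_y = prev_y * F.gaussian(noise_mean, noise_lnvar)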
Example #12
Source File: test_gaussian.py From chainer with MIT License
def test_backward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    y_grad = backend_config.get_array(self.gy)
    eps = backend_config.get_array(
        numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))

    def f(m, v):
        # In case numerical gradient computation is held in more precise
        # dtype than that of backward computation, cast the eps to reuse
        # before the numerical computation.
        eps_ = eps.astype(m.dtype)
        return functions.gaussian(m, v, eps=eps_)

    gradient_check.check_backward(
        f, (m_data, v_data), y_grad, **self.check_backward_options)
Example #13
Source File: nvdm.py From lda2vec with MIT License
def encode(self, bow):
    """ Convert the bag of words vector of shape (n_docs, n_vocab)
    into latent mean log variance vectors.
    """
    lam = F.relu(self.l1(bow))
    pi = F.relu(self.l2(lam))
    mu, log_sigma = F.split_axis(self.mu_logsigma(pi), 2, 1)
    sample = F.gaussian(mu, log_sigma)
    loss = F.gaussian_kl_divergence(mu, log_sigma)
    return sample, loss
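A small sketch of the split used above: a single linear layer emits a vector of width 2 * n_latent, which F.split_axis cuts along axis 1 into a mean half and a log-variance half before sampling (F.gaussian interprets its second argument as the log of the variance). Shapes here are illustrative.

import numpy as np
import chainer.functions as F

h = np.random.randn(4, 6).astype(np.float32)   # pretend 2 * n_latent == 6
mu, ln_var = F.split_axis(h, 2, axis=1)        # two (4, 3) halves
z = F.gaussian(mu, ln_var)                     # reparameterized sample
kl = F.gaussian_kl_divergence(mu, ln_var)      # scalar KL against N(0, I)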
Example #14
Source File: mdn.py From models with MIT License
def sample(self, x):
    pi, mu, log_var = self.get_gaussian_params(x)
    n_batch = pi.shape[0]

    # Choose one of Gaussian means and vars n_batch times
    ps = chainer.backends.cuda.to_cpu(pi.array)
    idx = [np.random.choice(self.gaussian_mixtures, p=p) for p in ps]
    mu = F.get_item(mu, [range(n_batch), idx])
    log_var = F.get_item(log_var, [range(n_batch), idx])

    # Sampling
    z = F.gaussian(mu, log_var)

    return z
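A hedged, self-contained restatement of the mixture sampling scheme above: pick one component per batch row according to the mixing coefficients pi, gather that component's parameters, then draw with F.gaussian. Shapes and values are assumptions, not taken from mdn.py.

import numpy as np
import chainer.functions as F

batch, n_mix, dim = 4, 3, 2
logits = np.random.randn(batch, n_mix)
pi = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)   # mixing coefficients
mu = np.random.randn(batch, n_mix, dim).astype(np.float32)        # component means
log_var = np.zeros((batch, n_mix, dim), dtype=np.float32)         # component log variances

idx = [np.random.choice(n_mix, p=p) for p in pi]                  # one component per row
mu_sel = mu[np.arange(batch), idx]                                # (batch, dim)
log_var_sel = log_var[np.arange(batch), idx]                      # (batch, dim)
z = F.gaussian(mu_sel, log_var_sel)                               # reparameterized draw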
Example #15
Source File: model.py From chainer-gqn with MIT License
def get_inference_posterior(self, t):
    if self.hyperparams.inference_share_posterior:
        return self.inference_posteriors[0]
    return self.inference_posteriors[t]

# def compute_information_gain(self, x, r):
#     xp = cuda
#     h0_gen, c0_gen, u_0, h0_enc, c0_enc = self.generate_initial_state(
#         1, xp)
#     loss_kld = 0
#     hl_enc = h0_enc
#     cl_enc = c0_enc
#     hl_gen = h0_gen
#     cl_gen = c0_gen
#     ul_enc = u_0
#     xq = self.inference_downsampler(x)
#     for l in range(self.num_layers):
#         inference_core = self.get_inference_core(l)
#         inference_posterior = self.get_inference_posterior(l)
#         generation_core = self.get_generation_core(l)
#         generation_piror = self.get_generation_prior(l)
#         h_next_enc, c_next_enc = inference_core.forward_onestep(
#             hl_gen, hl_enc, cl_enc, xq, v, r)
#         mean_z_q = inference_posterior.compute_mean_z(hl_enc)
#         ln_var_z_q = inference_posterior.compute_ln_var_z(hl_enc)
#         ze_l = cf.gaussian(mean_z_q, ln_var_z_q)
#         mean_z_p = generation_piror.compute_mean_z(hl_gen)
#         ln_var_z_p = generation_piror.compute_ln_var_z(hl_gen)
#         h_next_gen, c_next_gen, u_next_enc = generation_core.forward_onestep(
#             hl_gen, cl_gen, ul_enc, ze_l, v, r)
#         kld = gqn.nn.functions.gaussian_kl_divergence(
#             mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p)
#         loss_kld += cf.sum(kld)
#         hl_gen = h_next_gen
#         cl_gen = c_next_gen
#         ul_enc = u_next_enc
#         hl_enc = h_next_enc
#         cl_enc = c_next_enc