Python torch.distributions.Normal() Examples
The following are 30 code examples of torch.distributions.Normal().
You may also want to check out all available functions and classes of the module torch.distributions, or try the search function.
Example #1
Source File: loss.py From 6-PACK with MIT License
def __init__(self, num_key, num_cate):
    super(Loss, self).__init__(True)
    self.num_key = num_key
    self.num_cate = num_cate

    self.oneone = Variable(torch.ones(1)).cuda()

    self.normal = tdist.Normal(torch.tensor([0.0]), torch.tensor([0.0005]))

    self.pconf = torch.ones(num_key) / num_key
    self.pconf = Variable(self.pconf).cuda()

    self.sym_axis = Variable(torch.from_numpy(np.array([0, 1, 0]).astype(np.float32))).cuda().view(1, 3, 1)
    self.threezero = Variable(torch.from_numpy(np.array([0, 0, 0]).astype(np.float32))).cuda()

    self.zeros = torch.FloatTensor([0.0 for j in range(num_key - 1) for i in range(num_key)]).cuda()
    self.select1 = torch.tensor([i for j in range(num_key - 1) for i in range(num_key)]).cuda()
    self.select2 = torch.tensor([(i % num_key) for j in range(1, num_key) for i in range(j, j + num_key)]).cuda()

    self.knn = KNearestNeighbor(1)
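The tiny standard deviation (0.0005) makes self.normal a noise source for small perturbations. A minimal standalone sketch of that pattern (shapes are illustrative, not from 6-PACK):

# Sketch: drawing small perturbations from a zero-mean Normal with a tiny
# standard deviation, as self.normal above does.
import torch
import torch.distributions as tdist

normal = tdist.Normal(torch.tensor([0.0]), torch.tensor([0.0005]))
noise = normal.sample((500, 3))    # shape (500, 3, 1): sample shape plus the 1-d batch shape
print(noise.squeeze(-1).std())     # roughly 0.0005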
Example #2
Source File: utils.py From pyfilter with MIT License
def test_Stacker(self):
    # ===== Define a mix of parameters ====== #
    zerod = Parameter(Normal(0., 1.)).sample_((1000,))
    oned_luring = Parameter(Normal(torch.tensor([0.]), torch.tensor([1.]))).sample_(zerod.shape)
    oned = Parameter(MultivariateNormal(torch.zeros(2), torch.eye(2))).sample_(zerod.shape)

    mu = torch.zeros((3, 3))
    norm = Independent(Normal(mu, torch.ones_like(mu)), 2)
    twod = Parameter(norm).sample_(zerod.shape)

    # ===== Stack ===== #
    params = (zerod, oned, oned_luring, twod)
    stacked = stacker(params, lambda u: u.t_values, dim=1)

    # ===== Verify it's recreated correctly ====== #
    for p, m, ps in zip(params, stacked.mask, stacked.prev_shape):
        v = stacked.concated[..., m]

        if len(p.c_shape) != 0:
            v = v.reshape(*v.shape[:-1], *ps)

        assert (p.t_values == v).all()
Example #3
Source File: utils.py From pyfilter with MIT License
def test_StateDict(self):
    # ===== Define model ===== #
    norm = Normal(0., 1.)
    linear = AffineProcess((f, g), (1., 1.), norm, norm)
    linearobs = AffineObservations((fo, go), (1., 1.), norm)
    model = StateSpaceModel(linear, linearobs)

    # ===== Define filter ===== #
    filt = SISR(model, 100).initialize()

    # ===== Get statedict ===== #
    sd = filt.state_dict()

    # ===== Verify that we don't save multiple instances ===== #
    assert '_model' in sd and '_model' not in sd['_proposal']

    newfilt = SISR(model, 1000).load_state_dict(sd)
    assert newfilt._w_old is not None and newfilt.ssm is newfilt._proposal._model

    # ===== Test same with UKF and verify that we save UT ===== #
    ukf = UKF(model).initialize()
    sd = ukf.state_dict()

    assert '_model' in sd and '_model' not in sd['_ut']
Example #4
Source File: heads.py From rl_algorithms with MIT License
def forward(
    self, x: torch.Tensor, epsilon: float = 1e-6
) -> Tuple[torch.Tensor, ...]:
    """Forward method implementation."""
    mu, _, std = super(TanhGaussianDistParams, self).get_dist_params(x)

    # sampling actions
    dist = Normal(mu, std)
    z = dist.rsample()

    # normalize action and log_prob
    # see appendix C of 'https://arxiv.org/pdf/1812.05905.pdf'
    action = torch.tanh(z)
    log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
    log_prob = log_prob.sum(-1, keepdim=True)

    return action, log_prob, z, mu, std
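The log_prob correction implements the change of variables for the tanh squashing (appendix C of the paper linked in the code): log pi(a) = log p(z) - sum_i log(1 - tanh(z_i)^2). A standalone sketch using only torch.distributions (shapes and the 1e-6 epsilon are illustrative):

# Sketch of the tanh change-of-variables used above.
import torch
from torch.distributions import Normal

mu, std = torch.zeros(4, 2), torch.ones(4, 2)
dist = Normal(mu, std)
z = dist.rsample()                      # reparameterized, so gradients flow
action = torch.tanh(z)                  # squash into (-1, 1)
# log pi(a) = log p(z) - log |d tanh(z)/dz| = log p(z) - log(1 - tanh(z)^2)
log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + 1e-6)
log_prob = log_prob.sum(-1, keepdim=True)   # sum over independent action dims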
Example #5
Source File: filters.py From pyfilter with MIT License
def test_SDE(self):
    def f(x, a, s):
        return -a * x

    def g(x, a, s):
        return s

    em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.), dt=1e-2, num_steps=10)
    model = LinearGaussianObservations(em, scale=1e-3)

    x, y = model.sample_path(500)

    for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
        filt = filt.initialize().longfilter(y)

        means = filt.result.filter_means
        if isinstance(filt, UKF):
            means = means[:, 0]

        self.assertLess(torch.std(x - means), 5e-2)
Example #6
Source File: timeseries.py From pyfilter with MIT License
def test_SDE(self):
    shape = 1000, 100

    a = 1e-2 * torch.ones((shape[0], 1))
    dt = 0.1
    norm = Normal(0., math.sqrt(dt))

    init = Normal(a, 1.)
    sde = AffineEulerMaruyama((f_sde, g_sde), (a, 0.15), init, norm, dt=dt, num_steps=10)

    # ===== Initialize ===== #
    x = sde.i_sample(shape)

    # ===== Propagate ===== #
    num = 100
    samps = [x]
    for t in range(num):
        samps.append(sde.propagate(samps[-1]))

    samps = torch.stack(samps)
    self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

    # ===== Sample path ===== #
    path = sde.sample_path(num + 1, shape)
    self.assertEqual(samps.shape, path.shape)
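AffineEulerMaruyama discretizes an SDE with the Euler-Maruyama scheme; note the increment distribution is Normal(0, sqrt(dt)), matching dW ~ N(0, dt). A generic standalone sketch of the scheme, not pyfilter's API (drift and diffusion constants are illustrative):

# Euler-Maruyama sketch for dX = f(X) dt + g(X) dW.
import math
import torch
from torch.distributions import Normal

dt, num = 0.1, 100
x = torch.zeros(1000)
incr = Normal(0., math.sqrt(dt))        # dW ~ N(0, dt), as `norm` above
for _ in range(num):
    x = x + (-1e-2 * x) * dt + 0.15 * incr.sample(x.shape)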
Example #7
Source File: prob_utils.py From vaeac with MIT License
def normal_parse_params(params, min_sigma=0):
    """
    Take a Tensor (e.g. a neural network output) and return a
    torch.distributions.Normal distribution. This Normal distribution is
    component-wise independent, and its dimensionality depends on the input
    shape. The first half of the channels is the mean of the distribution,
    and the softplus of the second half is the std (sigma), so there are no
    restrictions on the input tensor.

    min_sigma is the minimal value of sigma, i.e. if the softplus above is
    less than min_sigma, then sigma is clipped from below at min_sigma.
    This regularization is required for numerical stability and may be
    considered a neural network architecture choice without any change to
    the probabilistic model.
    """
    n = params.shape[0]
    d = params.shape[1]
    mu = params[:, :d // 2]

    sigma_params = params[:, d // 2:]
    sigma = softplus(sigma_params)
    sigma = sigma.clamp(min=min_sigma)

    distr = Normal(mu, sigma)
    return distr
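A hypothetical usage sketch, assuming the function above is in scope (along with torch, softplus, and Normal): a network output with 2*d channels is parsed into a d-dimensional diagonal Normal.

# Hypothetical usage of normal_parse_params above.
import torch

params = torch.randn(8, 10)          # batch of 8, 2*d = 10 -> 5-dim Normal
distr = normal_parse_params(params, min_sigma=1e-3)
assert distr.loc.shape == (8, 5)     # first half of the channels is the mean
sample = distr.rsample()             # differentiable sample, shape (8, 5)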
Example #8
Source File: model.py From pyfilter with MIT License
def test_Sample(self):
    # ==== Hidden ==== #
    norm = Normal(0., 1.)
    linear = AffineProcess((f, g), (1., 1.), norm, norm)

    # ==== Observable ===== #
    obs = AffineObservations((fo, go), (1., 0.), norm)

    # ===== Model ===== #
    mod = StateSpaceModel(linear, obs)

    # ===== Sample ===== #
    x, y = mod.sample_path(100)

    diff = ((x - y) ** 2).mean().sqrt()

    assert x.shape == y.shape and x.shape[0] == 100 and diff < 1e-3
Example #9
Source File: ou.py From pyfilter with MIT License
def __init__(self, kappa, gamma, sigma, ndim: int, dt: float):
    """
    Implements the Ornstein-Uhlenbeck process.
    :param kappa: The reversion parameter
    :param gamma: The mean parameter
    :param sigma: The standard deviation
    :param ndim: The number of dimensions for the Brownian motion
    """

    def f(x: torch.Tensor, reversion: object, level: object, std: object):
        return level + (x - level) * torch.exp(-reversion * dt)

    def g(x: torch.Tensor, reversion: object, level: object, std: object):
        return std / (2 * reversion).sqrt() * (1 - torch.exp(-2 * reversion * dt)).sqrt()

    if ndim > 1:
        dist = Independent(Normal(torch.zeros(ndim), torch.ones(ndim)), 1)
    else:
        dist = Normal(0., 1)

    super().__init__((f, g), (kappa, gamma, sigma), dist, dist)
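Here f and g encode the exact OU transition rather than an Euler step: X_{t+dt} | X_t ~ N(gamma + (X_t - gamma) e^(-kappa dt), sigma^2 / (2 kappa) (1 - e^(-2 kappa dt))). A standalone sketch of one exact transition step (parameter values are illustrative):

# One exact OU transition step using only torch.
import torch
from torch.distributions import Normal

kappa, gamma, sigma, dt = 0.5, 0.0, 0.2, 0.1
x = torch.zeros(1000)
mean = gamma + (x - gamma) * torch.exp(torch.tensor(-kappa * dt))
std = sigma / (2 * kappa) ** 0.5 * (1 - torch.exp(torch.tensor(-2 * kappa * dt))) ** 0.5
x_next = Normal(mean, std).sample()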
Example #10
Source File: timeseries.py From pyfilter with MIT License
def test_MultiDimensional(self):
    mu = torch.zeros(2)
    scale = torch.ones_like(mu)

    shape = 1000, 100

    mvn = Independent(Normal(mu, scale), 1)
    mvn = AffineProcess((f, g), (1., 1.), mvn, mvn)

    # ===== Initialize ===== #
    x = mvn.i_sample(shape)

    # ===== Propagate ===== #
    num = 100
    samps = [x]
    for t in range(num):
        samps.append(mvn.propagate(samps[-1]))

    samps = torch.stack(samps)
    self.assertEqual(samps.size(), torch.Size([num + 1, *shape, *mu.shape]))

    # ===== Sample path ===== #
    path = mvn.sample_path(num + 1, shape)
    self.assertEqual(samps.shape, path.shape)
Example #11
Source File: tanh_normal.py From garage with MIT License
def _from_distribution(cls, new_normal):
    """Construct a new TanhNormal distribution from a normal distribution.

    Args:
        new_normal (Independent(Normal)): underlying normal dist for
            the new TanhNormal distribution.

    Returns:
        TanhNormal: A new distribution whose underlying normal dist
            is new_normal.

    """
    # pylint: disable=protected-access
    new = cls(torch.zeros(1), torch.zeros(1))
    new._normal = new_normal
    return new
Example #12
Source File: sir.py From pyfilter with MIT License
def __init__(self, theta, initial_dist, dt, num_steps=10):
    """
    Similar to `OneFactorFractionalStochasticSIR`, but we now have two
    sources of randomness, originating from shocks to both of the
    parameters `beta` and `gamma`.
    :param theta: The parameters (beta, gamma, sigma, eta)
    """
    if initial_dist.event_shape != torch.Size([3]):
        raise NotImplementedError('Must be of size 3!')

    def g(x, gamma, beta, sigma, eps):
        s = torch.zeros((*x.shape[:-1], 3, 2), device=x.device)

        s[..., 0, 0] = -sigma * x[..., 0] * x[..., 1]
        s[..., 1, 0] = -s[..., 0, 0]
        s[..., 1, 1] = -eps * x[..., 1]
        s[..., 2, 1] = -s[..., 1, 1]

        return s

    f_ = lambda u, beta, gamma, sigma, eps: f(u, beta, gamma, sigma)
    inc_dist = Independent(Normal(torch.zeros(2), math.sqrt(dt) * torch.ones(2)), 1)

    super().__init__((f_, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps)
Example #13
Source File: tanh_normal.py From garage with MIT License
def rsample_with_pre_tanh_value(self, sample_shape=torch.Size()):
    """Return a sample, sampled from this TanhNormal distribution.

    Returns the sampled value before the tanh transform is applied and the
    sampled value with the tanh transform applied to it.

    Args:
        sample_shape (list): shape of the return.

    Note:
        Gradients pass through this operation.

    Returns:
        torch.Tensor: Samples from this distribution.
        torch.Tensor: Samples from the underlying
            :obj:`torch.distributions.Normal` distribution, prior to being
            transformed with `tanh`.

    """
    z = self._normal.rsample(sample_shape)
    return z, torch.tanh(z)
Example #14
Source File: linear.py From pyfilter with MIT License
def __init__(self, hidden, a=1., scale=1.):
    """
    Implements a state space model that's linear in the observation
    equation but has arbitrary dynamics in the state process.
    :param hidden: The hidden dynamics
    :param a: The A-matrix
    :param scale: The variance of the observations
    """
    # ===== Convoluted way to decide number of dimensions ===== #
    dim, is_1d = _get_shape(a)

    # ====== Define distributions ===== #
    n = dists.Normal(0., 1.) if is_1d else dists.Independent(dists.Normal(torch.zeros(dim), torch.ones(dim)), 1)

    if not isinstance(scale, (torch.Tensor, float, dists.Distribution)):
        raise ValueError(f'`scale` parameter must be numeric type!')

    super().__init__(hidden, a, scale, n)
Example #15
Source File: timeseries.py From pyfilter with MIT License
def test_BatchedParameter(self):
    norm = Normal(0., 1.)
    shape = 1000, 100

    a = torch.ones((shape[0], 1))

    init = Normal(a, 1.)
    linear = AffineProcess((f, g), (a, 1.), init, norm)

    # ===== Initialize ===== #
    x = linear.i_sample(shape)

    # ===== Propagate ===== #
    num = 100
    samps = [x]
    for t in range(num):
        samps.append(linear.propagate(samps[-1]))

    samps = torch.stack(samps)
    self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

    # ===== Sample path ===== #
    path = linear.sample_path(num + 1, shape)
    self.assertEqual(samps.shape, path.shape)
Example #16
Source File: timeseries.py From pyfilter with MIT License
def test_LinearBatch(self):
    norm = Normal(0., 1.)
    linear = AffineProcess((f, g), (1., 1.), norm, norm)

    # ===== Initialize ===== #
    shape = 1000, 100
    x = linear.i_sample(shape)

    # ===== Propagate ===== #
    num = 100
    samps = [x]
    for t in range(num):
        samps.append(linear.propagate(samps[-1]))

    samps = torch.stack(samps)
    self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

    # ===== Sample path ===== #
    path = linear.sample_path(num + 1, shape)
    self.assertEqual(samps.shape, path.shape)
Example #17
Source File: vae.py From torchsupport with MIT License
def normal_kl_loss(mean, logvar, r_mean=None, r_logvar=None):
    if r_mean is None or r_logvar is None:
        result = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp(), dim=0)
    else:
        distribution = Normal(mean, torch.exp(0.5 * logvar))
        reference = Normal(r_mean, torch.exp(0.5 * r_logvar))
        result = kl_divergence(distribution, reference)
    return result.sum()
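The first branch is the closed-form KL divergence between N(mean, exp(logvar)) and a standard Normal; the else branch computes the same quantity via kl_divergence. A standalone sketch checking that the two agree (summed rather than batch-averaged, for brevity):

# Sketch: closed-form KL against a standard Normal equals kl_divergence.
import torch
from torch.distributions import Normal, kl_divergence

mean, logvar = torch.randn(8, 4), torch.randn(8, 4)
closed = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
q = Normal(mean, torch.exp(0.5 * logvar))
p = Normal(torch.zeros_like(mean), torch.ones_like(mean))
assert torch.allclose(closed, kl_divergence(q, p).sum(), atol=1e-4)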
Example #18
Source File: learners.py From TorchFusion with MIT License
def train(self, train_loader, gen_optimizer, disc_optimizer, latent_size,
          relative_mode=True, dist=distribution.Normal(0, 1), num_classes=0,
          num_samples=5, **kwargs):
    self.latent_size = latent_size
    self.dist = dist
    self.classes = num_classes
    self.num_samples = num_samples
    self.conditional = (num_classes > 0)
    self.relative_mode = relative_mode

    super().__train_loop__(train_loader, gen_optimizer, disc_optimizer, **kwargs)
Example #19
Source File: vae.py From torchsupport with MIT License
def sample(self, mean, logvar, probabilities):
    normal = Normal(mean, torch.exp(0.5 * logvar))
    categorical = RelaxedOneHotCategorical(
        self.temperature, probabilities
    )
    return normal.rsample(), categorical.rsample()
Example #20
Source File: vae.py From torchsupport with MIT License
def sample(self, mean, logvar):
    distribution = Normal(mean, torch.exp(0.5 * logvar))
    sample = distribution.rsample()
    return sample
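rsample() implements the reparameterization trick, so gradients flow through the sample back to mean and logvar; it is equivalent to the manual mean + eps * std form used in Examples #22 and #23. A standalone sketch (shapes are illustrative):

# Sketch: rsample() vs. the manual reparameterization trick.
import torch
from torch.distributions import Normal

mean = torch.randn(8, 4, requires_grad=True)
logvar = torch.randn(8, 4)
std = torch.exp(0.5 * logvar)
sample = Normal(mean, std).rsample()          # gradients flow back to mean
manual = mean + torch.randn_like(mean) * std  # same distribution, same trick
sample.sum().backward()                       # works; .sample() would not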
Example #21
Source File: rlwe.py From PySyft with Apache License 2.0
def sample_poly_normal(param):
    """Generate a polynomial from a normal distribution, where negative
    values are represented as (modulus - value), a positive value.

    Args:
        param (EncryptionParam): Encryption parameters.

    Returns:
        A 2-dim list of integers drawn from the normal distribution.
    """
    coeff_modulus = param.coeff_modulus
    coeff_mod_size = len(coeff_modulus)
    coeff_count = param.poly_modulus

    result = [0] * coeff_mod_size
    for i in range(coeff_mod_size):
        result[i] = [0] * coeff_count

    for i in range(coeff_count):
        noise = Normal(th.tensor([0.0]), th.tensor(NOISE_STANDARD_DEVIATION))
        noise = int(noise.sample().item())
        if noise > 0:
            for j in range(coeff_mod_size):
                result[j][i] = noise
        elif noise < 0:
            noise = -noise
            for j in range(coeff_mod_size):
                result[j][i] = coeff_modulus[j] - noise
        else:
            for j in range(coeff_mod_size):
                result[j][i] = 0
    return result
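The branches above encode signed noise in the ring Z_q: a negative value n is stored as q - |n|, which is just n mod q. A plain-Python illustration (the modulus value here is made up):

# Sketch of the signed-to-modular mapping used above.
q = 1032193                     # illustrative coefficient modulus
for noise in (3, -3, 0):
    stored = noise % q          # 3 -> 3, -3 -> q - 3, 0 -> 0
    print(noise, stored)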
Example #22
Source File: set_mnist_ebm.py From torchsupport with MIT License
def forward(self, image, condition):
    image = image.view(-1, 28 * 28)
    out = self.input_process(self.input(image))
    mean, logvar = self.condition(condition)
    # distribution = Normal(mean, torch.exp(0.5 * logvar))
    sample = mean + torch.randn_like(mean) * torch.exp(0.5 * logvar)  # distribution.rsample()
    cond = self.postprocess(sample)
    cond = torch.repeat_interleave(cond, 5, dim=0)
    result = self.combine(torch.cat((out, cond), dim=1))
    return result, (mean, logvar)
Example #23
Source File: set_yeast_ebm.py From torchsupport with MIT License
def forward(self, image, condition):
    image = image.view(-1, 3, 64, 64)
    out = self.input_process(self.input(image))
    mean, logvar = self.condition(condition)
    # distribution = Normal(mean, torch.exp(0.5 * logvar))
    sample = mean + torch.randn_like(mean) * torch.exp(0.5 * logvar)  # distribution.rsample()
    cond = self.postprocess(sample)
    cond = torch.repeat_interleave(cond, 5, dim=0)
    result = self.combine(torch.cat((out, cond), dim=1))
    return result, (mean, logvar)
Example #24
Source File: set_mnist_gan.py From torchsupport with MIT License
def forward(self, data):
    support, values = data
    mean, logvar = self.encoder(support)
    distribution = Normal(mean, torch.exp(0.5 * logvar))
    latent_sample = distribution.rsample()
    latent_sample = torch.repeat_interleave(latent_sample, self.size, dim=0)
    combined = torch.cat((values.view(-1, 28 * 28), latent_sample), dim=1)
    return self.verdict(combined)
Example #25
Source File: set_mnist_gan.py From torchsupport with MIT License
def sample(self, data):
    support, values = data
    mean, logvar = self.condition(support)
    distribution = Normal(mean, torch.exp(0.5 * logvar))
    latent_sample = distribution.rsample()
    latent_sample = torch.repeat_interleave(latent_sample, self.size, dim=0)
    local_samples = torch.randn(support.size(0) * self.size, 16)
    sample = torch.cat((latent_sample, local_samples), dim=1)
    return (support, sample), (mean, logvar)
Example #26
Source File: distributions.py From leap with MIT License
def rsample(self, return_pretanh_value=False):
    """
    Sampling in the reparameterization case.
    """
    z = self.normal_mean + \
        self.normal_std * \
        ptu.Variable(
            Normal(torch.zeros(self.normal_mean.size()),
                   torch.ones(self.normal_std.size())).sample(),
            requires_grad=False)
    if return_pretanh_value:
        return torch.tanh(z), z
    else:
        return torch.tanh(z)
Example #27
Source File: distributions.py From leap with MIT License
def __init__(self, normal_mean, normal_std, epsilon=1e-6):
    """
    :param normal_mean: Mean of the normal distribution
    :param normal_std: Std of the normal distribution
    :param epsilon: Numerical stability epsilon when computing log-prob.
    """
    self.normal_mean = normal_mean
    self.normal_std = normal_std
    self.normal = Normal(normal_mean, normal_std)
    self.epsilon = epsilon
Example #28
Source File: toy_runner.py From ncsn with GNU General Public License v3.0
def __init__(self, radius, width):
    self.radius = radius
    self.width = width
    self.r_dist = Normal(loc=radius, scale=width)
Example #29
Source File: tanh_normal.py From garage with MIT License
def __init__(self, loc, scale):
    self._normal = Independent(Normal(loc, scale), 1)
    super().__init__()
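Wrapping the Normal in Independent(..., 1) turns the last batch dimension into an event dimension, so log_prob sums over components instead of returning one value per component. A short standalone sketch of that behavior:

# Sketch: Independent moves the last batch dimension into the event shape.
import torch
from torch.distributions import Normal, Independent

loc, scale = torch.zeros(3), torch.ones(3)
base = Normal(loc, scale)            # batch_shape (3,), event_shape ()
diag = Independent(base, 1)          # batch_shape (),  event_shape (3,)
x = torch.randn(3)
assert torch.allclose(diag.log_prob(x), base.log_prob(x).sum())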
Example #30
Source File: vae.py From scVI with MIT License
def sample_from_posterior_z(
    self, x, y=None, give_mean=False, n_samples=5000
) -> torch.Tensor:
    """Samples the tensor of latent values from the posterior.

    Parameters
    ----------
    x
        tensor of values with shape ``(batch_size, n_input)``
    y
        tensor of cell-type labels with shape ``(batch_size, n_labels)``
        (Default value = None)
    give_mean
        is True when we want the mean of the posterior distribution rather
        than sampling (Default value = False)
    n_samples
        how many MC samples to average over for the transformed mean
        (Default value = 5000)

    Returns
    -------
    type
        tensor of shape ``(batch_size, n_latent)``
    """
    if self.log_variational:
        x = torch.log(1 + x)
    qz_m, qz_v, z = self.z_encoder(x, y)  # y only used in VAEC
    if give_mean:
        if self.latent_distribution == "ln":
            samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])
            z = self.z_encoder.z_transformation(samples)
            z = z.mean(dim=0)
        else:
            z = qz_m
    return z
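The "ln" branch averages transformed Monte Carlo samples because, for a nonlinear transform T, E[T(z)] is in general not T(E[z]). A standalone sketch of that pattern (softmax stands in for z_encoder.z_transformation; shapes are illustrative):

# MC estimate of the mean of a transformed posterior sample.
import torch
from torch.distributions import Normal

qz_m, qz_v = torch.randn(8, 5), torch.rand(8, 5) + 0.1
samples = Normal(qz_m, qz_v.sqrt()).sample([1000])    # (1000, 8, 5)
mc_mean = torch.softmax(samples, dim=-1).mean(dim=0)  # (8, 5)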