Python torch.distributions.Independent() Examples
The following are 18 code examples of torch.distributions.Independent(), collected from open-source projects. Each example is shown with its original project and source file. You may also want to check out the other available functions and classes of the torch.distributions module.
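
As a quick orientation before the examples: Independent reinterprets some of a distribution's batch dimensions as event dimensions, so log_prob sums over them instead of returning one value per dimension. A minimal sketch of that behaviour:

import torch
from torch.distributions import Independent, Normal

base = Normal(torch.zeros(3), torch.ones(3))  # batch_shape=[3], event_shape=[]
diag = Independent(base, 1)                   # batch_shape=[],  event_shape=[3]

x = torch.randn(3)
# base.log_prob(x) has shape [3]; diag.log_prob(x) is a scalar equal to its sum
assert torch.allclose(diag.log_prob(x), base.log_prob(x).sum())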
Example #1
Source File: linear.py From pyfilter with MIT License
def __init__(self, hidden, a=1., scale=1.):
    """
    Implements a State Space model that's linear in the observation equation but has arbitrary
    dynamics in the state process.
    :param hidden: The hidden dynamics
    :param a: The A-matrix
    :param scale: The variance of the observations
    """

    # ===== Convoluted way to decide number of dimensions ===== #
    dim, is_1d = _get_shape(a)

    # ===== Define distributions ===== #
    n = dists.Normal(0., 1.) if is_1d else dists.Independent(dists.Normal(torch.zeros(dim), torch.ones(dim)), 1)

    if not isinstance(scale, (torch.Tensor, float, dists.Distribution)):
        raise ValueError('`scale` parameter must be of numeric type!')

    super().__init__(hidden, a, scale, n)
Example #2
Source File: normal_mlp.py From pytorch-maml-rl with MIT License
def forward(self, input, params=None):
    if params is None:
        params = OrderedDict(self.named_parameters())

    output = input
    for i in range(1, self.num_layers):
        output = F.linear(output,
                          weight=params['layer{0}.weight'.format(i)],
                          bias=params['layer{0}.bias'.format(i)])
        output = self.nonlinearity(output)

    mu = F.linear(output, weight=params['mu.weight'], bias=params['mu.bias'])
    scale = torch.exp(torch.clamp(params['sigma'], min=self.min_log_std))

    return Independent(Normal(loc=mu, scale=scale), 1)
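
For context, wrapping the Normal in Independent(..., 1) makes the whole action vector a single event, so the policy returns one log-probability per action rather than one per coordinate. A small usage sketch with illustrative shapes (not taken from the project):

import torch
from torch.distributions import Independent, Normal

pi = Independent(Normal(loc=torch.zeros(32, 4), scale=torch.ones(32, 4)), 1)
actions = pi.sample()             # shape [32, 4]
log_probs = pi.log_prob(actions)  # shape [32]: one value per action vector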
Example #3
Source File: sir.py From pyfilter with MIT License
def __init__(self, theta, initial_dist, dt, num_steps=10):
    """
    Similar to `OneFactorFractionalStochasticSIR`, but we now have two sources of randomness
    originating from shocks to both parameters `beta` and `gamma`.
    :param theta: The parameters (beta, gamma, sigma, eta)
    """

    if initial_dist.event_shape != torch.Size([3]):
        raise NotImplementedError('Must be of size 3!')

    def g(x, gamma, beta, sigma, eps):
        s = torch.zeros((*x.shape[:-1], 3, 2), device=x.device)
        s[..., 0, 0] = -sigma * x[..., 0] * x[..., 1]
        s[..., 1, 0] = -s[..., 0, 0]
        s[..., 1, 1] = -eps * x[..., 1]
        s[..., 2, 1] = -s[..., 1, 1]
        return s

    f_ = lambda u, beta, gamma, sigma, eps: f(u, beta, gamma, sigma)

    inc_dist = Independent(Normal(torch.zeros(2), math.sqrt(dt) * torch.ones(2)), 1)

    super().__init__((f_, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps)
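
A note on the increment distribution: inc_dist is a standard-normal vector scaled by sqrt(dt), which matches the distribution of Brownian-motion increments over a step of length dt, i.e. dW ~ N(0, dt * I). That is what an Euler-Maruyama step x_{t+dt} = x_t + f(x_t) dt + g(x_t) dW would expect; the stepping itself is presumably handled by the parent class, so this reading is an inference from the code rather than documented behaviour.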
Example #4
Source File: ou.py From pyfilter with MIT License
def __init__(self, kappa, gamma, sigma, ndim: int, dt: float):
    """
    Implements the Ornstein-Uhlenbeck process.
    :param kappa: The reversion parameter
    :param gamma: The mean parameter
    :param sigma: The standard deviation
    :param ndim: The number of dimensions for the Brownian motion
    """

    def f(x: torch.Tensor, reversion, level, std):
        return level + (x - level) * torch.exp(-reversion * dt)

    def g(x: torch.Tensor, reversion, level, std):
        return std / (2 * reversion).sqrt() * (1 - torch.exp(-2 * reversion * dt)).sqrt()

    if ndim > 1:
        dist = Independent(Normal(torch.zeros(ndim), torch.ones(ndim)), 1)
    else:
        dist = Normal(0., 1.)

    super().__init__((f, g), (kappa, gamma, sigma), dist, dist)
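
For reference, f and g here implement the exact (not Euler-discretised) transition of the Ornstein-Uhlenbeck process: conditional on x_t, the state at t + dt is Gaussian with mean gamma + (x_t - gamma) * exp(-kappa * dt) and standard deviation sigma * sqrt((1 - exp(-2 * kappa * dt)) / (2 * kappa)), which is exactly what the two functions return.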
Example #5
Source File: timeseries.py From pyfilter with MIT License
def test_MultiDimensional(self):
    mu = torch.zeros(2)
    scale = torch.ones_like(mu)

    shape = 1000, 100

    mvn = Independent(Normal(mu, scale), 1)
    mvn = AffineProcess((f, g), (1., 1.), mvn, mvn)

    # ===== Initialize ===== #
    x = mvn.i_sample(shape)

    # ===== Propagate ===== #
    num = 100
    samps = [x]
    for t in range(num):
        samps.append(mvn.propagate(samps[-1]))

    samps = torch.stack(samps)
    self.assertEqual(samps.size(), torch.Size([num + 1, *shape, *mu.shape]))

    # ===== Sample path ===== #
    path = mvn.sample_path(num + 1, shape)
    self.assertEqual(samps.shape, path.shape)
Example #6
Source File: linearized.py From pyfilter with MIT License
def construct(self, y, x):
    # ===== Mean of propagated dist ===== #
    h_loc, h_scale = self._model.hidden.mean_scale(x)
    h_loc.requires_grad_(True)

    # ===== Get gradients ===== #
    logl = self._model.observable.log_prob(y, h_loc) + self._model.hidden.log_prob(h_loc, x)
    g = grad(logl, h_loc, grad_outputs=torch.ones_like(logl), create_graph=self._alpha is None)[-1]

    # ===== Define mean and scale ===== #
    if self._alpha is None:
        step = -1 / grad(g, h_loc, grad_outputs=torch.ones_like(g))[-1]
        std = step.sqrt()
    else:
        std = h_scale.detach()
        step = self._alpha

    mean = h_loc.detach() + step * g.detach()
    x.detach_()

    if self._model.hidden_ndim == 0:
        self._kernel = Normal(mean, std)
    else:
        self._kernel = Independent(Normal(mean, std), self._model.hidden_ndim)

    return self
Example #7
Source File: distributions.py From texar-pytorch with Apache License 2.0
def MultivariateNormalDiag(loc, scale_diag):
    if loc.dim() < 1:
        raise ValueError("loc must be at least one-dimensional.")
    return Independent(Normal(loc, scale_diag), 1)
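
For a diagonal covariance, this construction yields the same density as a full MultivariateNormal while avoiding materialising the covariance matrix. A quick sanity check (my own snippet, not part of texar-pytorch):

import torch
from torch.distributions import Independent, Normal, MultivariateNormal

loc, scale_diag = torch.randn(5), torch.rand(5) + 0.1
diag = Independent(Normal(loc, scale_diag), 1)
full = MultivariateNormal(loc, covariance_matrix=torch.diag(scale_diag ** 2))

x = torch.randn(5)
assert torch.allclose(diag.log_prob(x), full.log_prob(x), atol=1e-5)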
Example #8
Source File: torch_utils.py From pytorch-maml-rl with MIT License
def detach_distribution(pi):
    if isinstance(pi, Independent):
        distribution = Independent(detach_distribution(pi.base_dist),
                                   pi.reinterpreted_batch_ndims)
    elif isinstance(pi, Categorical):
        distribution = Categorical(logits=pi.logits.detach())
    elif isinstance(pi, Normal):
        distribution = Normal(loc=pi.loc.detach(), scale=pi.scale.detach())
    else:
        raise NotImplementedError('Only `Categorical`, `Independent` and '
                                  '`Normal` policies are valid policies. Got '
                                  '`{0}`.'.format(type(pi)))
    return distribution
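
To see why detaching is useful, consider a TRPO/MAML-style update where the KL divergence is measured against a frozen copy of the old policy so that gradients flow only through the new one. A hedged sketch using the function above (it assumes a PyTorch version that registers a KL for Independent/Independent pairs, which recent releases do):

import torch
from torch.distributions import Independent, Normal, kl_divergence

loc = torch.zeros(4, requires_grad=True)
pi = Independent(Normal(loc=loc, scale=torch.ones(4)), 1)
old_pi = detach_distribution(pi)   # frozen copy, no gradient path

kl = kl_divergence(old_pi, pi)
kl.backward()                      # gradients reach `loc` only through `pi`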
Example #9
Source File: likelihood_eval.py From latent_ode with MIT License
def gaussian_log_likelihood(mu_2d, data_2d, obsrv_std, indices=None):
    n_data_points = mu_2d.size()[-1]

    if n_data_points > 0:
        gaussian = Independent(Normal(loc=mu_2d, scale=obsrv_std.repeat(n_data_points)), 1)
        log_prob = gaussian.log_prob(data_2d)
        log_prob = log_prob / n_data_points
    else:
        log_prob = torch.zeros([1]).to(get_device(data_2d)).squeeze()

    return log_prob
Example #10
Source File: probabilistic_unet.py From Probabilistic-Unet-Pytorch with Apache License 2.0
def forward(self, input, segm=None):
    # If segmentation is not none, concatenate the mask to the channel axis of the input
    if segm is not None:
        self.show_img = input
        self.show_seg = segm
        input = torch.cat((input, segm), dim=1)
        self.show_concat = input
        self.sum_input = torch.sum(input)

    encoding = self.encoder(input)
    self.show_enc = encoding

    # We only want the mean of the resulting h x w image
    encoding = torch.mean(encoding, dim=2, keepdim=True)
    encoding = torch.mean(encoding, dim=3, keepdim=True)

    # Convert encoding to 2 x latent dim and split up for mu and log_sigma
    mu_log_sigma = self.conv_layer(encoding)

    # We squeeze the second dimension twice, since otherwise it won't work when batch size is equal to 1
    mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)
    mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)

    mu = mu_log_sigma[:, :self.latent_dim]
    log_sigma = mu_log_sigma[:, self.latent_dim:]

    # This is a multivariate normal with diagonal covariance matrix sigma
    # https://github.com/pytorch/pytorch/pull/11178
    dist = Independent(Normal(loc=mu, scale=torch.exp(log_sigma)), 1)
    return dist
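
Because Normal supports reparameterised sampling and Independent inherits it, latent codes drawn from the returned distribution keep gradients flowing back into mu and log_sigma, which VAE-style objectives like this one depend on. A minimal demonstration of that property (shapes are illustrative):

import torch
from torch.distributions import Independent, Normal

mu = torch.zeros(2, 6, requires_grad=True)
dist = Independent(Normal(loc=mu, scale=torch.ones(2, 6)), 1)

z = dist.rsample()   # differentiable sample of shape [2, 6]
z.sum().backward()   # gradients propagate back to `mu`
assert mu.grad is not None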
Example #11
Source File: timeseries.py From pyfilter with MIT License
def test_StochasticSIR(self):
    dist = Independent(Binomial(torch.tensor([1000, 1, 0]), torch.tensor([1, 1, 1e-6])), 1)
    sir = m.StochasticSIR((0.1, 0.05, 0.01), dist, 1e-1)

    x = sir.sample_path(1000, 10)
    self.assertEqual(x.shape, torch.Size([1000, 10, 3]))
Example #12
Source File: meanfield.py From pyfilter with MIT License
def dist(self):
    return Independent(Normal(self._mean, self._log_std.exp()), self._model.ndim + 1)
Example #13
Source File: sir.py From pyfilter with MIT License
def __init__(self, theta, initial_dist, dt, num_steps=10):
    """
    Implements a two factor stochastic SEIRD model, inspired by the blog:
    https://towardsdatascience.com/infectious-disease-modelling-beyond-the-basic-sir-model-216369c584c4
    and the models above.
    :param theta: The parameters of the model. Corresponds to (beta, gamma, delta, alpha, rho, sigma, eta)
    """

    def f(x, beta, gamma, delta, alpha, rho, sigma, eps):
        s = -beta * x[..., 0] * x[..., 2]
        e = -s - delta * x[..., 1]
        r = (1 - alpha) * gamma * x[..., 2]
        i = delta * x[..., 1] - r - alpha * rho * x[..., 2]
        d = alpha * rho * x[..., 2]

        return concater(s, e, i, r, d)

    def g(x, beta, gamma, delta, alpha, rho, sigma, eps):
        s = torch.zeros((*x.shape[:-1], 5, 2), device=x.device)
        s[..., 0, 0] = -sigma * x[..., 0] * x[..., 2]
        s[..., 1, 0] = -s[..., 0, 0]
        s[..., 3, 1] = eps * (1 - alpha) * x[..., 2]
        s[..., 2, 1] = -s[..., 3, 1] - alpha * eps * x[..., 2]
        s[..., 4, 1] = alpha * eps * x[..., 2]

        return s

    if initial_dist.event_shape != torch.Size([5]):
        raise NotImplementedError('Must be of size 5!')

    inc_dist = Independent(Normal(torch.zeros(2), math.sqrt(dt) * torch.ones(2)), 1)

    super().__init__((f, g), theta, initial_dist, inc_dist, dt, num_steps=num_steps)
Example #14
Source File: sir.py From pyfilter with MIT License
def __init__(self, theta, initial_dist, dt, num_steps=10):
    """
    Similar to `TwoFactorSIR`, but we now have three sources of randomness, as well as
    incorporating death rates.
    :param theta: The parameters (beta, gamma, alpha, rho, sigma, eta, nu)
    """

    if initial_dist.event_shape != torch.Size([4]):
        raise NotImplementedError('Must be of size 4!')

    def f_(x, beta, gamma, alpha, rho, sigma, eps, nu):
        s = -beta * x[..., 0] * x[..., 1]
        r = (1 - alpha) * gamma * x[..., 1]
        i = -s - r - alpha * rho * x[..., 1]
        d = alpha * rho * x[..., 1]

        return concater(s, i, r, d)

    def g(x, beta, gamma, alpha, rho, sigma, eps, nu):
        s = torch.zeros((*x.shape[:-1], 4, 3), device=x.device)
        s[..., 0, 0] = -sigma * x[..., 0] * x[..., 1]
        s[..., 1, 0] = -s[..., 0, 0]
        s[..., 1, 1] = -eps * (1 - alpha) * x[..., 1]
        s[..., 1, 2] = -alpha * nu * x[..., 1]
        s[..., 2, 1] = -s[..., 1, 1]
        s[..., 3, 2] = -s[..., 1, 2]

        return s

    inc_dist = Independent(Normal(torch.zeros(3), math.sqrt(dt) * torch.ones(3)), 1)

    super().__init__((f_, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps)
Example #15
Source File: sir.py From pyfilter with MIT License
def prop_state(x, beta, gamma, eta, dt):
    f = _f(x, beta, gamma, eta, dt)

    bins = Independent(Binomial(x[..., :-1], f), 1)
    samp = bins.sample()

    s = x[..., 0] - samp[..., 0]
    i = x[..., 1] + samp[..., 0] - samp[..., 1]
    r = x[..., 2] + samp[..., 1]

    return concater(s, i, r)
Example #16
Source File: affine.py From pyfilter with MIT License
def __init__(self, std: Union[torch.Tensor, float, Distribution]):
    """
    Defines a random walk.
    :param std: The vector of standard deviations
    :type std: torch.Tensor|float|Distribution
    """

    if not isinstance(std, torch.Tensor):
        normal = Normal(0., 1.)
    else:
        normal = Normal(0., 1.) if std.shape[-1] < 2 else Independent(Normal(torch.zeros_like(std), std), 1)

    super().__init__((_f, _g), (std,), normal, normal)
Example #17
Source File: meanfield.py From pyfilter with MIT License
def dist(self):
    return Independent(Normal(self._mean, self._log_std.exp()), 1)
Example #18
Source File: inference.py From pyfilter with MIT License
def test_Inference(self):
    # ===== Distributions ===== #
    dist = Normal(0., 1.)
    mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)

    # ===== Define model ===== #
    linear = AffineProcess((f, g), (1., 0.25), dist, dist)
    model = LinearGaussianObservations(linear, scale=0.1)

    mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)
    mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)

    # ===== Test for multiple models ===== #
    priors = Exponential(1.), LogNormal(0., 1.)

    hidden1d = AffineProcess((f, g), priors, dist, dist)
    oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)

    hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)
    twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))

    particles = 1000

    # ===== Run inference ===== #
    for trumod, model in [(model, oned), (mvnmodel, twod)]:
        x, y = trumod.sample_path(1000)

        algs = [
            (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),
            (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})
        ]

        for alg, props in algs:
            alg = alg(**props).initialize()
            alg = alg.fit(y)

            w = normalize(alg._w_rec if hasattr(alg, '_w_rec') else torch.ones(particles))

            tru_params = trumod.hidden.theta._cont + trumod.observable.theta._cont
            inf_params = alg.filter.ssm.hidden.theta._cont + alg.filter.ssm.observable.theta._cont

            for trup, p in zip(tru_params, inf_params):
                if not p.trainable:
                    continue

                kde = p.get_kde(weights=w)

                transed = p.bijection.inv(trup)
                densval = kde.logpdf(transed.numpy().reshape(-1, 1))
                priorval = p.distr.log_prob(trup)

                assert (densval > priorval.numpy()).all()