Python torch.distributions.MultivariateNormal() Examples
The following are 25 code examples of torch.distributions.MultivariateNormal(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out all available functions and classes of the torch.distributions module.
Example #1
Source File: test_action_head.py From vel with MIT License
def test_kl_divergence_diag_gaussian():
    """ Test KL divergence between multivariate gaussian distributions with a diagonal covariance matrix """
    head = DiagGaussianActionHead(1, 5)

    distrib1 = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    distrib2 = d.MultivariateNormal(torch.tensor([0.3, 0.7]), covariance_matrix=torch.tensor([[1.8, 0.0], [0.0, 5.5]]))

    pd_params1 = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()
    pd_params2 = torch.tensor([[0.3, 0.7], [np.log(np.sqrt(1.8)), np.log(np.sqrt(5.5))]]).t()

    kl_div_1 = d.kl_divergence(distrib1, distrib2)
    kl_div_2 = head.kl_divergence(pd_params1[None], pd_params2[None])

    assert kl_div_1.item() == pytest.approx(kl_div_2.item(), 0.001)
Example #2
Source File: linear.py From pyfilter with MIT License
def _kernel_2d(self, y, loc, h_var_inv, o_var_inv, c):
    tc = c if self._model.obs_ndim > 0 else c.unsqueeze(-2)

    # ===== Define covariance ===== #
    ttc = tc.transpose(-2, -1)
    diag_o_var_inv = construct_diag(o_var_inv if self._model.observable.ndim > 0 else o_var_inv.unsqueeze(-1))
    t2 = torch.matmul(ttc, torch.matmul(diag_o_var_inv, tc))

    cov = (construct_diag(h_var_inv) + t2).inverse()

    # ===== Get mean ===== #
    t1 = h_var_inv * loc

    t2 = torch.matmul(diag_o_var_inv, y if y.dim() > 0 else y.unsqueeze(-1))
    t3 = torch.matmul(ttc, t2.unsqueeze(-1))[..., 0]

    m = torch.matmul(cov, (t1 + t3).unsqueeze(-1))[..., 0]

    return MultivariateNormal(m, scale_tril=torch.cholesky(cov))
Example #3
Source File: utils.py From pyfilter with MIT License
def test_UnscentedTransform2D(self):
    # ===== 2D model ===== #
    mat = torch.eye(2)
    scale = torch.diag(mat)

    norm = Normal(0., 1.)
    mvn = MultivariateNormal(torch.zeros(2), torch.eye(2))
    mvnlinear = AffineProcess((fmvn, g), (mat, scale), mvn, mvn)
    mvnoblinear = AffineObservations((fomvn, gomvn), (1.,), norm)

    mvnmodel = StateSpaceModel(mvnlinear, mvnoblinear)

    # ===== Perform unscented transform ===== #
    uft = UnscentedFilterTransform(mvnmodel)
    res = uft.initialize(3000)
    p = uft.predict(res)
    c = uft.correct(0., p)

    assert isinstance(c.x_dist(), MultivariateNormal) and c.x_dist().mean.shape == torch.Size([3000, 2])
Example #4
Source File: cem_state_action_vfunc.py From machina with MIT License
def _fitting_multivari(self, best_samples):
    """
    Fit a multivariate gaussian and sample from it

    Parameters
    ----------
    best_samples : torch.Tensor
        shape (self.cem_batch_size, self.num_best_sampling, self.dim_ac)

    Returns
    -------
    samples : torch.Tensor
    """
    def fitting(best_samples):
        mean = best_samples.mean(dim=0)
        fs_m = best_samples.sub(mean.expand_as(best_samples))
        cov_mat = fs_m.transpose(0, 1).mm(fs_m) / (self.num_sampling - 1)
        cov_mat = cov_mat + self.delta * torch.eye(cov_mat.shape[0])
        pd = MultivariateNormal(mean, cov_mat)
        samples = pd.sample((self.num_sampling,))
        return samples
    samples = torch.cat([fitting(best_sample) for best_sample in best_samples], dim=0)
    return samples
Example #5
Source File: multivariate_normal.py From gpytorch with MIT License
def __init__(self, mean, covariance_matrix, validate_args=False):
    self._islazy = isinstance(mean, LazyTensor) or isinstance(covariance_matrix, LazyTensor)
    if self._islazy:
        if validate_args:
            ms = mean.size(-1)
            cs1 = covariance_matrix.size(-1)
            cs2 = covariance_matrix.size(-2)
            if not (ms == cs1 and ms == cs2):
                raise ValueError(f"Wrong shapes in {self._repr_sizes(mean, covariance_matrix)}")
        self.loc = mean
        self._covar = covariance_matrix
        self.__unbroadcasted_scale_tril = None
        self._validate_args = validate_args
        batch_shape = _mul_broadcast_shape(self.loc.shape[:-1], covariance_matrix.shape[:-2])
        event_shape = self.loc.shape[-1:]
        # TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
        super(TMultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=False)
    else:
        super().__init__(loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args)
Example #6
Source File: test_multivariate_normal.py From gpytorch with MIT License
def test_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        mean = torch.randn(4, device=device, dtype=dtype)
        var = torch.randn(4, device=device, dtype=dtype).abs_()
        values = torch.randn(4, device=device, dtype=dtype)

        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(mean, torch.eye(4, device=device, dtype=dtype) * var).log_prob(values)
        self.assertLess((res - actual).div(res).abs().item(), 1e-2)

        mean = torch.randn(3, 4, device=device, dtype=dtype)
        var = torch.randn(3, 4, device=device, dtype=dtype).abs_()
        values = torch.randn(3, 4, device=device, dtype=dtype)

        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(
            mean, var.unsqueeze(-1) * torch.eye(4, device=device, dtype=dtype).repeat(3, 1, 1)
        ).log_prob(values)
        self.assertLess((res - actual).div(res).abs().norm(), 1e-2)
Example #7
Source File: PPO_continuous.py From PPO-PyTorch with MIT License
def evaluate(self, state, action):
    action_mean = self.actor(state)

    action_var = self.action_var.expand_as(action_mean)
    cov_mat = torch.diag_embed(action_var).to(device)

    dist = MultivariateNormal(action_mean, cov_mat)

    action_logprobs = dist.log_prob(action)
    dist_entropy = dist.entropy()
    state_value = self.critic(state)

    return action_logprobs, torch.squeeze(state_value), dist_entropy
Example #8
Source File: test_action_head.py From vel with MIT License
def test_entropy_diag_gaussian():
    """ Test entropy of a multivariate gaussian distribution with a diagonal covariance matrix """
    head = DiagGaussianActionHead(1, 5)

    distrib = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    pd_params = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()

    entropy1 = distrib.entropy()
    entropy2 = head.entropy(pd_params[None])

    nt.assert_allclose(entropy1.detach().cpu().numpy(), entropy2.detach().cpu().numpy())
Example #9
Source File: test_action_head.py From vel with MIT License
def test_neglogp_diag_gaussian():
    """ Test negative log-likelihood of a multivariate gaussian distribution with a diagonal covariance matrix """
    head = DiagGaussianActionHead(1, 5)

    distrib = d.MultivariateNormal(torch.tensor([1.0, -1.0]), covariance_matrix=torch.tensor([[2.0, 0.0], [0.0, 0.5]]))
    pd_params = torch.tensor([[1.0, -1.0], [np.log(np.sqrt(2.0)), np.log(np.sqrt(0.5))]]).t()

    sample = head.sample(pd_params[None])

    log_prob1 = distrib.log_prob(sample)
    log_prob2 = head.logprob(sample, pd_params[None])

    nt.assert_allclose(log_prob1.detach().cpu().numpy(), log_prob2.detach().cpu().numpy(), rtol=1e-5)
Example #10
Source File: test_multivariate_normal_prior.py From gpytorch with MIT License
def test_multivariate_normal_prior_batch_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    mean = torch.tensor([[0.0, 1.0], [-0.5, 2.0]], device=device)
    cov = torch.eye(2, device=device).repeat(2, 1, 1)
    prior = MultivariateNormalPrior(mean, covariance_matrix=cov)
    dist = MultivariateNormal(mean, covariance_matrix=cov)

    t = torch.tensor([-1, 0.5], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    t = torch.tensor([[-1, 0.5], [1.5, -2.0]], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.zeros(1, 3, device=device))

    mean = torch.rand(3, 2, 2, device=device)
    cov = torch.eye(2, device=device).repeat(3, 2, 1, 1)
    prior = MultivariateNormalPrior(mean, covariance_matrix=cov)
    dist = MultivariateNormal(mean, covariance_matrix=cov)

    t = torch.rand(2, device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    t = torch.rand(2, 2, device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    t = torch.rand(3, 2, 2, device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    t = torch.rand(2, 3, 2, 2, device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.rand(3, device=device))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.rand(3, 2, 3, device=device))
Example #11
Source File: test_multivariate_normal_prior.py From gpytorch with MIT License
def test_multivariate_normal_prior_log_prob_log_transform(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    mean = torch.tensor([0.0, 1.0], device=device)
    cov = torch.eye(2, device=device)
    prior = MultivariateNormalPrior(mean, covariance_matrix=cov, transform=torch.exp)
    dist = MultivariateNormal(mean, covariance_matrix=cov)

    t = torch.tensor([-1, 0.5], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
    t = torch.tensor([[-1, 0.5], [1.5, -2.0]], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.zeros(3, device=device))
Example #12
Source File: test_multivariate_normal_prior.py From gpytorch with MIT License
def test_multivariate_normal_prior_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    mean = torch.tensor([0.0, 1.0], device=device)
    cov = torch.eye(2, device=device)
    prior = MultivariateNormalPrior(mean, covariance_matrix=cov)
    dist = MultivariateNormal(mean, covariance_matrix=cov)

    t = torch.tensor([-1, 0.5], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    t = torch.tensor([[-1, 0.5], [1.5, -2.0]], device=device)
    self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
    with self.assertRaises(RuntimeError):
        prior.log_prob(torch.zeros(3, device=device))
Example #13
Source File: basic.py From ddsp_pytorch with GNU General Public License v3.0
def construct_flow(flow_dim, flow_type='maf', flow_length=16, amortization='input'):
    """ Construct normalizing flow """
    if flow_type == 'planar':
        blocks = [PlanarFlow]
    elif flow_type == 'sylvester':
        blocks = [TriangularSylvesterFlow, BatchNormFlow, ShuffleFlow]
    elif flow_type == 'real_nvp':
        blocks = [MaskedCouplingFlow, BatchNormFlow, ShuffleFlow]
    elif flow_type == 'maf':
        blocks = [MAFlow, BatchNormFlow, ReverseFlow]
    elif flow_type == 'iaf':
        blocks = [IAFlow, BatchNormFlow, ShuffleFlow]
    elif flow_type == 'dsf':
        blocks = [DeepSigmoidFlow, BatchNormFlow, ReverseFlow]
    elif flow_type == 'ddsf':
        blocks = [DeepDenseSigmoidFlow, BatchNormFlow, ReverseFlow]
    elif flow_type == 'ddsf_iaf':
        blocks = [DDSF_IAFlow, BatchNormFlow, ShuffleFlow]
    elif flow_type == 'iaf_ctx':
        blocks = [ContextIAFlow, BatchNormFlow, ShuffleFlow]
    elif flow_type == 'maf_ctx':
        blocks = [ContextMAFlow, BatchNormFlow, ReverseFlow]
    else:
        raise ValueError('Invalid flow choice : ' + flow_type)
    flow = NormalizingFlow(
        dim=flow_dim, blocks=blocks, flow_length=flow_length,
        density=MultivariateNormal(torch.zeros(flow_dim), torch.eye(flow_dim)),
        amortized='self')
    return flow, blocks
Example #14
Source File: flow.py From ddsp_pytorch with GNU General Public License v3.0
def __init__(self, dim, blocks, generative_layers, args, target_density=distrib.MultivariateNormal, learn_top=False, y_condition=False):
    """ Initialize normalizing flow """
    super(GenerativeFlow, self).__init__(dim, blocks, generative_layers, target_density, 'none')
    biject = []
    self.n_params = []
    self.output_shapes = []
    self.target_density = target_density
    # Get input size
    C, H, W = args.input_size
    # Create the L layers
    for l in range(generative_layers):
        C, H, W = C * 4, H // 2, W // 2
        self.output_shapes.append([-1, C, H, W])
        for b_flow in blocks:
            cur_block = b_flow(C, amortized='none')
            biject.append(cur_block)
            self.n_params.append(cur_block.n_parameters())
        C = C // 2
    C, H, W = C * 4, H // 2, W // 2
    self.output_shapes.append([-1, C, H, W])
    # Add a last layer (avoiding last block)
    for b_flow in blocks[:-1]:
        cur_block = b_flow(C, amortized='none')
        biject.append(cur_block)
        self.n_params.append(cur_block.n_parameters())
    self.transforms = transform.ComposeTransform(biject)
    self.bijectors = nn.ModuleList(biject)
    self.final_density = distrib.TransformedDistribution(target_density, self.transforms)
    self.dim = dim
    # self.y_classes = hparams.Glow.y_classes
    self.learn_top = learn_top
    self.y_condition = y_condition
    # for prior
    if self.learn_top:
        # NB: as written in the original source, this call is missing the
        # kernel_size argument that nn.Conv2d requires
        self.top_layer = nn.Conv2d(C * 2, C * 2)
    if self.y_condition:
        # NB: y_classes is not defined in this scope in the original source
        # (see the commented-out hparams line above)
        self.project_ycond = LinearZeros(y_classes, 2 * C)
        self.project_class = LinearZeros(C, y_classes)
    # Register learnable prior
    self.prior_h = nn.Parameter(torch.zeros([args.batch_size, C * 2, H, W]))
Example #15
Source File: flow.py From ddsp_pytorch with GNU General Public License v3.0
def __init__(self, dim, blocks, flow_length, final_block=None, density=None, amortized='none'):
    """ Initialize normalizing flow """
    super().__init__()
    biject = []
    self.n_params = []
    # Start density (z0)
    if density is None:
        density = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
    self.base_density = density
    for f in range(flow_length - 1):
        for b_flow in blocks:
            cur_block = b_flow(dim, amortized=amortized)
            self.n_params.append(cur_block.n_parameters())
            biject.append(cur_block)
    # Add only first block last
    cur_block = blocks[0](dim, amortized=amortized)
    self.n_params.append(cur_block.n_parameters())
    biject.append(cur_block)
    if final_block is not None:
        cur_block = final_block
        self.n_params.append(cur_block.n_parameters())
        biject.append(cur_block)
    # Full set of transforms
    self.transforms = transform.ComposeTransform(biject)
    self.bijectors = nn.ModuleList(biject)
    # Final density (zK) defined as transformed distribution
    self.final_density = distrib.TransformedDistribution(density, self.transforms)
    self.amortized = amortized
    # Handle different amortizations
    if amortized in ('self', 'input'):
        self.amortized_seed = torch.ones(1, dim).detach()
        self.amortized_params = self.parameters_network(dim, self.n_parameters())
    self.log_det = []
    self.dim = dim
Example #16
Source File: uft.py From pyfilter with MIT License
def _helper(m, c):
    if m.shape[-1] > 1:
        return MultivariateNormal(m, c)

    return Normal(m[..., 0], c[..., 0, 0].sqrt())
Example #17
Source File: PPO_continuous.py From PPO-PyTorch with MIT License
def act(self, state, memory):
    action_mean = self.actor(state)
    cov_mat = torch.diag(self.action_var).to(device)

    dist = MultivariateNormal(action_mean, cov_mat)
    action = dist.sample()
    action_logprob = dist.log_prob(action)

    memory.states.append(state)
    memory.actions.append(action)
    memory.logprobs.append(action_logprob)

    return action.detach()
Example #18
Source File: gaussian.py From torch-kalman with MIT License
def _log_prob_with_subsetting(self,
                              obs: Tensor,
                              group_idx: Selector,
                              time_idx: Selector,
                              measure_idx: Selector,
                              **kwargs) -> Tensor:
    self._check_lp_sub_input(group_idx, time_idx)

    idx_3d = bmat_idx(group_idx, time_idx, measure_idx)
    idx_4d = bmat_idx(group_idx, time_idx, measure_idx, measure_idx)

    dist = MultivariateNormal(self.predictions[idx_3d], self.prediction_uncertainty[idx_4d])
    return dist.log_prob(obs[idx_3d])
Example #19
Source File: gaussian.py From torch-kalman with MIT License
def sample_measurements(self, eps: Optional[Tensor] = None) -> Tensor:
    distribution = MultivariateNormal(self.predictions, self.prediction_uncertainty)
    return deterministic_sample_mvnorm(distribution, eps=eps)
Example #20
Source File: gaussian.py From torch-kalman with MIT License
def sample_transition(self, eps: Optional[Tensor] = None) -> Tensor:
    distribution = MultivariateNormal(loc=self.means, covariance_matrix=self.covs)
    return deterministic_sample_mvnorm(distribution, eps=eps)
Example #21
Source File: utils.py From torch-kalman with MIT License
def deterministic_sample_mvnorm(distribution: MultivariateNormal, eps: Optional[Tensor] = None) -> Tensor:
    if isinstance(eps, Tensor):
        if eps.shape[-len(distribution.event_shape):] != distribution.event_shape:
            raise RuntimeError(f"Expected shape ending in {distribution.event_shape}, got {eps.shape}.")
    else:
        # `eps` may also be None or a float multiplier for freshly drawn standard-normal noise
        shape = distribution.batch_shape + distribution.event_shape
        if eps is None:
            eps = 1.0
        eps *= _standard_normal(shape, dtype=distribution.loc.dtype, device=distribution.loc.device)
    # Reparameterized draw: loc + L @ eps, with L the lower-triangular Cholesky factor
    # (_standard_normal and _batch_mv are private torch.distributions utilities)
    return distribution.loc + _batch_mv(distribution._unbroadcasted_scale_tril, eps)
Example #22
Source File: gmm.py From ncsn with GNU General Public License v3.0
def __init__(self, dim, ill_conditioned):
    cov = torch.eye(dim)
    # cov = torch.range(1, dim).diag()
    if ill_conditioned:
        cov[dim // 2:, dim // 2:] = 0.0001 * torch.eye(dim // 2)

    # mean = 0 * torch.ones(dim)
    # torch.range is deprecated; torch.arange(1, dim + 1) is the modern equivalent
    mean = torch.range(1, dim) / 10
    m = MultivariateNormal(mean, cov)
    self.gmm = m
Example #23
Source File: linear.py From pyfilter with MIT License
def pre_weight(self, y, x):
    hloc, hscale = self._model.hidden.mean_scale(x)
    oloc, oscale = self._model.observable.mean_scale(hloc)

    c = self._model.observable.theta_vals[0]
    ovar = oscale ** 2
    hvar = hscale ** 2

    if self._model.obs_ndim < 1:
        if self._model.hidden_ndim < 1:
            cov = ovar + c ** 2 * hvar
        else:
            tc = c.unsqueeze(-2)
            cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]

        return Normal(oloc, cov.sqrt()).log_prob(y)

    if self._model.hidden_ndim < 1:
        tc = c.unsqueeze(-2)
        cov = (ovar + tc.matmul(tc.transpose(-2, -1)) * hvar)[..., 0, 0]
    else:
        diag_ovar = construct_diag(ovar)
        diag_hvar = construct_diag(hvar)

        cov = diag_ovar + c.matmul(diag_hvar).matmul(c.transpose(-2, -1))

    return MultivariateNormal(oloc, cov).log_prob(y)
Example #24
Source File: utils.py From pyfilter with MIT License
def _construct_mvn(x: torch.Tensor, w: torch.Tensor):
    """
    Constructs a multivariate normal distribution of weighted samples.
    :param x: The samples
    :param w: The weights
    """
    mean = (x * w.unsqueeze(-1)).sum(0)
    centralized = x - mean
    cov = torch.matmul(w * centralized.t(), centralized)

    return MultivariateNormal(mean, scale_tril=torch.cholesky(cov))
Example #25
Source File: plane.py From nsf with MIT License
def _create_data(self, rotate=True):
    # probs = (1 / self.width**2) * torch.ones(self.width**2)
    #
    # means = torch.Tensor([
    #     (x, y)
    #     for x in torch.linspace(-self.bound, self.bound, self.width)
    #     for y in torch.linspace(-self.bound, self.bound, self.width)
    # ])
    #
    # covariance = self.std**2 * torch.eye(2)
    # covariances = covariance[None, ...].repeat(self.width**2, 1, 1)
    #
    # mixture_distribution = distributions.OneHotCategorical(
    #     probs=probs
    # )
    # components_distribution = distributions.MultivariateNormal(
    #     loc=means,
    #     covariance_matrix=covariances
    # )
    #
    # mask = mixture_distribution.sample((self.num_points,))[..., None].repeat(1, 1, 2)
    # samples = components_distribution.sample((self.num_points,))
    # self.data = torch.sum(mask * samples, dim=-2)
    #
    # if rotate:
    #     rotation_matrix = torch.Tensor([
    #         [1 / np.sqrt(2), -1 / np.sqrt(2)],
    #         [1 / np.sqrt(2), 1 / np.sqrt(2)]
    #     ])
    #     self.data = self.data @ rotation_matrix

    means = np.array([
        (x + 1e-3 * np.random.rand(), y + 1e-3 * np.random.rand())
        for x in np.linspace(-self.bound, self.bound, self.width)
        for y in np.linspace(-self.bound, self.bound, self.width)
    ])

    covariance_factor = self.std * np.eye(2)

    index = np.random.choice(range(self.width ** 2), size=self.num_points, replace=True)
    noise = np.random.randn(self.num_points, 2)
    self.data = means[index] + noise @ covariance_factor
    if rotate:
        rotation_matrix = np.array([
            [1 / np.sqrt(2), -1 / np.sqrt(2)],
            [1 / np.sqrt(2), 1 / np.sqrt(2)]
        ])
        self.data = self.data @ rotation_matrix
    self.data = self.data.astype(np.float32)
    self.data = torch.Tensor(self.data)