Python torch.distributions.Uniform() Examples
The following are 4 code examples of torch.distributions.Uniform(), taken from open-source projects.
You may also want to check out the other available functions and classes of the torch.distributions module.
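Before the project examples, here is a minimal standalone sketch of the basic API (not taken from any of the projects below). Uniform(low, high) defines a distribution on the half-open interval [low, high), batches over tensor-valued parameters, and exposes sample() and log_prob():

import torch
from torch.distributions import Uniform

# Uniform over [0, 1) in each of 3 dimensions (a batch of independent uniforms)
dist = Uniform(torch.zeros(3), torch.ones(3))

samples = dist.sample((5,))         # shape [5, 3], values in [0, 1)
log_probs = dist.log_prob(samples)  # log(1 / (high - low)) = 0 here, shape [5, 3]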
Example #1
Source File: distributions.py From vgan with MIT License
def get_zdist(dist_name, dim, device=None):
    # Get distribution
    if dist_name == 'uniform':
        low = -torch.ones(dim, device=device)
        high = torch.ones(dim, device=device)
        zdist = distributions.Uniform(low, high)
    elif dist_name == 'gauss':
        mu = torch.zeros(dim, device=device)
        scale = torch.ones(dim, device=device)
        zdist = distributions.Normal(mu, scale)
    else:
        raise NotImplementedError

    # Add dim attribute
    zdist.dim = dim
    return zdist
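A hedged usage sketch of get_zdist; the argument values here are illustrative, not taken from the vgan project:

zdist = get_zdist('uniform', dim=128, device=torch.device('cpu'))
z = zdist.sample((16,))     # 16 latent codes in [-1, 1), shape [16, 128]
print(z.shape, zdist.dim)   # torch.Size([16, 128]) 128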
Example #2
Source File: audio.py From crnn-audio-classification with MIT License
def __init__(self, max_perc, hop_length=None, n_freq=201, fixed_rate=None):
    super(RandomTimeStretch, self).__init__(hop_length, n_freq, fixed_rate)
    self._dist = Uniform(1. - max_perc, 1. + max_perc)
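Here the distribution supplies a random time-stretch rate centered on 1. A minimal sketch of the same construction outside the class, with an assumed max_perc of 0.2:

from torch.distributions import Uniform

max_perc = 0.2                                # assumed value for illustration
dist = Uniform(1. - max_perc, 1. + max_perc)  # stretch rates in [0.8, 1.2)
rate = dist.sample()                          # one random stretch factor per call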
Example #3
Source File: conv_iResNet.py From invertible-resnet with MIT License
def logistic_distribution(loc, log_scale):
    scale = torch.exp(log_scale) + 1e-5
    base_distribution = distributions.Uniform(torch.zeros_like(loc), torch.ones_like(loc))
    transforms = [LogisticTransform(), distributions.AffineTransform(loc=loc, scale=scale)]
    logistic = distributions.TransformedDistribution(base_distribution, transforms)
    return logistic
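LogisticTransform is defined elsewhere in the invertible-resnet repository. PyTorch's own documentation builds the same logistic distribution from a Uniform base using SigmoidTransform().inv (the logit transform) in place of that custom class; a standalone sketch:

import torch
from torch import distributions
from torch.distributions.transforms import AffineTransform, SigmoidTransform

loc, log_scale = torch.zeros(5), torch.zeros(5)
scale = torch.exp(log_scale) + 1e-5

# Uniform(0, 1) pushed through the logit (inverse sigmoid), then shifted and scaled
base = distributions.Uniform(torch.zeros_like(loc), torch.ones_like(loc))
logistic = distributions.TransformedDistribution(
    base, [SigmoidTransform().inv, AffineTransform(loc=loc, scale=scale)])

x = logistic.rsample()
print(logistic.log_prob(x).shape)  # torch.Size([5])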
Example #4
Source File: aem.py From autoregressive-energy-machines with MIT License
def _sample_batch_from_proposal(self, batch_size, return_log_density_of_samples=False):
    # need to do n_samples passes through autoregressive net
    samples = torch.zeros(batch_size, self.autoregressive_net.input_dim)
    log_density_of_samples = torch.zeros(batch_size, self.autoregressive_net.input_dim)

    for dim in range(self.autoregressive_net.input_dim):
        # compute autoregressive outputs
        autoregressive_outputs = self.autoregressive_net(samples).reshape(
            -1, self.dim, self.autoregressive_net.output_dim_multiplier)
        # grab proposal params for dth dimension
        proposal_params = autoregressive_outputs[..., dim, self.context_dim:]

        # make mixture coefficients, locs, and scales for proposal
        logits = proposal_params[..., :self.n_proposal_mixture_components]  # [B, D, M]
        if logits.shape[0] == 1:
            logits = logits.reshape(self.dim, self.n_proposal_mixture_components)
        locs = proposal_params[
            ..., self.n_proposal_mixture_components:(2 * self.n_proposal_mixture_components)
        ]  # [B, D, M]
        scales = self.mixture_component_min_scale + self.scale_activation(
            proposal_params[..., (2 * self.n_proposal_mixture_components):])  # [B, D, M]

        # create proposal
        if self.Component is not None:
            mixture_distribution = distributions.OneHotCategorical(
                logits=logits,
                validate_args=True
            )
            components_distribution = self.Component(loc=locs, scale=scales)
            self.proposal = distributions_.MixtureSameFamily(
                mixture_distribution=mixture_distribution,
                components_distribution=components_distribution
            )
            proposal_samples = self.proposal.sample((1,))  # [S, B, D]
        else:
            self.proposal = distributions.Uniform(low=-4, high=4)
            proposal_samples = self.proposal.sample((1, batch_size, 1))

        proposal_samples = proposal_samples.permute(1, 2, 0)  # [B, D, S]
        proposal_log_density = self.proposal.log_prob(proposal_samples)

        log_density_of_samples[:, dim] += proposal_log_density.reshape(-1).detach()
        samples[:, dim] += proposal_samples.reshape(-1).detach()

    if return_log_density_of_samples:
        return samples, torch.sum(log_density_of_samples, dim=-1)
    else:
        return samples
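When no mixture component class is configured, the fallback proposal above is a fixed Uniform(-4, 4), whose log density is constant on its support, so each per-dimension log_prob added into log_density_of_samples is simply -log(8). A quick standalone check:

import math
import torch
from torch.distributions import Uniform

proposal = Uniform(low=-4., high=4.)
x = proposal.sample((3,))
print(proposal.log_prob(x))  # -log(8) ≈ -2.0794 for every in-support point
print(-math.log(8.))         # -2.0794...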