Python torch.distributions.TransformedDistribution() Examples

The following are 6 code examples of torch.distributions.TransformedDistribution(). Each example is taken from an open-source project; the project and source file are listed above each example. You may also want to check out the other available functions and classes of the torch.distributions module.
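As a quick orientation before the examples: TransformedDistribution wraps a base distribution together with one or more Transform objects and derives sample() and log_prob() through the change-of-variables formula. A minimal sketch (not tied to any of the projects below) that pushes a standard normal through an exponential to obtain a log-normal-style distribution:

import torch
from torch.distributions import Normal, TransformedDistribution
from torch.distributions.transforms import ExpTransform

# Base N(0, 1) pushed through exp(x) -> a log-normal-shaped distribution
base = Normal(torch.zeros(1), torch.ones(1))
log_normal = TransformedDistribution(base, [ExpTransform()])

samples = log_normal.sample((3,))        # samples are strictly positive
print(log_normal.log_prob(samples))      # base log-prob minus log|det Jacobian|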
Example #1
Source File: module.py    From pyfilter with MIT License
def _iterate_distribution(d: Distribution) -> Tuple[Distribution, ...]:
    """
    Helper method for iterating over distributions.
    :param d: The distribution
    """

    res = tuple()
    if not isinstance(d, TransformedDistribution):
        res += tuple(_find_types(d, torch.Tensor).values())

        for sd in _find_types(d, Distribution).values():
            res += _iterate_distribution(sd)

    else:
        res += _iterate_distribution(d.base_dist)

        for t in d.transforms:
            res += tuple(_find_types(t, torch.Tensor).values())

    return res 
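
The recursion above relies on two public attributes of TransformedDistribution: base_dist and transforms. A minimal sketch (independent of pyfilter's internal _find_types helper) of what that traversal sees:

import torch
from torch.distributions import Normal, TransformedDistribution, AffineTransform

d = TransformedDistribution(Normal(0.0, 1.0), [AffineTransform(loc=2.0, scale=3.0)])

print(type(d.base_dist).__name__)   # Normal -- the branch recursed into above
for t in d.transforms:              # the transforms whose tensors are collected
    print(type(t).__name__)         # AffineTransform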
Example #2
Source File: parameter.py    From pyfilter with MIT License
def transformed_dist(self):
    """
    Returns the unconstrained distribution.
    """

    if not self.trainable:
        raise ValueError('Parameter is not trainable; the prior is not a `Distribution` instance!')

    return TransformedDistribution(self._prior, [self.bijection.inv])
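
The same pattern with plain torch objects: transforming a constrained prior by the inverse of its bijection yields an unconstrained distribution over the real line. A short sketch (the names here are illustrative, not pyfilter's):

import torch
from torch.distributions import Exponential, TransformedDistribution
from torch.distributions.transforms import ExpTransform

prior = Exponential(torch.tensor(1.0))            # support: (0, inf)
bijection = ExpTransform()                        # maps R -> (0, inf)
unconstrained = TransformedDistribution(prior, [bijection.inv])

print(unconstrained.sample((5,)))                 # values now range over the whole real line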
Example #3
Source File: affine.py    From pyfilter with MIT License
def _define_transdist(loc: torch.Tensor, scale: torch.Tensor, inc_dist: Distribution, ndim: int):
    """
    Defines the transition distribution as an affine transformation of the increment distribution.
    """
    loc, scale = torch.broadcast_tensors(loc, scale)

    # The batch shape excludes the rightmost `ndim` event dimensions
    shape = loc.shape[:-ndim] if ndim > 0 else loc.shape

    return TransformedDistribution(
        inc_dist.expand(shape), AffineTransform(loc, scale, event_dim=ndim)
    )
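
A hypothetical call to the helper above (the inputs are made up for illustration, not taken from pyfilter): with ndim=0 every element is treated as a batch dimension, and the increment distribution is expanded to the broadcast shape of loc and scale before the affine transform is applied.

import torch
from torch.distributions import Normal

loc = torch.zeros(10)
scale = 0.5 * torch.ones(10)
dist = _define_transdist(loc, scale, Normal(0.0, 1.0), ndim=0)

print(dist.sample().shape)   # torch.Size([10]) -- loc + scale * eps, with eps ~ N(0, 1)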
Example #4
Source File: flow.py    From ddsp_pytorch with GNU General Public License v3.0
def __init__(self, dim, blocks, flow_length, final_block=None, density=None, amortized='none'):
    """ Initialize normalizing flow """
    super().__init__()
    biject = []
    self.n_params = []
    # Start density (z0)
    if density is None:
        density = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
    self.base_density = density
    for f in range(flow_length - 1):
        for b_flow in blocks:
            cur_block = b_flow(dim, amortized=amortized)
            self.n_params.append(cur_block.n_parameters())
            biject.append(cur_block)
    # Append one final copy of the first block type
    cur_block = blocks[0](dim, amortized=amortized)
    self.n_params.append(cur_block.n_parameters())
    biject.append(cur_block)
    if final_block is not None:
        cur_block = final_block
        self.n_params.append(cur_block.n_parameters())
        biject.append(cur_block)
    # Full set of transforms
    self.transforms = transform.ComposeTransform(biject)
    self.bijectors = nn.ModuleList(biject)
    # Final density (zK) defined as transformed distribution
    self.final_density = distrib.TransformedDistribution(density, self.transforms)
    self.amortized = amortized
    # Handle different amortizations
    if amortized in ('self', 'input'):
        self.amortized_seed = torch.ones(1, dim).detach()
        self.amortized_params = self.parameters_network(dim, self.n_parameters())
    self.log_det = []
    self.dim = dim
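
The final_density construction above is the standard TransformedDistribution pattern; what the flow blocks add is that the composed transforms carry learnable parameters. A minimal sketch with fixed (non-learnable) torch transforms standing in for the blocks, to show how log_prob of the transformed distribution decomposes:

import torch
from torch import distributions as distrib
from torch.distributions import transforms as transform

dim = 2
density = distrib.MultivariateNormal(torch.zeros(dim), torch.eye(dim))
# Two elementwise transforms composed, in place of the learnable flow blocks
flow = transform.ComposeTransform([
    transform.AffineTransform(loc=torch.zeros(dim), scale=2.0 * torch.ones(dim)),
    transform.SigmoidTransform(),
])
final_density = distrib.TransformedDistribution(density, flow)

x = final_density.sample((4,))
print(final_density.log_prob(x))   # base log-prob minus the summed log|det Jacobian|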
Example #5
Source File: flow.py    From ddsp_pytorch with GNU General Public License v3.0
def __init__(self, dim, blocks, generative_layers, args, target_density=distrib.MultivariateNormal, learn_top=False, y_condition=False):
    """ Initialize normalizing flow """
    super(GenerativeFlow, self).__init__(dim, blocks, generative_layers, target_density, 'none')
    biject = []
    self.n_params = []
    self.output_shapes = []
    self.target_density = target_density
    # Get input size
    C, H, W = args.input_size
    # Create the L layers
    for l in range(generative_layers):
        C, H, W = C * 4, H // 2, W // 2
        self.output_shapes.append([-1, C, H, W])
        for b_flow in blocks:
            cur_block = b_flow(C, amortized='none')
            biject.append(cur_block)
            self.n_params.append(cur_block.n_parameters())
        C = C // 2
    C, H, W = C * 4, H // 2, W // 2
    self.output_shapes.append([-1, C, H, W])
    # Add a final layer (skipping the last block type)
    for b_flow in blocks[:-1]:
        cur_block = b_flow(C, amortized='none')
        biject.append(cur_block)
        self.n_params.append(cur_block.n_parameters())
    self.transforms = transform.ComposeTransform(biject)
    self.bijectors = nn.ModuleList(biject)
    self.final_density = distrib.TransformedDistribution(target_density, self.transforms)
    self.dim = dim
    # self.y_classes = hparams.Glow.y_classes
    self.learn_top = learn_top
    self.y_condition = y_condition
    # for prior
    if self.learn_top:
        # NOTE: nn.Conv2d requires a kernel_size; a 3x3 kernel with padding=1 is assumed here
        self.top_layer = nn.Conv2d(C * 2, C * 2, kernel_size=3, padding=1)
    if self.y_condition:
        # NOTE: y_classes is assumed to be defined elsewhere (cf. the commented-out line above)
        self.project_ycond = LinearZeros(y_classes, 2 * C)
        self.project_class = LinearZeros(C, y_classes)
    # Register learnable prior
    self.prior_h = nn.Parameter(torch.zeros([args.batch_size, C * 2, H, W]))
Example #6
Source File: conv_iResNet.py    From invertible-resnet with MIT License
def logistic_distribution(loc, log_scale):
    scale = torch.exp(log_scale) + 1e-5
    base_distribution = distributions.Uniform(torch.zeros_like(loc), torch.ones_like(loc))
    transforms = [LogisticTransform(), distributions.AffineTransform(loc=loc, scale=scale)]
    logistic = distributions.TransformedDistribution(base_distribution, transforms)
    return logistic
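
LogisticTransform here is a helper defined in the invertible-resnet repository (the logit, i.e. the inverse of the sigmoid). The same logistic distribution can be built from torch's built-in transforms by inverting SigmoidTransform; a sketch for comparison:

import torch
from torch import distributions

def logistic_distribution_builtin(loc, log_scale):
    scale = torch.exp(log_scale) + 1e-5
    base = distributions.Uniform(torch.zeros_like(loc), torch.ones_like(loc))
    transforms = [distributions.SigmoidTransform().inv,          # logit of a Uniform(0, 1) sample
                  distributions.AffineTransform(loc=loc, scale=scale)]
    return distributions.TransformedDistribution(base, transforms)

d = logistic_distribution_builtin(torch.zeros(3), torch.zeros(3))
print(d.sample())    # three draws from an (approximately) standard logistic distribution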