Python torch.rand_like() Examples

The following are 30 code examples of torch.rand_like(), drawn from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out all other available functions and classes of the torch module.
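Before the examples, a note on the API itself: torch.rand_like(input) draws samples from the uniform distribution on [0, 1) and returns a tensor with the same size, dtype, layout, and device as input. A minimal sketch:

import torch

x = torch.zeros(2, 3, dtype=torch.float64)
r = torch.rand_like(x)  # uniform on [0, 1), same shape/dtype/device as x

assert r.shape == x.shape and r.dtype == x.dtype
assert (r >= 0).all() and (r < 1).all()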
Example #1
Source File: invertible_resnet.py    From FrEIA with MIT License
def test_init(self):
        x = torch.randn(self.batch_size, *self.inp_size_linear)
        x = x * torch.rand_like(x) + torch.randn_like(x)
        y = self.net_linear(x)
        # Channel-wise mean should be zero
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_linear[0], -1).mean(dim=-1),
                                       torch.zeros(self.inp_size_linear[0]), atol=1e-06))
        # Channel-wise std should be one
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_linear[0], -1).std(dim=-1),
                                       torch.ones(self.inp_size_linear[0]), atol=1e-06))

        x = torch.randn(self.batch_size, *self.inp_size_conv)
        x = x * torch.rand_like(x) + torch.randn_like(x)
        y = self.net_conv(x)
        # Channel-wise mean should be zero
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_conv[0], -1).mean(dim=-1),
                                       torch.zeros(self.inp_size_conv[0]), atol=1e-06))
        # Channel-wise std should be one
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_conv[0], -1).std(dim=-1),
                                       torch.ones(self.inp_size_conv[0]), atol=1e-06)) 
Example #2
Source File: train.py    From nice_pytorch with BSD 3-Clause "New" or "Revised" License
def load_cifar10(train=True, batch_size=1, num_workers=0):
    """Rescale and preprocess CIFAR10 dataset."""
    # check if ZCA matrix exists on dataset yet:
    assert os.path.exists("./datasets/cifar/zca_matrix.pt"), \
        "[load_cifar10] ZCA whitening matrix not built! Run `python make_datasets.py` first."
    zca_matrix = torch.load("./datasets/cifar/zca_matrix.pt")

    cifar10_transform = torchvision.transforms.Compose([
        # convert PIL image to tensor:
        torchvision.transforms.ToTensor(),
        # flatten:
        torchvision.transforms.Lambda(lambda x: x.view(-1)),
        # add uniform noise ~ [-1/256, +1/256]:
        torchvision.transforms.Lambda(lambda x: (x + torch.rand_like(x).div_(128.).add_(-1./256.))),
        # rescale to [-1,1]:
        torchvision.transforms.Lambda(lambda x: rescale(x,-1.,1.)),
        # exact ZCA:
        torchvision.transforms.LinearTransformation(zca_matrix)
    ])
    return data.DataLoader(
        torchvision.datasets.CIFAR10(root="./datasets/cifar", train=train, transform=cifar10_transform, download=False),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=CUDA,
        drop_last=train
    ) 
Example #3
Source File: test_balance.py    From torchgpipe with Apache License 2.0
def test_balance_by_size_latent():
    class Expand(nn.Module):
        def __init__(self, times):
            super().__init__()
            self.times = times

        def forward(self, x):
            for i in range(self.times):
                x = x + torch.rand_like(x, requires_grad=True)
            return x

    sample = torch.rand(10, 100, 100)

    model = nn.Sequential(*[Expand(i) for i in [1, 2, 3, 4, 5, 6]])
    balance = balance_by_size(2, model, sample)
    assert balance == [4, 2]

    model = nn.Sequential(*[Expand(i) for i in [6, 5, 4, 3, 2, 1]])
    balance = balance_by_size(2, model, sample)
    assert balance == [2, 4] 
Example #4
Source File: paccrf.py    From openseg.pytorch with MIT License
def reset_parameters(self, pairwise_idx=None):
        if pairwise_idx is None:
            idxs = range(len(self.messengers))
            if not self.fixed_weighting:
                self.unary_weight.data.fill_(self.init_unary_weight)
        else:
            idxs = [pairwise_idx]

        for i in idxs:
            self.messengers[i].reset_parameters()
            if isinstance(self.messengers[i], nn.Conv2d):
                # TODO: gaussian initialization for XY kernels?
                pass
            if self.compat[i] is not None:
                self.compat[i].weight.data[:, :, 0, 0] = 1.0 - th.eye(self.channels, dtype=th.float32)
                if self.perturbed_init:
                    perturb_range = 0.001
                    self.compat[i].weight.data.add_((th.rand_like(self.compat[i].weight.data) - 0.5) * perturb_range)
            self.pairwise_weights[i].data = th.ones_like(self.pairwise_weights[i]) * self.init_pairwise_weights[i] 
Example #5
Source File: test_loss.py    From nussl with MIT License
def test_combination_invariant_loss_sdr():
    n_batch = 40
    n_samples = 16000
    n_sources = 2

    references = torch.randn(n_batch, n_samples, n_sources)

    noise_amount = [0.01, 0.05, 0.1, 0.5, 1.0]
    LossCPIT = ml.train.loss.CombinationInvariantLoss(
        loss_function=ml.train.loss.SISDRLoss())
    LossSDR = ml.train.loss.SISDRLoss()

    for n in noise_amount:
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        _loss_a = LossSDR(estimates, references).item()

        for shift in range(n_sources):
            sources_a = estimates[..., shift:]
            sources_b = estimates[..., :shift]
            sources_c = torch.rand_like(estimates)
            shifted_sources = torch.cat(
                [sources_a, sources_b, sources_c], dim=-1)
            _loss_b = LossCPIT(shifted_sources, references).item()
            assert np.allclose(_loss_a, _loss_b, atol=1e-4) 
Example #6
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_weighted_zero_sum(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.expmap0(torch.eye(3, 10)).detach().requires_grad_(True)
    weights = torch.rand_like(a[..., 0])
    weights = weights - weights.sum() / weights.numel()
    mid = manifold.weighted_midpoint(
        a, lincomb=lincomb, weights=weights, posweight=True
    )
    if _k == 0 and lincomb:
        np.testing.assert_allclose(
            mid.detach(),
            torch.cat([weights, torch.zeros(a.size(-1) - a.size(0))]),
            atol=1e-6,
        )
    assert mid.shape == a.shape[-1:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all() 
Example #7
Source File: membership_inference.py    From CrypTen with MIT License
def compute_rewards(weights, dataset, epsilon=0.0):
    """
    Perform inference using epsilon-greedy contextual bandit (without updates).
    """
    context, rewards = dataset
    context = context.type(torch.float32)

    # compute scores:
    scores = torch.matmul(weights, context.t()).squeeze()
    explore = (torch.rand(scores.shape[1]) < epsilon).type(torch.float32)
    rand_scores = torch.rand_like(scores)
    scores.mul_(1 - explore).add_(rand_scores.mul(explore))

    # select arm and observe reward:
    selected_arms = scores.argmax(dim=0)
    return rewards[range(rewards.shape[0]), selected_arms] 
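The in-place blend scores.mul_(1 - explore).add_(rand_scores.mul(explore)) above is a branch-free epsilon-greedy step: columns where explore is 1 get their scores replaced by uniform noise from torch.rand_like, while the rest keep their model scores. A standalone sketch of the same idea (shapes and epsilon are illustrative):

import torch

epsilon = 0.2
scores = torch.randn(5, 100)                              # 5 arms, 100 decisions
explore = (torch.rand(scores.shape[1]) < epsilon).float()
rand_scores = torch.rand_like(scores)

blended = scores * (1 - explore) + rand_scores * explore  # no branching, per column
selected_arms = blended.argmax(dim=0)                     # ~20% of picks are random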
Example #8
Source File: train.py    From nice_pytorch with BSD 3-Clause "New" or "Revised" License
def load_mnist(train=True, batch_size=1, num_workers=0):
    """Rescale and preprocess MNIST dataset."""
    mnist_transform = torchvision.transforms.Compose([
        # convert PIL image to tensor:
        torchvision.transforms.ToTensor(),
        # flatten:
        torchvision.transforms.Lambda(lambda x: x.view(-1)),
        # add uniform noise ~ [0, 1/256):
        torchvision.transforms.Lambda(lambda x: (x + torch.rand_like(x).div_(256.))),
        # rescale to [0,1]:
        torchvision.transforms.Lambda(lambda x: rescale(x, 0., 1.))
    ])
    return data.DataLoader(
        torchvision.datasets.MNIST(root="./datasets/mnist", train=train, transform=mnist_transform, download=False),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=CUDA,
        drop_last=train
    ) 
Example #9
Source File: test_loss.py    From snorkel with Apache License 2.0
def test_sce_equals_ce(self):
        # Does soft ce loss match classic ce loss when labels are one-hot?
        Y_golds = torch.LongTensor([0, 1, 2])
        Y_golds_probs = torch.Tensor(preds_to_probs(Y_golds.numpy(), num_classes=4))

        Y_probs = torch.rand_like(Y_golds_probs)
        Y_probs = Y_probs / Y_probs.sum(dim=1).reshape(-1, 1)

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="none")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="none")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy())

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="sum")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="sum")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy())

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="mean")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="mean")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy()) 
Example #10
Source File: gradient_tests.py    From torchdiffeq with MIT License
def test_dopri5_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint_adjoint(func, y0, t_points, method='dopri5')
        gradys = torch.rand_like(ys) * 0.1
        ys.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 3e-4)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 1e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-3) 
Example #11
Source File: real_nvp.py    From real-nvp with MIT License
def _pre_process(self, x):
        """Dequantize the input image `x` and convert to logits.

        Args:
            x (torch.Tensor): Input image.

        Returns:
            y (torch.Tensor): Dequantized logits of `x`.

        See Also:
            - Dequantization: https://arxiv.org/abs/1511.01844, Section 3.1
            - Modeling logits: https://arxiv.org/abs/1605.08803, Section 4.1
        """
        y = (x * 255. + torch.rand_like(x)) / 256.
        y = (2 * y - 1) * self.data_constraint
        y = (y + 1) / 2
        y = y.log() - (1. - y).log()

        # Save log-determinant of Jacobian of initial transform
        ldj = F.softplus(y) + F.softplus(-y) \
            - F.softplus((1. - self.data_constraint).log() - self.data_constraint.log())
        sldj = ldj.view(ldj.size(0), -1).sum(-1)

        return y, sldj 
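torch.rand_like(x) here performs uniform dequantization: a quantized pixel value k/255 becomes (k + u)/256 with u ~ Uniform(0, 1), spreading each discrete value over the bin [k/256, (k+1)/256) so the flow can model a continuous density. A quick numeric check of the bin bounds (my own sketch, not from the source):

import torch

x = torch.zeros(10000)                      # the darkest pixel value, 0/255
y = (x * 255. + torch.rand_like(x)) / 256.  # dequantize into the first bin

assert (y >= 0).all() and (y < 1. / 256.).all()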
Example #12
Source File: action_selectors.py    From pymarl with Apache License 2.0
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):

        # Assuming agent_inputs is a batch of Q-values for each agent
        self.epsilon = self.schedule.eval(t_env)

        if test_mode:
            # Greedy action selection only
            self.epsilon = 0.0

        # mask actions that are excluded from selection
        masked_q_values = agent_inputs.clone()
        masked_q_values[avail_actions == 0.0] = -float("inf")  # should never be selected!

        random_numbers = th.rand_like(agent_inputs[:, :, 0])
        pick_random = (random_numbers < self.epsilon).long()
        random_actions = Categorical(avail_actions.float()).sample().long()

        picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
        return picked_actions 
Example #13
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def test_parallel_transport0_back(a, b, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)
    b = man.projx(b)

    v_0 = torch.rand_like(a) + 1e-5
    v_0 = man.proju(a, v_0)  # project on tangent plane

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_t = man.transp0back(a, v_0)
    v_t = man.transp0(b, v_t)

    v_s = man.transp(a, zero, v_0)
    v_s = man.transp(zero, b, v_s)

    np.testing.assert_allclose(v_t, v_s, atol=1e-5, rtol=1e-5) 
Example #14
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def test_parallel_transport0_preserves_inner_products(a, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)

    v_0 = torch.rand_like(a) + 1e-5
    u_0 = torch.rand_like(a) + 1e-5

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_0 = man.proju(zero, v_0)  # project on tangent plane
    u_0 = man.proju(zero, u_0)  # project on tangent plane

    v_a = man.transp0(a, v_0)
    u_a = man.transp0(a, u_0)

    vu_0 = man.inner(v_0, u_0, keepdim=True)
    vu_a = man.inner(v_a, u_a, keepdim=True)
    np.testing.assert_allclose(vu_a, vu_0, atol=1e-5, rtol=1e-5) 
Example #15
Source File: gradient_tests.py    From torchdiffeq with MIT License
def test_adams_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys_ = torchdiffeq.odeint_adjoint(func, y0, t_points, method='adams')
        gradys = torch.rand_like(ys_) * 0.1
        ys_.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 5e-2)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 5e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-2) 
Example #16
Source File: glow.py    From glow with MIT License
def _pre_process(self, x):
        """Dequantize the input image `x` and convert to logits.

        See Also:
            - Dequantization: https://arxiv.org/abs/1511.01844, Section 3.1
            - Modeling logits: https://arxiv.org/abs/1605.08803, Section 4.1

        Args:
            x (torch.Tensor): Input image.

        Returns:
            y (torch.Tensor): Dequantized logits of `x`.
        """
        y = (x * 255. + torch.rand_like(x)) / 256.
        y = (2 * y - 1) * self.bounds
        y = (y + 1) / 2
        y = y.log() - (1. - y).log()

        # Save log-determinant of Jacobian of initial transform
        ldj = F.softplus(y) + F.softplus(-y) \
            - F.softplus((1. - self.bounds).log() - self.bounds.log())
        sldj = ldj.flatten(1).sum(-1)

        return y, sldj 
Example #17
Source File: test_loss.py    From snorkel with Apache License 2.0
def test_invalid_reduction(self):
        Y_golds = torch.LongTensor([0, 1, 2])
        Y_golds_probs = torch.Tensor(preds_to_probs(Y_golds.numpy(), num_classes=4))

        Y_probs = torch.rand_like(Y_golds_probs)
        Y_probs = Y_probs / Y_probs.sum(dim=1).reshape(-1, 1)

        with self.assertRaisesRegex(ValueError, "Keyword 'reduction' must be"):
            cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="bad") 
Example #18
Source File: quantizer.py    From SWALP with BSD 2-Clause "Simplified" License
def add_r_(data):
    r = torch.rand_like(data)
    data.add_(r) 
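In SWALP's quantizer this uniform noise is the randomness behind stochastic rounding: adding u ~ Uniform(0, 1) and then flooring rounds k + f up with probability f, which is unbiased in expectation. A hedged sketch of that combination (the floor step is my illustration, not part of the snippet above):

import torch

def stochastic_round_(data):
    # add u ~ [0, 1) in place, then floor: k + f rounds up with probability f
    data.add_(torch.rand_like(data)).floor_()

x = torch.full((100000,), 3.25)
stochastic_round_(x)
print(x.mean())  # ~3.25: roughly 25% of entries round up to 4, the rest down to 3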
Example #19
Source File: plain_contextual_bandits.py    From CrypTen with MIT License
def epsilon_greedy(
    sampler,
    epsilon=0.0,
    dtype=torch.double,
    device="cpu",
    monitor_func=None,
    checkpoint_func=None,
    checkpoint_every=0,
):
    """
    Run epsilon-greedy linear least squares learner on dataset.

    The `sampler` is expected to be an iterator that returns one sample at a time.
    Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.

    The function takes the hyperparameter `epsilon`, plus `dtype` and `device`, as optional
    arguments. It also takes an optional `monitor_func` closure that does logging,
    and an optional `checkpoint_func` that does checkpointing.
    """

    # define scoring function:
    def score_func(scores, A_inv, b, context):
        # Implement as (p >= epsilon) * scores + (p < epsilon) * random
        # in order to match the private version
        explore = random.random() < epsilon
        rand_scores = torch.rand_like(scores)
        scores.mul_(1 - explore).add_(rand_scores.mul(explore))

    # run online learner:
    online_learner(
        sampler,
        dtype=dtype,
        device=device,
        score_func=score_func,
        monitor_func=monitor_func,
        checkpoint_func=checkpoint_func,
        checkpoint_every=checkpoint_every,
    ) 
Example #20
Source File: gradient_tests.py    From torchdiffeq with MIT License
def test_adjoint(self):
        """
        Test against dopri5
        """
        f, y0, t_points, _ = construct_problem(TEST_DEVICE)

        func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='dopri5')
        ys = func(y0, t_points)
        torch.manual_seed(0)
        gradys = torch.rand_like(ys)
        ys.backward(gradys)

        # reg_y0_grad = y0.grad
        reg_t_grad = t_points.grad
        reg_a_grad = f.a.grad
        reg_b_grad = f.b.grad

        f, y0, t_points, _ = construct_problem(TEST_DEVICE)

        func = lambda y0, t_points: torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
        ys = func(y0, t_points)
        ys.backward(gradys)

        # adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_a_grad = f.a.grad
        adj_b_grad = f.b.grad

        # self.assertLess(max_abs(reg_y0_grad - adj_y0_grad), eps)
        self.assertLess(max_abs(reg_t_grad - adj_t_grad), eps)
        self.assertLess(max_abs(reg_a_grad - adj_a_grad), eps)
        self.assertLess(max_abs(reg_b_grad - adj_b_grad), eps) 
Example #21
Source File: drop.py    From pytorch-image-models with Apache License 2.0
def drop_block_fast_2d(
        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplified from the above without
    concern for a valid block mask at edges.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
            (W - block_size + 1) * (H - block_size + 1))

    if batchwise:
        # one mask for whole batch, quite a bit faster
        block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
    else:
        # mask per batch element
        block_mask = torch.rand_like(x) < gamma
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1. - block_mask) + normal_noise * block_mask
    else:
        block_mask = 1 - block_mask
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x 
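The heart of the per-element path is torch.rand_like(x) < gamma, which fires seed points with probability gamma; the stride-1 max-pool then grows each seed into a block_size x block_size square. A self-contained sketch of just the mask construction (parameter values are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 32, 32)
gamma, block_size = 0.02, 7

seeds = (torch.rand_like(x) < gamma).to(x.dtype)  # Bernoulli(gamma) seed points
block_mask = F.max_pool2d(seeds, kernel_size=block_size, stride=1, padding=block_size // 2)
keep = 1 - block_mask                             # 0 inside dropped blocks, 1 elsewhere
print(keep.mean())                                # fraction of surviving activations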
Example #22
Source File: test_balance.py    From torchgpipe with Apache License 2.0
def test_balance_by_size_param_scale():
    class Tradeoff(nn.Module):
        def __init__(self, param_size, latent_size):
            super().__init__()
            self.fc = nn.Linear(param_size, param_size)
            self.latent_size = latent_size

        def forward(self, x):
            for i in range(self.latent_size):
                x = x + torch.rand_like(x, requires_grad=True)
            return x

    model = nn.Sequential(
        Tradeoff(param_size=1, latent_size=6),
        Tradeoff(param_size=2, latent_size=5),
        Tradeoff(param_size=3, latent_size=4),
        Tradeoff(param_size=4, latent_size=3),
        Tradeoff(param_size=5, latent_size=2),
        Tradeoff(param_size=6, latent_size=1),
    )

    sample = torch.rand(1, requires_grad=True)

    balance = balance_by_size(2, model, sample, param_scale=0)
    assert balance == [2, 4]

    balance = balance_by_size(2, model, sample, param_scale=100)
    assert balance == [4, 2] 
Example #23
Source File: utils.py    From FlexTensor with MIT License
def gumbel_softmax(logits):
    import torch
    epsilon = 1e-20
    G = torch.rand_like(logits)
    # Gumbel-max trick: -log(-log(U)) with U ~ Uniform(0, 1) is Gumbel noise
    y = logits - torch.log(-torch.log(G + epsilon) + epsilon)
    soft_y = torch.softmax(y, dim=-1)
    _, index = soft_y.max(dim=-1)
    hard_y = torch.zeros_like(soft_y).view(-1, soft_y.shape[-1])
    hard_y.scatter_(1, index.view(-1, 1), 1)
    hard_y = hard_y.view(*soft_y.shape)
    return soft_y + (hard_y - soft_y).detach() 
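torch.rand_like supplies the uniforms for the Gumbel-max trick here: -log(-log(U)) is Gumbel noise, so adding it to the logits and taking the argmax draws a sample from softmax(logits). The last line is the straight-through estimator: the forward value equals the one-hot sample (up to rounding), while gradients flow through soft_y. A small usage sketch, assuming the gumbel_softmax above is in scope:

import torch

logits = torch.randn(4, 10, requires_grad=True)
y = gumbel_softmax(logits)   # forward value is (numerically) a one-hot sample per row
print(y.argmax(dim=-1))      # the sampled categories

y.sum().backward()           # straight-through: gradients reach the logits via soft_y
assert logits.grad is not None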
Example #24
Source File: utils.py    From FlexTensor with MIT License
def gumbel_softmax(logits):
    import torch
    epsilon = 1e-20
    G = torch.rand_like(logits)
    # Gumbel-max trick: -log(-log(U)) with U ~ Uniform(0, 1) is Gumbel noise
    y = logits - torch.log(-torch.log(G + epsilon) + epsilon)
    soft_y = torch.softmax(y, dim=-1)
    _, index = soft_y.max(dim=-1)
    hard_y = torch.zeros_like(soft_y).view(-1, soft_y.shape[-1])
    hard_y.scatter_(1, index.view(-1, 1), 1)
    hard_y = hard_y.view(*soft_y.shape)
    return soft_y + (hard_y - soft_y).detach() 
Example #25
Source File: energy.py    From torchsupport with MIT License
def prepare(self, batch_size):
    data = torch.rand(batch_size, *self.shape)  # sizes are passed here, so torch.rand, not rand_like
    return self.sample_type(
      data=data, args=None
    ) 
Example #26
Source File: __init__.py    From torchsupport with MIT License
def _conditional_bernoulli(self, hard):
  noise = torch.rand_like(hard)
  on_condition = noise * hard
  off_condition = noise * (1 - hard)
  on_condition = on_condition * self.probs + (1 - self.probs)
  off_condition = off_condition * (1 - self.probs)
  total = on_condition + off_condition
  soft_conditional = torch.log(self.probs / (1 - self.probs + 1e-16) + 1e-16)
  soft_conditional += torch.log(total / (1 - total + 1e-16) + 1e-16)
  return soft_conditional 
Example #27
Source File: __init__.py    From torchsupport with MIT License
def _conditional_categorical(self, hard):
  noise = -torch.log(torch.rand_like(self.logits) + 1e-16)
  on_condition = noise * hard
  off_condition = noise * (1 - hard)
  offset = on_condition.view(-1, hard.size(-1)).sum(dim=-1).view(*hard.shape[:-1], 1)
  off_condition = off_condition / (self.probs + 1e-16) - offset
  soft_conditional = -torch.log(on_condition + off_condition + 1e-16)
  return soft_conditional 
Example #28
Source File: samplers.py    From torchsupport with MIT License
def metropolis(self, current, proposal):
    log_alpha = - (proposal - current) / self.temperature
    alpha = log_alpha.exp().view(-1)
    uniform = torch.rand_like(alpha)
    accept = uniform < alpha
    return accept 
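This is the Metropolis acceptance rule: a proposed state with energy change dE is accepted with probability min(1, exp(-dE / temperature)), and torch.rand_like draws the uniforms that decide all acceptances in parallel. A small sketch of the rule outside the class (energy values are illustrative):

import torch

temperature = 1.0
current = torch.tensor([1.0, 2.0, 3.0])     # energies of the current states
proposal = torch.tensor([0.5, 2.5, 30.0])   # energies of the proposed states

alpha = (-(proposal - current) / temperature).exp()
accept = torch.rand_like(alpha) < alpha     # downhill moves (dE < 0) are always accepted
print(accept)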
Example #29
Source File: energy.py    From torchsupport with MIT License
def prepare(self):
    data = self.data[random.randrange(len(self.data))]
    _, reference, *condition = self.data_key(data)
    return (torch.rand_like(reference), reference, *condition) 
Example #30
Source File: energy.py    From torchsupport with MIT License
def bad_prepare(self):
    _, reference, *args = self.prepare()
    _, bad_reference, *_ = self.prepare()
    noise = 0.1 * torch.rand(bad_reference.size(0), 1, 1, 1)
    bad_reference = (1 - noise) * bad_reference + noise * torch.rand_like(bad_reference)
    result = (bad_reference, reference, *args)
    return result