Python torch.isclose() Examples

The following are 30 code examples of torch.isclose(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the torch module.
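Before the examples, a minimal sketch of the function itself may help: torch.isclose(input, other) compares two tensors elementwise and returns a boolean tensor, treating values as close when |input - other| <= atol + rtol * |other| (defaults rtol=1e-05, atol=1e-08). The tensors below are illustrative values only, not taken from any of the projects that follow.

import torch

a = torch.tensor([1.0, 2.0, float('nan')])
b = torch.tensor([1.0 + 1e-7, 2.1, float('nan')])

# Elementwise check of |a - b| <= atol + rtol * |b|, with defaults rtol=1e-05, atol=1e-08
print(torch.isclose(a, b))                  # tensor([ True, False, False])
print(torch.isclose(a, b, equal_nan=True))  # tensor([ True, False,  True])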
Example #1
Source File: test_losses.py    From MatchZoo-py with Apache License 2.0
def test_hinge_loss():
    true_value = torch.Tensor([[1.2], [1], [1], [1]])
    pred_value = torch.Tensor([[1.2], [0.1], [0], [-0.3]])
    expected_loss = torch.Tensor([(0 + 1 - 0.3 + 0) / 2.0])
    loss = losses.RankHingeLoss()(pred_value, true_value)
    assert torch.isclose(expected_loss, loss)
    expected_loss = torch.Tensor(
        [(2 + 0.1 - 1.2 + 2 - 0.3 + 0) / 2.0])
    loss = losses.RankHingeLoss(margin=2)(pred_value, true_value)
    assert torch.isclose(expected_loss, loss)
    true_value = torch.Tensor(
        [[1.2], [1], [0.8], [1], [1], [0.8]])
    pred_value = torch.Tensor(
        [[1.2], [0.1], [-0.5], [0], [0], [-0.3]])
    expected_loss = torch.Tensor(
        [(0 + 1 - 0.15) / 2.0])
    loss = losses.RankHingeLoss(num_neg=2, margin=1)(
        pred_value, true_value)
    assert torch.isclose(expected_loss, loss) 
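A side note on the assert pattern above (and in many of the examples that follow): torch.isclose() returns a boolean tensor, so a bare assert only works when the result holds a single element. The sketch below uses made-up values to show the reduction needed for larger tensors.

import torch

# One-element result: converts cleanly to a Python bool, so `assert` works.
assert torch.isclose(torch.tensor([0.35]), torch.tensor([0.35]))

# Multi-element result: reduce first with .all() or torch.allclose(),
# otherwise bool(tensor) raises an "ambiguous truth value" error.
many = torch.isclose(torch.tensor([0.1, 0.2]), torch.tensor([0.1, 0.3]))
assert not many.all()
assert not torch.allclose(torch.tensor([0.1, 0.2]), torch.tensor([0.1, 0.3]))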
Example #2
Source File: test_crypten.py    From CrypTen with MIT License
def test_rand(self):
        """Tests uniform random variable generation on [0, 1)"""
        for size in [(10,), (10, 10), (10, 10, 10)]:
            randvec = crypten.rand(*size)
            self.assertTrue(randvec.size() == size, "Incorrect size")
            tensor = randvec.get_plain_text()
            self.assertTrue(
                (tensor >= 0).all() and (tensor < 1).all(), "Invalid values"
            )

        randvec = crypten.rand(int(1e6)).get_plain_text()
        mean = torch.mean(randvec)
        var = torch.var(randvec)
        self.assertTrue(torch.isclose(mean, torch.Tensor([0.5]), rtol=1e-3, atol=1e-3))
        self.assertTrue(
            torch.isclose(var, torch.Tensor([1.0 / 12]), rtol=1e-3, atol=1e-3)
        ) 
Example #3
Source File: test_angular_loss.py    From pytorch-metric-learning with MIT License
def test_angular_loss(self):
        loss_func = AngularLoss(alpha=40)
        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()
        sq_tan_alpha = torch.tan(torch.tensor(np.radians(40)))**2
        triplets = [(0,1,2), (0,1,3), (0,1,4), (1,0,2), (1,0,3), (1,0,4), (2,3,0), (2,3,1), (2,3,4), (3,2,0), (3,2,1), (3,2,4)]

        correct_losses = [0,0,0,0]
        for a, p, n in triplets:
            anchor, positive, negative = embeddings[a], embeddings[p], embeddings[n]
            exponent = 4*sq_tan_alpha*torch.matmul(anchor+positive,negative) - 2*(1+sq_tan_alpha)*torch.matmul(anchor, positive)
            correct_losses[a] += torch.exp(exponent)
        total_loss = 0
        for c in correct_losses:
            total_loss += torch.log(1+c)
        total_loss /= len(correct_losses)
        self.assertTrue(torch.isclose(loss, total_loss.to(torch.float32))) 
Example #4
Source File: test_triplet_margin_loss.py    From pytorch-metric-learning with MIT License
def test_triplet_margin_loss(self):
        margin = 0.2
        loss_funcA = TripletMarginLoss(margin=margin)
        loss_funcB = TripletMarginLoss(margin=margin, reducer=MeanReducer())
        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        lossA = loss_funcA(embeddings, labels)
        lossB = loss_funcB(embeddings, labels)
        lossA.backward()
        lossB.backward()
        
        triplets = [(0,1,2), (0,1,3), (0,1,4), (1,0,2), (1,0,3), (1,0,4), (2,3,0), (2,3,1), (2,3,4), (3,2,0), (3,2,1), (3,2,4)]

        correct_loss = 0
        num_non_zero_triplets = 0
        for a, p, n in triplets:
            anchor, positive, negative = embeddings[a], embeddings[p], embeddings[n]
            curr_loss = torch.relu(torch.sqrt(torch.sum((anchor-positive)**2)) - torch.sqrt(torch.sum((anchor-negative)**2)) + margin)
            if curr_loss > 0:
                num_non_zero_triplets += 1
            correct_loss += curr_loss
        self.assertTrue(torch.isclose(lossA, correct_loss/num_non_zero_triplets))
        self.assertTrue(torch.isclose(lossB, correct_loss/len(triplets))) 
Example #5
Source File: test_cosface_loss.py    From pytorch-metric-learning with MIT License
def test_cosface_loss(self):
        margin = 0.5
        scale = 64
        loss_func = CosFaceLoss(margin=margin, scale=scale, num_classes=10, embedding_size=2)

        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        weights = torch.nn.functional.normalize(loss_func.W, p=2, dim=0)
        logits = torch.matmul(embeddings, weights)
        for i, c in enumerate(labels):
            logits[i, c] -= margin
        
        correct_loss = torch.nn.functional.cross_entropy(logits*scale, labels)
        self.assertTrue(torch.isclose(loss, correct_loss)) 
Example #6
Source File: over_time.py    From torch-kalman with MIT License
def _components(self) -> Dict[Tuple[str, str, str], Tuple[Tensor, Tensor]]:
        states_per_measure = defaultdict(list)
        for state_belief in self.state_beliefs:
            for m, measure in enumerate(self.design.measures):
                H = state_belief.H[:, m, :].data
                m = H * state_belief.means.data
                std = H * torch.diagonal(state_belief.covs.data, dim1=-2, dim2=-1).sqrt()
                states_per_measure[measure].append((m, std))

        out = {}
        for measure, means_and_stds in states_per_measure.items():
            means, stds = zip(*means_and_stds)
            means = torch.stack(means).permute(1, 0, 2)
            stds = torch.stack(stds).permute(1, 0, 2)
            for s, (process_name, state_element) in enumerate(self.design.state_elements):
                if ~torch.isclose(means[:, :, s].abs().max(), torch.zeros(1)):
                    out[(measure, process_name, state_element)] = (means[:, :, s], stds[:, :, s])
        return out 
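The `~torch.isclose(...)` in the example above is elementwise negation of a boolean tensor rather than Python's `not`; for a single-element result the two behave the same inside an `if`. A minimal illustration with made-up values:

import torch

close = torch.isclose(torch.tensor(0.0), torch.zeros(1))  # tensor([True])
print(~close)      # tensor([False]) -- elementwise NOT on a bool tensor
print(not close)   # False -- Python-level negation of the one-element tensor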
Example #7
Source File: test_losses.py    From MatchZoo-py with Apache License 2.0
def test_rank_crossentropy_loss():
    losses.neg_num = 1

    def softmax(x):
        return np.exp(x) / np.sum(np.exp(x), axis=0)

    true_value = torch.Tensor([[1.], [0.], [0.], [1.]])
    pred_value = torch.Tensor([[0.8], [0.1], [0.8], [0.1]])
    expected_loss = torch.Tensor(
        [(-np.log(softmax([0.8, 0.1])[0]) - np.log(
            softmax([0.8, 0.1])[1])) / 2])
    loss = losses.RankCrossEntropyLoss()(pred_value, true_value)
    assert torch.isclose(expected_loss, loss)
    true_value = torch.Tensor([[1.], [0.], [0.], [0.], [1.], [0.]])
    pred_value = torch.Tensor([[0.8], [0.1], [0.1], [0.8], [0.1], [0.1]])
    expected_loss = torch.Tensor(
        [(-np.log(softmax([0.8, 0.1, 0.1])[0]) - np.log(
            softmax([0.8, 0.1, 0.1])[1])) / 2])
    loss = losses.RankCrossEntropyLoss(num_neg=2)(
        pred_value, true_value)
    assert torch.isclose(expected_loss, loss) 
Example #8
Source File: test_layers.py    From pytorch-image-models with Apache License 2.0
def _run_act_layer_grad(act_type):
    x = torch.rand(10, 1000) * 10
    m = MLP(act_layer=act_type)

    def _run(x, act_layer=''):
        if act_layer:
            # replace act layer if set
            m.act = create_act_layer(act_layer, inplace=True)
        out = m(x)
        l = (out - 0).pow(2).sum()
        return l

    out_me = _run(x)

    with set_layer_config(scriptable=True):
        out_jit = _run(x, act_type)

    assert torch.isclose(out_jit, out_me)

    with set_layer_config(no_jit=True):
        out_basic = _run(x, act_type)

    assert torch.isclose(out_basic, out_jit) 
Example #9
Source File: test_pate.py    From PySyft with Apache License 2.0
def test_torch_ref_match():

    # Verify if the torch implementation values match the original Numpy implementation.

    num_teachers, num_examples, num_labels = (100, 50, 10)
    preds = (np.random.rand(num_teachers, num_examples) * num_labels).astype(int)  # fake preds

    indices = (np.random.rand(num_examples) * num_labels).astype(int)  # true answers

    preds[:, 0:10] *= 0

    data_dep_eps, data_ind_eps = pate.perform_analysis_torch(
        preds, indices, noise_eps=0.1, delta=1e-5
    )

    data_dep_eps_ref, data_ind_eps_ref = pate.perform_analysis(
        preds, indices, noise_eps=0.1, delta=1e-5
    )

    assert torch.isclose(data_dep_eps, torch.tensor(data_dep_eps_ref, dtype=torch.float32))
    assert torch.isclose(data_ind_eps, torch.tensor(data_ind_eps_ref, dtype=torch.float32)) 
Example #10
Source File: test_proxy_nca_loss.py    From pytorch-metric-learning with MIT License
def test_proxy_nca_loss(self):
        softmax_scale = 10
        loss_func = ProxyNCALoss(softmax_scale=softmax_scale, num_classes=10, embedding_size=2)

        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        proxies = torch.nn.functional.normalize(loss_func.proxies, p=2, dim=1)
        correct_loss = 0
        for i in range(len(embeddings)):
            curr_emb, curr_label = embeddings[i], labels[i]
            curr_proxy = proxies[curr_label]
            denominator = torch.sum((curr_emb-proxies)**2, dim=1)
            denominator = torch.sum(torch.exp(-denominator*softmax_scale))
            numerator = torch.sum((curr_emb-curr_proxy)**2)
            numerator = torch.exp(-numerator*softmax_scale)
            correct_loss += -torch.log(numerator/denominator)
        
        correct_loss /= len(embeddings)
        self.assertTrue(torch.isclose(loss, correct_loss)) 
Example #11
Source File: test_arcface_loss.py    From pytorch-metric-learning with MIT License
def test_arcface_loss(self):
        margin = 30
        scale = 64
        loss_func = ArcFaceLoss(margin=margin, scale=scale, num_classes=10, embedding_size=2)

        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        weights = torch.nn.functional.normalize(loss_func.W, p=2, dim=0)
        logits = torch.matmul(embeddings, weights)
        for i, c in enumerate(labels):
            logits[i, c] = torch.cos(torch.acos(logits[i, c]) + torch.tensor(np.radians(margin)))
        
        correct_loss = torch.nn.functional.cross_entropy(logits*scale, labels)
        self.assertTrue(torch.isclose(loss, correct_loss)) 
Example #12
Source File: test_regular_face_regularizer.py    From pytorch-metric-learning with MIT License
def test_regular_face_regularizer(self):
        temperature = 0.1
        num_classes = 10
        embedding_size = 512
        reg_weight = 0.1
        loss_func = NormalizedSoftmaxLoss(temperature=temperature, 
                                            num_classes=num_classes, 
                                            embedding_size=embedding_size,
                                            regularizer=RegularFaceRegularizer(),
                                            reg_weight=reg_weight)

        embeddings = torch.nn.functional.normalize(torch.randn((180, embedding_size), requires_grad=True, dtype=torch.float))
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        weights = torch.nn.functional.normalize(loss_func.W, p=2, dim=0)
        logits = torch.matmul(embeddings, weights)
        correct_class_loss = torch.nn.functional.cross_entropy(logits/temperature, labels)

        weight_cos_matrix = torch.matmul(weights.t(), weights)
        weight_cos_matrix.fill_diagonal_(float('-inf'))
        correct_reg_loss = 0
        for i in range(num_classes):
            correct_reg_loss += torch.max(weight_cos_matrix[i])
        correct_reg_loss /= num_classes

        correct_total_loss = correct_class_loss+(correct_reg_loss*reg_weight)
        self.assertTrue(torch.isclose(loss, correct_total_loss)) 
Example #13
Source File: test_multi_similarity_loss.py    From pytorch-metric-learning with MIT License
def test_multi_similarity_loss(self):
        alpha, beta, base = 0.1, 40, 0.5
        loss_func = MultiSimilarityLoss(alpha=alpha, beta=beta, base=base)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_total = 0
        for i in range(len(embeddings)):
            correct_pos_loss = 0
            correct_neg_loss = 0
            for a,p in pos_pairs:
                if a == i:
                    anchor, positive = embeddings[a], embeddings[p]
                    correct_pos_loss += torch.exp(-alpha*(torch.matmul(anchor,positive)-base))
            if correct_pos_loss > 0:
                correct_pos_loss = (1/alpha) * torch.log(1+correct_pos_loss)

            for a,n in neg_pairs:
                if a == i:
                    anchor, negative = embeddings[a], embeddings[n]
                    correct_neg_loss += torch.exp(beta*(torch.matmul(anchor,negative)-base))
            if correct_neg_loss > 0:
                correct_neg_loss = (1/beta) * torch.log(1+correct_neg_loss)
            correct_total += correct_pos_loss + correct_neg_loss

        correct_total /= embeddings.size(0)
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #14
Source File: test_margin_loss.py    From pytorch-metric-learning with MIT License
def test_margin_loss(self):
        for learn_beta, num_classes in [(False, None), (True, None), (False, 3), (True, 3)]:
            margin, nu, beta = 0.1, 0.1, 1
            loss_func = MarginLoss(margin=margin, nu=nu, beta=beta, learn_beta=learn_beta, num_classes=num_classes)

            embedding_angles = [0, 20, 40, 60, 80]
            embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
            labels = torch.LongTensor([0, 0, 1, 1, 2])

            loss = loss_func(embeddings, labels)
            loss.backward()

            triplets = [(0,1,2), (0,1,3), (0,1,4), (1,0,2), (1,0,3), (1,0,4), (2,3,0), (2,3,1), (2,3,4), (3,2,0), (3,2,1), (3,2,4)]

            correct_total_loss = 0
            num_non_zero = 0
            for a, p, n in triplets:
                anchor, positive, negative = embeddings[a], embeddings[p], embeddings[n]
                pos_loss = torch.relu(torch.sqrt(torch.sum((anchor-positive)**2)) - beta + margin)
                neg_loss = torch.relu(beta - torch.sqrt(torch.sum((anchor-negative)**2)) + margin)
                correct_total_loss += pos_loss + neg_loss
                if pos_loss > 0:
                    num_non_zero += 1
                if neg_loss > 0:
                    num_non_zero += 1
                    
            if num_non_zero > 0:
                correct_total_loss /= num_non_zero
                if learn_beta:
                    if num_classes is None:
                        correct_beta_reg_loss = (loss_func.beta*nu)
                    else:
                        anchor_idx = [x[0] for x in triplets]
                        correct_beta_reg_loss = torch.sum(loss_func.beta[labels[anchor_idx]]*nu) / num_non_zero
                    correct_total_loss += correct_beta_reg_loss.item()

            self.assertTrue(torch.isclose(loss, correct_total_loss)) 
Example #15
Source File: test_npairs_loss.py    From pytorch-metric-learning with MIT License
def test_npairs_loss(self):
        loss_funcA = NPairsLoss()
        loss_funcB = NPairsLoss(l2_reg_weight=1)

        embedding_angles = list(range(0,180,20))[:7]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 1, 2, 3])

        lossA = loss_funcA(embeddings, labels)
        lossB = loss_funcB(embeddings, labels)

        lossA.backward()
        lossB.backward()

        pos_pairs = [(0,1), (2,3)]
        neg_pairs = [(0,3), (2,1)]

        total_loss = 0
        for a1, p in pos_pairs:
            anchor, positive = embeddings[a1], embeddings[p]
            numerator = torch.exp(torch.matmul(anchor, positive))
            denominator = numerator.clone()
            for a2, n in neg_pairs:
                if a2 == a1:
                    negative = embeddings[n]
                    denominator += torch.exp(torch.matmul(anchor, negative))
            curr_loss = -torch.log(numerator/denominator)
            total_loss += curr_loss
        
        total_loss /= len(pos_pairs[0])
        self.assertTrue(torch.isclose(lossA, total_loss))
        self.assertTrue(torch.isclose(lossB, total_loss+1)) # l2_reg is going to be 1 since the embeddings are normalized 
Example #16
Source File: test_center_invariant_regularizer.py    From pytorch-metric-learning with MIT License
def test_center_invariant_regularizer(self):
        temperature = 0.1
        num_classes = 10
        embedding_size = 512
        reg_weight = 0.1
        loss_func = NormalizedSoftmaxLoss(temperature=temperature, 
                                            num_classes=num_classes, 
                                            embedding_size=embedding_size,
                                            regularizer=CenterInvariantRegularizer(),
                                            reg_weight=reg_weight)

        embeddings = torch.nn.functional.normalize(torch.randn((180, embedding_size), requires_grad=True, dtype=torch.float))
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        weights = torch.nn.functional.normalize(loss_func.W, p=2, dim=0)
        logits = torch.matmul(embeddings, weights)
        correct_class_loss = torch.nn.functional.cross_entropy(logits/temperature, labels)

        correct_reg_loss = 0
        average_squared_weight_norms = 0
        for i in range(num_classes):
            average_squared_weight_norms += torch.norm(loss_func.W[:,i], p=2)**2
        average_squared_weight_norms /= num_classes
        for i in range(num_classes):
            deviation = torch.norm(loss_func.W[:,i], p=2)**2 - average_squared_weight_norms
            correct_reg_loss += (deviation**2) / 4
        correct_reg_loss /= num_classes

        correct_total_loss = correct_class_loss+(correct_reg_loss*reg_weight)
        self.assertTrue(torch.isclose(loss, correct_total_loss)) 
Example #17
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(a, lincomb=lincomb)
    assert torch.isfinite(mid).all()
    assert mid.shape == (a.shape[-1],)
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(())) 
Example #18
Source File: test_lifted_structure_loss.py    From pytorch-metric-learning with MIT License
def test_lifted_structure_loss(self):
        neg_margin = 0.5
        loss_func = LiftedStructureLoss(neg_margin=neg_margin)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        total_loss = 0
        for a1,p in pos_pairs:
            anchor, positive = embeddings[a1], embeddings[p]
            pos_pair_component = torch.sqrt(torch.sum((anchor-positive)**2))
            neg_pair_component = 0
            for a2,n in neg_pairs:
                negative = embeddings[n]
                if a2 == a1:
                    neg_pair_component += torch.exp(neg_margin - torch.sqrt(torch.sum((anchor-negative)**2)))
                elif a2 == p:
                    neg_pair_component += torch.exp(neg_margin - torch.sqrt(torch.sum((positive-negative)**2)))
                else:
                    continue
            total_loss += torch.relu(torch.log(neg_pair_component) + pos_pair_component)**2
        
        total_loss /= 2*len(pos_pairs)

        self.assertTrue(torch.isclose(loss, total_loss)) 
Example #19
Source File: test_lifted_structure_loss.py    From pytorch-metric-learning with MIT License
def test_generalized_lifted_structure_loss(self):
        neg_margin = 0.5
        loss_func = GeneralizedLiftedStructureLoss(neg_margin=neg_margin)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_total = 0
        for i in range(len(embeddings)):
            correct_pos_loss = 0
            correct_neg_loss = 0
            for a,p in pos_pairs:
                if a == i:
                    anchor, positive = embeddings[a], embeddings[p]
                    correct_pos_loss += torch.exp(torch.sqrt(torch.sum((anchor-positive)**2)))
            if correct_pos_loss > 0:
                correct_pos_loss = torch.log(correct_pos_loss)

            for a,n in neg_pairs:
                if a == i:
                    anchor, negative = embeddings[a], embeddings[n]
                    correct_neg_loss += torch.exp(neg_margin - torch.sqrt(torch.sum((anchor-negative)**2)))
            if correct_neg_loss > 0:
                correct_neg_loss = torch.log(correct_neg_loss)

            correct_total += torch.relu(correct_pos_loss + correct_neg_loss)

        correct_total /= embeddings.size(0)

        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #20
Source File: test_normalized_softmax_loss.py    From pytorch-metric-learning with MIT License
def test_normalized_softmax_loss(self):
        temperature = 0.1
        loss_func = NormalizedSoftmaxLoss(temperature=temperature, num_classes=10, embedding_size=2)

        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=10, size=(180,))

        loss = loss_func(embeddings, labels)
        loss.backward()

        weights = torch.nn.functional.normalize(loss_func.W, p=2, dim=0)
        logits = torch.matmul(embeddings, weights)
        correct_loss = torch.nn.functional.cross_entropy(logits/temperature, labels)
        self.assertTrue(torch.isclose(loss, correct_loss)) 
Example #21
Source File: test_tuplet_margin_loss.py    From pytorch-metric-learning with MIT License
def test_tuplet_margin_loss(self):
        margin, scale = 5, 64
        loss_func = TupletMarginLoss(margin=margin, scale=scale)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_total = 0
        
        for a1,p in pos_pairs:
            curr_loss = 0
            anchor1, positive = embeddings[a1], embeddings[p]
            ap_angle = torch.acos(torch.matmul(anchor1, positive)) #embeddings are normalized, so dot product == cosine
            ap_cos = torch.cos(ap_angle-np.radians(margin))
            for a2,n in neg_pairs:
                if a2 == a1:
                    anchor2, negative = embeddings[a2], embeddings[n]
                    an_cos = torch.matmul(anchor2, negative)
                    curr_loss += torch.exp(scale*(an_cos-ap_cos))

            curr_total = torch.log(1+curr_loss)
            correct_total += curr_total
            
        correct_total /= len(pos_pairs)
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #22
Source File: test_signal_to_noise_ratio_losses.py    From pytorch-metric-learning with MIT License
def test_snr_contrastive_loss(self):
        pos_margin, neg_margin, regularizer_weight = 0, 0.1, 0.1
        loss_func = SignalToNoiseRatioContrastiveLoss(pos_margin=pos_margin, neg_margin=neg_margin, regularizer_weight=regularizer_weight)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_pos_loss = 0
        correct_neg_loss = 0
        num_non_zero = 0
        for a,p in pos_pairs:
            anchor, positive = embeddings[a], embeddings[p]
            curr_loss = torch.relu(torch.var(anchor-positive) / torch.var(anchor) - pos_margin)
            correct_pos_loss += curr_loss
            if curr_loss > 0:
                num_non_zero += 1
        if num_non_zero > 0:
            correct_pos_loss /= num_non_zero

        num_non_zero = 0
        for a,n in neg_pairs:
            anchor, negative = embeddings[a], embeddings[n]
            curr_loss = torch.relu(neg_margin - torch.var(anchor-negative) / torch.var(anchor))
            correct_neg_loss += curr_loss
            if curr_loss > 0:
                num_non_zero += 1
        if num_non_zero > 0:
            correct_neg_loss /= num_non_zero

        reg_loss = torch.mean(torch.abs(torch.sum(embeddings, dim=1)))

        correct_total = correct_pos_loss + correct_neg_loss + regularizer_weight*reg_loss
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #23
Source File: test_intra_pair_variance_loss.py    From pytorch-metric-learning with MIT License
def test_intra_pair_variance_loss(self):
        pos_eps, neg_eps = 0.01, 0.02
        loss_func = IntraPairVarianceLoss(pos_eps, neg_eps)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        pos_total, neg_total = 0, 0
        mean_pos = 0
        mean_neg = 0
        for a,p in pos_pairs:
            mean_pos += torch.matmul(embeddings[a], embeddings[p])
        for a,n in neg_pairs:
            mean_neg += torch.matmul(embeddings[a], embeddings[n])
        mean_pos /= len(pos_pairs)
        mean_neg /= len(neg_pairs)

        for a,p in pos_pairs:
            pos_total += torch.relu(((1-pos_eps)*mean_pos - torch.matmul(embeddings[a], embeddings[p])))**2
        for a,n in neg_pairs:
            neg_total += torch.relu((torch.matmul(embeddings[a], embeddings[n])-(1+neg_eps)*mean_neg))**2

        pos_total /= len(pos_pairs)
        neg_total /= len(neg_pairs)
        correct_total = pos_total+neg_total
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #24
Source File: test_ntxent_loss.py    From pytorch-metric-learning with MIT License
def test_ntxent_loss(self):
        temperature = 0.1
        loss_func = NTXentLoss(temperature=temperature)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        total_loss = 0
        for a1,p in pos_pairs:
            anchor, positive = embeddings[a1], embeddings[p]
            numerator = torch.exp(torch.matmul(anchor, positive)/temperature)
            denominator = numerator.clone()
            for a2,n in neg_pairs:
                if a2 == a1:
                    negative = embeddings[n]
                else:
                    continue
                denominator += torch.exp(torch.matmul(anchor, negative)/temperature)
            curr_loss = -torch.log(numerator/denominator)
            total_loss += curr_loss
        
        total_loss /= len(pos_pairs)
        self.assertTrue(torch.isclose(loss, total_loss)) 
Example #25
Source File: test_circle_loss.py    From pytorch-metric-learning with MIT License
def test_circle_loss(self):
        margin, gamma = 0.4, 2
        Op, On = 1+margin, -margin
        delta_p, delta_n = 1-margin, margin
        loss_func = CircleLoss(m=margin, gamma=gamma)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_total = 0
        totals = []
        for i in range(len(embeddings)):
            pos_exp = 0
            neg_exp = 0
            for a,p in pos_pairs:
                if a == i:
                    anchor, positive = embeddings[a], embeddings[p]
                    ap_sim = torch.matmul(anchor,positive)
                    logit_p = -gamma*torch.relu(Op-ap_sim)*(ap_sim-delta_p)
                    pos_exp += torch.exp(logit_p)

            for a,n in neg_pairs:
                if a == i:
                    anchor, negative = embeddings[a], embeddings[n]
                    an_sim = torch.matmul(anchor,negative)
                    logit_n = gamma*torch.relu(an_sim-On)*(an_sim-delta_n)
                    neg_exp += torch.exp(logit_n)

            totals.append(torch.log(1+pos_exp*neg_exp))
            correct_total += torch.log(1+pos_exp*neg_exp)

        correct_total /= 4 # only 4 of the embeddings have both pos and neg pairs
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #26
Source File: test_nca_loss.py    From pytorch-metric-learning with MIT License
def test_nca_loss(self):
        softmax_scale = 10
        loss_func = NCALoss(softmax_scale=softmax_scale)

        embedding_angles = [0, 20, 40, 60, 80]
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.LongTensor([0, 0, 1, 1, 2])

        loss = loss_func(embeddings, labels)
        loss.backward()

        pos_pairs = [(0,1), (1,0), (2,3), (3,2)]
        neg_pairs = [(0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,0), (2,1), (2,4), (3,0), (3,1), (3,4), (4,0), (4,1), (4,2), (4,3)]

        correct_total = 0
        for a1,p in pos_pairs:
            anchor1, positive = embeddings[a1], embeddings[p]
            ap_dist = torch.sum((anchor1-positive)**2)
            numerator = torch.exp(-ap_dist*softmax_scale)
            denominator = numerator.clone()
            for a2,n in neg_pairs:
                if a2 == a1:
                    anchor2, negative = embeddings[a2], embeddings[n]
                    an_dist = torch.sum((anchor2-negative)**2)
                    denominator += torch.exp(-an_dist*softmax_scale)

            correct_total += -torch.log(numerator/denominator)

        correct_total /= len(pos_pairs)
        self.assertTrue(torch.isclose(loss, correct_total)) 
Example #27
Source File: test_vsl.py    From bpr with MIT License
def test_calculate_recall_float():
    pred = torch.tensor([1, 2, 3, 5, 7, 6], dtype=torch.long)
    true = torch.tensor([1, 2, 3, 4, 5], dtype=torch.long)
    pred = VariableShapeList.from_tensors([pred])
    true = VariableShapeList.from_tensors([true])

    recall = vsl_recall(pred, true)
    assert torch.isclose(recall[0], torch.tensor(4/5)) 
Example #28
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_add_infinity_and_beyond(a, b, c, negative, manifold, dtype):
    _a = a
    if torch.isclose(c, c.new_zeros(())).any():
        pytest.skip("zero not checked")
    infty = b * 10000000
    for i in range(100):
        z = manifold.expmap(a, infty, project=False)
        z = manifold.projx(z)
        assert not torch.isnan(z).any(), ("Found nans", i, z)
        assert torch.isfinite(z).all(), ("Found Infs", i, z)
        z = manifold.mobius_scalar_mul(
            torch.tensor(1000.0, dtype=z.dtype), z, project=False
        )
        z = manifold.projx(z)
        assert not torch.isnan(z).any(), ("Found nans", i, z)
        assert torch.isfinite(z).all(), ("Found Infs", i, z)

        infty = manifold.transp(a, z, infty)
        assert torch.isfinite(infty).all(), (i, infty)
        a = z
    z = manifold.expmap(a, -infty)
    # they just need to be very far, exact answer is not supposed
    tolerance = {
        torch.float32: dict(rtol=3e-1, atol=2e-1),
        torch.float64: dict(rtol=1e-1, atol=1e-3),
    }
    if negative:
        np.testing.assert_allclose(z.detach(), -a.detach(), **tolerance[dtype])
    else:
        assert not torch.isnan(z).any(), "Found nans"
        assert not torch.isnan(a).any(), "Found nans" 
Example #29
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_reduce_dim(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)
    assert mid.shape == a.shape[-2:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(())) 
Example #30
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_weighted(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(
        a, reducedim=[0], lincomb=lincomb, weights=torch.rand_like(a[..., 0])
    )
    assert mid.shape == a.shape[-2:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))