Python torch.nn.functional.pairwise_distance() Examples

The following are 30 code examples of torch.nn.functional.pairwise_distance(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the torch.nn.functional module.
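As a quick orientation before the examples (a minimal sketch, not taken from any of the projects below): F.pairwise_distance(x1, x2, p) computes the p-norm distance between corresponding rows of two batches, returning one distance per row rather than a full distance matrix.

import torch
import torch.nn.functional as F

a = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
b = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]])

# Row-wise Euclidean distances, shape (2,): approximately [sqrt(2), 0.0]
print(F.pairwise_distance(a, b, p=2))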
Example #1
Source File: test_inference_remote.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_remote_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #2
Source File: distillation.py    From incremental_learning.pytorch with MIT License
def perceptual_features_reconstruction(list_attentions_a, list_attentions_b, factor=1.):
    loss = 0.

    for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
        bs, c, w, h = a.shape

        # a of shape (b, c, w, h) to (b, c * w * h)
        a = a.view(bs, -1)
        b = b.view(bs, -1)

        a = F.normalize(a, p=2, dim=-1)
        b = F.normalize(b, p=2, dim=-1)

        layer_loss = (F.pairwise_distance(a, b, p=2)**2) / (c * w * h)
        loss += torch.mean(layer_loss)

    return factor * (loss / len(list_attentions_a)) 
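A hedged usage sketch for the loss above (the shapes and the two-network framing are illustrative assumptions, not taken from the repository): each list entry is a feature map of shape (batch, channels, width, height), and the loss averages squared L2 distances between L2-normalized flattened activations.

import torch

# Hypothetical activations from two networks at two depths; shapes must match pairwise.
list_a = [torch.randn(8, 16, 4, 4), torch.randn(8, 32, 2, 2)]
list_b = [torch.randn(8, 16, 4, 4), torch.randn(8, 32, 2, 2)]

loss = perceptual_features_reconstruction(list_a, list_b, factor=1.)
print(loss)  # scalar tensor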
Example #3
Source File: tripletnet.py    From fashion-compatibility with BSD 3-Clause "New" or "Revised" License
def text_forward(self, x, y, z):
        """ x: Anchor data
            y: Distant (negative) data
            z: Close (positive) data
        """
        desc_x = self.text_branch(x.text)
        desc_y = self.text_branch(y.text)
        desc_z = self.text_branch(z.text)
        distd_p = F.pairwise_distance(desc_y, desc_z, 2)
        distd_n1 = F.pairwise_distance(desc_x, desc_y, 2)
        distd_n2 = F.pairwise_distance(desc_x, desc_z, 2)
        has_text = x.has_text * y.has_text * z.has_text
        loss_sim_t1 = selective_margin_loss(distd_p, distd_n1, self.margin, has_text)
        loss_sim_t2 = selective_margin_loss(distd_p, distd_n2, self.margin, has_text)
        loss_sim_t = (loss_sim_t1 + loss_sim_t2) / 2.
        return loss_sim_t, desc_x, desc_y, desc_z 
Example #4
Source File: tripletnet.py    From fashion-compatibility with BSD 3-Clause "New" or "Revised" License
def calc_vse_loss(self, desc_x, general_x, general_y, general_z, has_text):
        """ Both y and z are assumed to be negatives because they are not from the same 
            item as x

            desc_x: Anchor language embedding
            general_x: Anchor visual embedding
            general_y: Visual embedding from another item from input triplet
            general_z: Visual embedding from another item from input triplet
            has_text: Binary indicator of whether x had a text description
        """
        distd1_p = F.pairwise_distance(general_x, desc_x, 2)
        distd1_n1 = F.pairwise_distance(general_y, desc_x, 2)
        distd1_n2 = F.pairwise_distance(general_z, desc_x, 2)
        loss_vse_1 = selective_margin_loss(distd1_p, distd1_n1, self.margin, has_text)
        loss_vse_2 = selective_margin_loss(distd1_p, distd1_n2, self.margin, has_text)
        return (loss_vse_1 + loss_vse_2) / 2. 
Example #5
Source File: actor_observer_wrapper.py    From PyVideoResearch with GNU General Public License v3.0
def base(self, x, y, z):
        #base_y = self.basenet(y)
        #if random.random() > .5:  # TODO Debug, make sure order doesn't matter
        #    base_x = self.basenet(x)
        #    base_z = self.basenet(z)
        #else:
        #    base_z = self.basenet(z)
        #    base_x = self.basenet(x)
        base_x = self.basenet(x)
        base_y = self.basenet(y)
        base_z = self.basenet(z)

        if self.distance == 'cosine':
            dist_a = .5 - .5 * F.cosine_similarity(base_x, base_y, 1, 1e-6).view(-1)
            dist_b = .5 - .5 * F.cosine_similarity(base_y, base_z, 1, 1e-6).view(-1)
        elif self.distance == 'l2':
            dist_a = F.pairwise_distance(base_x, base_y, 2).view(-1)
            dist_b = F.pairwise_distance(base_y, base_z, 2).view(-1)
        else:
            assert False, "Wrong args.distance"

        print('fc7 norms:', base_x.norm().item(), base_y.norm().item(), base_z.norm().item())
        print('pairwise dist means:', dist_a.mean().item(), dist_b.mean().item())
        return base_x, base_y, base_z, dist_a, dist_b 
Example #6
Source File: lite_model.py    From castor with Apache License 2.0
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        regM1, regM2 = [], []
        for ws in self.filter_widths:
            x1 = sent1_block_a[ws]['max'].unsqueeze(2)
            x2 = sent2_block_a[ws]['max'].unsqueeze(2)
            if np.isinf(ws):
                x1 = x1.expand(-1, self.n_holistic_filters, -1)
                x2 = x2.expand(-1, self.n_holistic_filters, -1)
            regM1.append(x1)
            regM2.append(x2)

        regM1 = torch.cat(regM1, dim=2)
        regM2 = torch.cat(regM2, dim=2)

        # Cosine similarity
        comparison_feats.append(F.cosine_similarity(regM1, regM2, dim=2))
        # Euclidean distance
        pairwise_distances = []
        for x1, x2 in zip(regM1, regM2):
            dist = F.pairwise_distance(x1, x2).view(1, -1)
            pairwise_distances.append(dist)
        comparison_feats.append(torch.cat(pairwise_distances))

        return torch.cat(comparison_feats, dim=1) 
Example #7
Source File: loss.py    From deep-image-retrieval with BSD 3-Clause "New" or "Revised" License
def forward(self, anchor, positive, negative):
        assert anchor.size() == positive.size(), "Input sizes between anchor and positive must be equal."
        assert anchor.size() == negative.size(), "Input sizes between anchor and negative must be equal."
        assert positive.size() == negative.size(), "Input sizes between positive and negative must be equal."
        assert anchor.dim() == 2, "Input must be a 2D matrix."

        d_p = F.pairwise_distance(anchor, positive, self.p, self.eps)
        d_n = F.pairwise_distance(anchor, negative, self.p, self.eps)

        if self.swap:
            d_s = F.pairwise_distance(positive, negative, self.p, self.eps)
            d_n = torch.min(d_n, d_s)

        dist = torch.log(1 + torch.exp(d_p - d_n))
        loss = torch.mean(dist)
        return loss 
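A side note on the soft-margin formulation above: torch.log(1 + torch.exp(d_p - d_n)) is the softplus function, so an equivalent and numerically safer variant (a sketch, not the repository's code) replaces the last three lines with:

        # softplus(x) == log(1 + exp(x)), computed stably for large x
        loss = torch.mean(F.softplus(d_p - d_n))
        return loss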
Example #8
Source File: test_inference.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_hmm_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        start = time.time()
        posterior = self._model.posterior_results(samples, observe=observation)
        add_importance_sampling_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #9
Source File: model.py    From castor with Apache License 2.0
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a, sent1_block_b, sent2_block_b):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for pool in ('max', 'min', 'mean'):
            for ws1 in self.filter_widths:
                x1 = sent1_block_a[ws1][pool]
                for ws2 in self.filter_widths:
                    x2 = sent2_block_a[ws2][pool]
                    if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                        comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                        comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                        comparison_feats.append(torch.abs(x1 - x2))

        for pool in ('max', 'min'):
            for ws in ws_no_inf:
                oG_1B = sent1_block_b[ws][pool]
                oG_2B = sent2_block_b[ws][pool]
                for i in range(0, self.n_per_dim_filters):
                    x1 = oG_1B[:, :, i]
                    x2 = oG_2B[:, :, i]
                    comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #10
Source File: test_inference.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_hmm_posterior_importance_sampling_with_inference_network_ff(self):
        samples = importance_sampling_with_inference_network_ff_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_ff_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 32} for i in range(len(observation))}, prior_inflation=importance_sampling_with_inference_network_ff_prior_inflation, inference_network=InferenceNetwork.FEEDFORWARD)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_ff_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_ff_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #11
Source File: test_inference.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_hmm_posterior_importance_sampling_with_inference_network_lstm(self):
        samples = importance_sampling_with_inference_network_ff_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_lstm_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 32} for i in range(len(observation))}, prior_inflation=importance_sampling_with_inference_network_lstm_prior_inflation, inference_network=InferenceNetwork.LSTM)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_lstm_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_lstm_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #12
Source File: triplet_net.py    From mmfashion with Apache License 2.0
def embed_forward(self, embed_x, embed_y, embed_z):
        """embed_x, mask_norm_x: type_specific net output (Anchor)
           embed_y, mask_norm_y: type_specific net output (Negative)
           embed_z, mask_norm_z: type_specific net output (Positive)
           conditions: only x (anchor data) has conditions
        """
        if self.metric_branch is None:
            dist_neg = F.pairwise_distance(embed_x, embed_y, 2)
            dist_pos = F.pairwise_distance(embed_x, embed_z, 2)
        else:
            dist_neg = self.metric_branch(embed_x * embed_y)
            dist_pos = self.metric_branch(embed_x * embed_z)

        target = torch.FloatTensor(dist_neg.size()).fill_(1)
        target = Variable(target.cuda())

        # type specific triplet loss
        loss_type_triplet = self.loss_triplet(dist_neg, dist_pos, target)
        return loss_type_triplet 
Example #13
Source File: pytorch_clusters.py    From pytorch_active_learning with MIT License
def cosine_similary(self, item):
        text = item[1]
        words = text.split()  
        
        vector = [0] * len(self.feature_vector)
        for word in words:
            if word not in self.feature_idx:
                self.feature_idx[word] = len(self.feature_vector)
                self.feature_vector.append(0)
                vector.append(1)
            else:
                while len(vector) <= self.feature_idx[word]:
                    vector.append(0)
                    self.feature_vector.append(0)
                              
                vector[self.feature_idx[word]] += 1
        
        item_tensor = torch.FloatTensor(vector)
        cluster_tensor = torch.FloatTensor(self.feature_vector)
        
        similarity = F.cosine_similarity(item_tensor, cluster_tensor, 0)
        
        # Alternatively, use `F.pairwise_distance()` after L2-normalizing both vectors
        
        return similarity.item() # item() converts tensor value to float 
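The normalization hinted at in the comment above matters because, for L2-normalized vectors, squared Euclidean distance and cosine similarity are directly related: ||a - b||^2 = 2 - 2 * cos(a, b). A minimal sketch of the equivalence (illustrative, not part of the project):

import torch
import torch.nn.functional as F

a = F.normalize(torch.randn(5, 32), p=2, dim=1)
b = F.normalize(torch.randn(5, 32), p=2, dim=1)

cos = F.cosine_similarity(a, b, dim=1)
dist = F.pairwise_distance(a, b, p=2)
print(torch.allclose(dist ** 2, 2 - 2 * cos, atol=1e-4))  # True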
Example #14
Source File: fast_gaussian.py    From torchsupport with MIT License
def kmeans(input, n_clusters=16, tol=1e-6):
  """
  TODO: check correctness
  """
  # Draw distinct initial centers from the points; index tensors must be long.
  indices = torch.from_numpy(
    np.random.choice(input.size(-1), n_clusters, replace=False))
  values = input[:, :, indices]

  while True:
    # Compare every point with every center: expand both operands to
    # (batch, dim, clusters, points), flatten to 2D rows, and let
    # pairwise_distance reduce over the feature axis.
    expanded_input = input.unsqueeze(2).expand(
      -1, -1, values.size(2), input.size(2))
    expanded_values = values.unsqueeze(3).expand(
      -1, -1, values.size(2), input.size(2))
    dist = func.pairwise_distance(
      expanded_input.permute(0, 2, 3, 1).reshape(-1, input.size(1)),
      expanded_values.permute(0, 2, 3, 1).reshape(-1, input.size(1))
    ).view(input.size(0), values.size(2), input.size(2))
    choice_cluster = torch.argmin(dist, dim=1)  # nearest center per point
    old_values = values
    # Recompute each center as the mean of its assigned points
    # (an empty cluster would yield a NaN mean; not handled here).
    values = torch.stack([
      torch.stack([input[b, :, choice_cluster[b] == k].mean(dim=-1)
                   for k in range(n_clusters)], dim=1)
      for b in range(input.size(0))])
    shift = (old_values - values).norm(dim=1)
    if shift.max() ** 2 < tol:
      break

  return values
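The expand-and-flatten pattern above works around the fact that F.pairwise_distance only compares corresponding rows. On recent PyTorch versions, torch.cdist produces the full point-to-center distance matrix directly; a minimal sketch under the same (batch, dim, points) layout:

import torch

x = torch.randn(2, 8, 100)   # (batch, dim, points)
centers = x[:, :, :16]       # 16 initial centers per batch

# cdist expects (batch, points, dim) and returns (batch, points, centers).
dist = torch.cdist(x.transpose(1, 2), centers.transpose(1, 2), p=2)
assignment = dist.argmin(dim=2)  # nearest-center index for every point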
Example #15
Source File: triplet_margin_loss.py    From pytorch-metric-learning with MIT License
def compute_loss(self, embeddings, labels, indices_tuple):
        indices_tuple = lmu.convert_to_triplets(indices_tuple, labels, t_per_anchor=self.triplets_per_anchor)
        anchor_idx, positive_idx, negative_idx = indices_tuple
        if len(anchor_idx) == 0:
            return self.zero_losses()
        anchors, positives, negatives = embeddings[anchor_idx], embeddings[positive_idx], embeddings[negative_idx]
        a_p_dist = F.pairwise_distance(anchors, positives, self.distance_norm)
        a_n_dist = F.pairwise_distance(anchors, negatives, self.distance_norm)
        if self.swap:
            p_n_dist = F.pairwise_distance(positives, negatives, self.distance_norm)
            a_n_dist = torch.min(a_n_dist, p_n_dist)
        a_p_dist = a_p_dist ** self.power
        a_n_dist = a_n_dist ** self.power
        if self.smooth_loss:
            inside_exp = a_p_dist - a_n_dist
            inside_exp = self.maybe_modify_loss(inside_exp)
            loss = torch.log(1 + torch.exp(inside_exp))
        else:
            dist = a_p_dist - a_n_dist
            loss_modified = self.maybe_modify_loss(dist + self.margin)
            loss = torch.nn.functional.relu(loss_modified)
        return {"loss": {"losses": loss, "indices": indices_tuple, "reduction_type": "triplet"}} 
Example #16
Source File: test_inference.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #17
Source File: test_inference.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #18
Source File: test_inference_remote.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_remote_hmm_posterior_importance_sampling_with_inference_network(self):
        samples = importance_sampling_with_inference_network_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.03

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 16} for i in range(len(observation))})

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #19
Source File: test_inference_remote.py    From pyprob with BSD 2-Clause "Simplified" License
def test_inference_remote_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Example #20
Source File: molecule_utils.py    From graph-tutorial.pytorch with MIT License
def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
                                      (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))

        return loss_contrastive 
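A hedged usage sketch for the contrastive loss above (the class name ContrastiveLoss and the margin value are assumptions; only the forward method appears in the source). The label convention follows the code: 0 pulls a pair together, 1 pushes it apart up to the margin.

import torch

out1 = torch.randn(4, 64)                # embeddings of the first items
out2 = torch.randn(4, 64)                # embeddings of the second items
label = torch.tensor([0., 1., 0., 1.])   # 0 = similar pair, 1 = dissimilar pair

criterion = ContrastiveLoss(margin=2.0)  # hypothetical constructor storing self.margin
loss = criterion(out1, out2, label)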
Example #21
Source File: distillation.py    From incremental_learning.pytorch with MIT License
def mmd(x, y, sigmas=[1, 5, 10], normalize=False):
    """Maximum Mean Discrepancy with several Gaussian kernels."""
    # Flatten:
    x = x.view(x.shape[0], -1)
    y = y.view(y.shape[0], -1)

    if len(sigmas) == 0:
        mean_dist = torch.mean(torch.pow(torch.pairwise_distance(x, y, p=2), 2))
        factors = (-1 / (2 * mean_dist)).view(1, 1, 1)
    else:
        factors = _get_mmd_factor(sigmas, x.device)

    if normalize:
        x = F.normalize(x, p=2, dim=1)
        y = F.normalize(y, p=2, dim=1)

    xx = torch.pairwise_distance(x, x, p=2)**2
    yy = torch.pairwise_distance(y, y, p=2)**2
    xy = torch.pairwise_distance(x, y, p=2)**2

    k_xx, k_yy, k_xy = 0, 0, 0

    div = 1 / (x.shape[1]**2)

    k_xx = div * torch.exp(factors * xx).sum(0).squeeze()
    k_yy = div * torch.exp(factors * yy).sum(0).squeeze()
    k_xy = div * torch.exp(factors * xy).sum(0).squeeze()

    mmd_sq = torch.sum(k_xx) - 2 * torch.sum(k_xy) + torch.sum(k_yy)
    return torch.sqrt(mmd_sq) 
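A hedged usage sketch for mmd above (the shapes are illustrative, and _get_mmd_factor is assumed to be the repository's helper returning one scale factor per sigma):

import torch

x = torch.randn(32, 128)  # samples from the first distribution
y = torch.randn(32, 128)  # samples from the second distribution
print(mmd(x, y, sigmas=[1, 5, 10], normalize=True))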
Example #22
Source File: ActorObserverBase.py    From actor-observer with GNU General Public License v3.0
def base(self, x, y, z):
        base_x = self.basenet(x)
        base_y = self.basenet(y)
        base_z = self.basenet(z)
        dist_a = F.pairwise_distance(base_x, base_y, 2).view(-1)
        dist_b = F.pairwise_distance(base_y, base_z, 2).view(-1)
        dprint('fc7 norms: {} \t {} \t {}', base_x.data.norm(), base_y.data.norm(), base_z.data.norm())
        dprint('pairwise dist means: {} \t {}', dist_a.data.mean(), dist_b.data.mean())
        return base_x, base_y, base_z, dist_a, dist_b 
Example #23
Source File: triplet.py    From actor-observer with GNU General Public License v3.0
def forward(self, x, y, z):
        embedded_x = self.embeddingnet(x)
        embedded_y = self.embeddingnet(y)
        embedded_z = self.embeddingnet(z)
        dist_a = F.pairwise_distance(embedded_x, embedded_y, 2).view(-1)
        dist_b = F.pairwise_distance(embedded_y, embedded_z, 2).view(-1)
        return dist_a, dist_b 
Example #24
Source File: tracker.py    From tracking_wo_bnw with GNU General Public License v3.0
def test_features(self, test_features):
		"""Compares test_features to features of this Track object"""
		if len(self.features) > 1:
			features = torch.cat(list(self.features), dim=0)
		else:
			features = self.features[0]
		features = features.mean(0, keepdim=True)
		dist = F.pairwise_distance(features, test_features, keepdim=True)
		return dist 
Example #25
Source File: mpcnn_lite.py    From sentence-similarity with MIT License
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for pool in ('max', ):
            for ws1 in self.filter_widths:
                x1 = sent1_block_a[ws1][pool]
                batch_size = x1.size()[0]
                for ws2 in self.filter_widths:
                    x2 = sent2_block_a[ws2][pool]
                    if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                        comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                        comparison_feats.append(F.pairwise_distance(x1, x2))
                        comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #26
Source File: lite_model.py    From castor with Apache License 2.0
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for ws1 in self.filter_widths:
            x1 = sent1_block_a[ws1]['max']
            for ws2 in self.filter_widths:
                x2 = sent2_block_a[ws2]['max']
                if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                    comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #27
Source File: model.py    From castor with Apache License 2.0
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        for pool in ('max', 'min', 'mean'):
            regM1, regM2 = [], []
            for ws in self.filter_widths:
                x1 = sent1_block_a[ws][pool].unsqueeze(2)
                x2 = sent2_block_a[ws][pool].unsqueeze(2)
                if np.isinf(ws):
                    x1 = x1.expand(-1, self.n_holistic_filters, -1)
                    x2 = x2.expand(-1, self.n_holistic_filters, -1)
                regM1.append(x1)
                regM2.append(x2)

            regM1 = torch.cat(regM1, dim=2)
            regM2 = torch.cat(regM2, dim=2)

            # Cosine similarity
            comparison_feats.append(F.cosine_similarity(regM1, regM2, dim=2))
            # Euclidean distance
            pairwise_distances = []
            for x1, x2 in zip(regM1, regM2):
                dist = F.pairwise_distance(x1, x2).view(1, -1)
                pairwise_distances.append(dist)
            comparison_feats.append(torch.cat(pairwise_distances))

        return torch.cat(comparison_feats, dim=1) 
Example #28
Source File: mean_pairwise_distance.py    From ignite with BSD 3-Clause "New" or "Revised" License
def update(self, output: Sequence[torch.Tensor]) -> None:
        y_pred, y = output
        distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
        self._sum_of_distances += torch.sum(distances).item()
        self._num_examples += y.shape[0] 
Example #29
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_pairwise_distance(self):
        inp1 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
        inp2 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
        output = F.pairwise_distance(inp1, inp2, p=2.0, eps=1e-06, keepdim=False) 
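The call above spells out the full default signature. The keepdim flag only changes the output shape, retaining the reduced feature dimension; a minimal sketch of the difference (CPU tensors assumed):

import torch
import torch.nn.functional as F

x = torch.randn(1024, 128)
y = torch.randn(1024, 128)

print(F.pairwise_distance(x, y).shape)                # torch.Size([1024])
print(F.pairwise_distance(x, y, keepdim=True).shape)  # torch.Size([1024, 1])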
Example #30
Source File: mean_pairwise_distance.py    From NeuralSceneDecomposition with GNU General Public License v3.0
def update(self, output):
        y_pred, y = output
        distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
        self._sum_of_distances += torch.sum(distances).item()
        self._num_examples += y.shape[0]