Python torch.zeros_like() Examples

The following are 30 code examples of torch.zeros_like(), collected from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all available functions/classes of the module torch.
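
Before the examples, a minimal sketch of the basic behavior: torch.zeros_like(input) returns a tensor of zeros with the same size, dtype, layout, and device as input, and dtype or device can still be overridden explicitly.

import torch

x = torch.rand(2, 3, dtype=torch.float64)
z = torch.zeros_like(x)                         # same shape, dtype, device as x
print(z.shape, z.dtype)                         # torch.Size([2, 3]) torch.float64

z32 = torch.zeros_like(x, dtype=torch.float32)  # dtype override
print(z32.dtype)                                # torch.float32
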
Example #1
Source File: loss.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def forward(self, x, target):
        similarity_matrix = x @ x.T  # need grad here
        label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
        negative_matrix = label_matrix.logical_not()
        positive_matrix = label_matrix.fill_diagonal_(False)

        sp = torch.where(positive_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))
        sn = torch.where(negative_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))

        ap = torch.clamp_min(1 + self.m - sp.detach(), min=0.)
        an = torch.clamp_min(sn.detach() + self.m, min=0.)

        logit_p = -self.gamma * ap * (sp - self.dp)
        logit_n = self.gamma * an * (sn - self.dn)

        logit_p = torch.where(positive_matrix, logit_p,
                              torch.zeros_like(logit_p))
        logit_n = torch.where(negative_matrix, logit_n,
                              torch.zeros_like(logit_n))

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) +
                          torch.logsumexp(logit_n, dim=1)).mean()
        return loss 
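
The core idiom in this example is torch.where with a zeros_like fallback: entries selected by the boolean mask are kept, everything else is zeroed while staying on the same device and dtype. A minimal, self-contained sketch (tensor values are made up):

import torch

sim = torch.tensor([[0.9, 0.2],
                    [0.2, 0.8]])
positive = torch.tensor([[False, True],
                         [True, False]])
sp = torch.where(positive, sim, torch.zeros_like(sim))
print(sp)  # tensor([[0.0000, 0.2000],
           #         [0.2000, 0.0000]])
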
Example #2
Source File: crop_and_resize.py    From medicaldetectiontoolkit with Apache License 2.0
def forward(self, image, boxes, box_ind):
        crops = torch.zeros_like(image)

        if image.is_cuda:
            _backend.crop_and_resize_gpu_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, self.crop_zdepth, crops)
        else:
            _backend.crop_and_resize_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, self.crop_zdepth, crops)

        # save for backward
        self.im_size = image.size()
        self.save_for_backward(boxes, box_ind)

        return crops 
Example #3
Source File: math_util.py    From ConvLab with MIT License
def calc_gaes(rewards, dones, v_preds, gamma, lam):
    '''
    Calculate GAE from Schulman et al. https://arxiv.org/pdf/1506.02438.pdf
    v_preds are values predicted for current states, with one extra element at the end for the final next_state
    delta is defined as r + gamma * V(s') - V(s) in eqn 10
    GAE is defined in eqn 16
    This method computes with torch tensors to prevent unnecessary moves between devices (e.g. GPU tensor to CPU numpy)
    NOTE any standardization is done outside of this method
    '''
    T = len(rewards)
    assert T + 1 == len(v_preds)  # v_preds includes states and 1 last next_state
    gaes = torch.zeros_like(rewards)
    future_gae = torch.tensor(0.0, dtype=rewards.dtype)
    # to multiply with not_dones to handle episode boundary (last state has no V(s'))
    not_dones = 1 - dones
    for t in reversed(range(T)):
        delta = rewards[t] + gamma * v_preds[t + 1] * not_dones[t] - v_preds[t]
        gaes[t] = future_gae = delta + gamma * lam * not_dones[t] * future_gae
    return gaes 
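
A hypothetical call, assuming calc_gaes above is in scope (all tensor values are made up; zeros_like guarantees gaes matches rewards in dtype and device):

import torch

rewards = torch.tensor([1.0, 1.0, 1.0])
dones = torch.tensor([0.0, 0.0, 1.0])         # episode ends at the last step
v_preds = torch.tensor([0.5, 0.5, 0.5, 0.5])  # T + 1 values
gaes = calc_gaes(rewards, dones, v_preds, gamma=0.99, lam=0.95)
print(gaes.shape)  # torch.Size([3])
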
Example #4
Source File: dfw.py    From dfw with MIT License
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
        if eta is not required and eta <= 0.0:
            raise ValueError("Invalid eta: {}".format(eta))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
        super(DFW, self).__init__(params, defaults)
        self.eps = eps

        for group in self.param_groups:
            if group['momentum']:
                for p in group['params']:
                    self.state[p]['momentum_buffer'] = torch.zeros_like(p.data, requires_grad=False) 
Example #5
Source File: bpgrad.py    From dfw with MIT License
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
        if eta is not required and eta <= 0.0:
            raise ValueError("Invalid eta: {}".format(eta))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
        super(BPGrad, self).__init__(params, defaults)
        self.eps = eps

        for group in self.param_groups:
            group['L'] = 1. / group['eta']
            if group['momentum']:
                for p in group['params']:
                    self.state[p]['v'] = torch.zeros_like(p.data, requires_grad=False) 
Example #6
Source File: deform_conv.py    From mmdetection with Apache License 2.0
def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        deform_conv_ext.modulated_deform_conv_backward(
            input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1],
            grad_input, grad_weight, grad_bias, grad_offset, grad_mask,
            grad_output, weight.shape[2], weight.shape[3], ctx.stride,
            ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
            ctx.groups, ctx.deformable_groups, ctx.with_bias)
        if not ctx.with_bias:
            grad_bias = None

        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None, None) 
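
The pattern here is common for C++/CUDA extension ops: the Python side preallocates every gradient buffer with torch.zeros_like, and the kernel fills them in place. A toy pure-Python stand-in (a sketch of the pattern, not mmdetection's API):

import torch

class ScaleByTwo(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return input * 2

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = torch.zeros_like(input)  # preallocate, as in the example above
        grad_input.add_(grad_output * 2)      # stands in for the extension kernel
        return grad_input

x = torch.ones(3, requires_grad=True)
ScaleByTwo.apply(x).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
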
Example #7
Source File: utils.py    From conv-social-pooling with MIT License
def maskedNLL(y_pred, y_gt, mask):
    acc = torch.zeros_like(mask)
    muX = y_pred[:,:,0]
    muY = y_pred[:,:,1]
    sigX = y_pred[:,:,2]
    sigY = y_pred[:,:,3]
    rho = y_pred[:,:,4]
    ohr = torch.pow(1-torch.pow(rho,2),-0.5)
    x = y_gt[:,:, 0]
    y = y_gt[:,:, 1]
    # If we represent likelihood in feet^(-1):
    out = (0.5 * torch.pow(ohr, 2) * (torch.pow(sigX, 2) * torch.pow(x - muX, 2)
                                      + torch.pow(sigY, 2) * torch.pow(y - muY, 2)
                                      - 2 * rho * sigX * sigY * (x - muX) * (y - muY))
           - torch.log(sigX * sigY * ohr) + 1.8379)
    # If we represent likelihood in m^(-1), subtract a constant 0.5160:
    # out = out - 0.5160
    acc[:,:,0] = out
    acc[:,:,1] = out
    acc = acc*mask
    lossVal = torch.sum(acc)/torch.sum(mask)
    return lossVal

## NLL for sequence, outputs sequence of NLL values for each time-step, uses mask for variable output lengths, used for evaluation 
Example #8
Source File: deform_pool.py    From mmdetection with Apache License 2.0
def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError

        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None
        grad_offset = torch.zeros_like(offset)

        deform_pool_ext.deform_psroi_pooling_backward(
            grad_output, data, rois, offset, output_count, grad_input,
            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
            ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
            ctx.trans_std)
        return (grad_input, grad_rois, grad_offset, None, None, None, None,
                None, None, None, None) 
Example #9
Source File: carafe.py    From mmdetection with Apache License 2.0
def backward(ctx, grad_output):
        assert grad_output.is_cuda

        features, masks, rfeatures = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor

        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        carafe_ext.backward(grad_output.contiguous(), rfeatures, masks,
                            kernel_size, group_size, scale_factor,
                            rgrad_output, rgrad_input_hs, rgrad_input,
                            rgrad_masks, grad_input, grad_masks)
        return grad_input, grad_masks, None, None, None, None 
Example #10
Source File: sir.py    From pyfilter with MIT License
def __init__(self, theta, initial_dist, dt, num_steps=10):
        """
        Implements an SIR model where the number of infected has been replaced with the fraction of infected
        individuals in the entire population. Model taken from this article: https://arxiv.org/pdf/2004.06680.pdf
        :param theta: The parameters (beta, gamma, sigma)
        """

        if initial_dist.event_shape != torch.Size([3]):
            raise NotImplementedError('Must be of size 3!')

        def g(x, beta, gamma, sigma):
            g1 = -sigma * x[..., 0] * x[..., 1]
            g3 = torch.zeros_like(g1)

            return concater(g1, -g1, g3)

        inc_dist = Independent(Normal(torch.zeros(1), math.sqrt(dt) * torch.ones(1)), 1)

        super().__init__((f, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps) 
Example #11
Source File: crop_and_resize.py    From medicaldetectiontoolkit with Apache License 2.0
def backward(self, grad_outputs):
        boxes, box_ind = self.saved_tensors

        grad_outputs = grad_outputs.contiguous()
        grad_image = torch.zeros_like(grad_outputs).resize_(*self.im_size)

        if grad_outputs.is_cuda:
            _backend.crop_and_resize_gpu_backward(
                grad_outputs, boxes, box_ind, grad_image
            )
        else:
            _backend.crop_and_resize_backward(
                grad_outputs, boxes, box_ind, grad_image
            )

        return grad_image, None, None 
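
Note that torch.zeros_like(grad_outputs).resize_(*self.im_size) is a legacy idiom: it allocates a tensor shaped like grad_outputs only to immediately resize it. On current PyTorch, grad_outputs.new_zeros(size) expresses the same intent (zeros with grad_outputs' dtype and device, at a given shape) in one step. A sketch with made-up shapes:

import torch

g = torch.randn(4, 8)           # stand-in for grad_outputs
im_size = (2, 2, 4, 2)          # hypothetical saved image size

legacy = torch.zeros_like(g).resize_(*im_size)
modern = g.new_zeros(im_size)
print(legacy.shape == modern.shape)  # True
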
Example #12
Source File: dfw.py    From Semantic-Aware-Scene-Recognition with MIT License
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
        if eta is not required and eta <= 0.0:
            raise ValueError("Invalid eta: {}".format(eta))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
        super(DFW, self).__init__(params, defaults)
        self.eps = eps

        for group in self.param_groups:
            if group['momentum']:
                for p in group['params']:
                    self.state[p]['momentum_buffer'] = torch.zeros_like(p.data, requires_grad=False) 
Example #13
Source File: mask_point_head.py    From mmdetection with Apache License 2.0
def loss(self, point_pred, point_targets, labels):
        """Calculate loss for MaskPointHead.

        Args:
            point_pred (Tensor): Point prediction result, shape
                (num_rois, num_classes, num_points).
            point_targets (Tensor): Point targets, shape (num_roi, num_points).
            labels (Tensor): Class label of corresponding boxes,
                shape (num_rois, )

        Returns:
            dict[str, Tensor]: a dictionary of point loss components
        """

        loss = dict()
        if self.class_agnostic:
            loss_point = self.loss_point(point_pred, point_targets,
                                         torch.zeros_like(labels))
        else:
            loss_point = self.loss_point(point_pred, point_targets, labels)
        loss['loss_point'] = loss_point
        return loss 
Example #14
Source File: free_anchor_retina_head.py    From mmdetection with Apache License 2.0
def negative_bag_loss(self, cls_prob, box_prob):
        """Compute negative bag loss.

        :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.

        :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples.

        :math:`P_{j}^{bg}`: Classification probability of negative samples.

        Args:
            cls_prob (Tensor): Classification probability, in shape
                (num_img, num_anchors, num_classes).
            box_prob (Tensor): Box probability, in shape
                (num_img, num_anchors, num_classes).

        Returns:
            Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
        """  # noqa: E501, W605
        prob = cls_prob * (1 - box_prob)
        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
            prob, torch.zeros_like(prob), reduction='none')
        return (1 - self.alpha) * negative_bag_loss 
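
Since the target is torch.zeros_like(prob), the binary cross entropy here reduces to -log(1 - prob). A quick numeric check (sketch):

import torch
import torch.nn.functional as F

prob = torch.tensor([0.1, 0.5, 0.9])
bce = F.binary_cross_entropy(prob, torch.zeros_like(prob), reduction='none')
print(torch.allclose(bce, -torch.log(1 - prob)))  # True
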
Example #15
Source File: bpgrad.py    From Semantic-Aware-Scene-Recognition with MIT License
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
        if eta is not required and eta <= 0.0:
            raise ValueError("Invalid eta: {}".format(eta))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
        super(BPGrad, self).__init__(params, defaults)
        self.eps = eps

        for group in self.param_groups:
            group['L'] = 1. / group['eta']
            if group['momentum']:
                for p in group['params']:
                    self.state[p]['v'] = torch.zeros_like(p.data, requires_grad=False) 
Example #16
Source File: crop_and_resize.py    From medicaldetectiontoolkit with Apache License 2.0
def forward(self, image, boxes, box_ind):
        crops = torch.zeros_like(image)
        if image.is_cuda:
            _backend.crop_and_resize_gpu_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)
        else:
            _backend.crop_and_resize_forward(
                image, boxes, box_ind,
                self.extrapolation_value, self.crop_height, self.crop_width, crops)

        # save for backward
        self.im_size = image.size()
        self.save_for_backward(boxes, box_ind)

        return crops 
Example #17
Source File: utils_render_color2.py    From DIB-R with MIT License
def backward(ctx, dldI_bxhxwxd, dldp_bxhxwx1):
        
        tfims_bxhxwxd, tfimprob_bxhxwx1, \
        tfimidxs_bxhxwx1, tfimweis_bxhxwx3, \
        tfpoints2dmul_bxfx6, tfcolors_bxfx3d, \
        tfprobface, tfprobcase, tfprobdis, tfprobdep, tfprobacc, \
        debug_im = ctx.saved_tensors

        dldp2 = torch.zeros_like(tfpoints2dmul_bxfx6)
        dldp2_prob = torch.zeros_like(tfpoints2dmul_bxfx6)
        dldc = torch.zeros_like(tfcolors_bxfx3d)

        dr_cuda_batch.backward(dldI_bxhxwxd.contiguous(), \
                               dldp_bxhxwx1.contiguous(), \
                               tfims_bxhxwxd, tfimprob_bxhxwx1, \
                               tfimidxs_bxhxwx1, tfimweis_bxhxwx3, \
                               tfprobface, tfprobcase, tfprobdis, tfprobdep, tfprobacc, \
                               tfpoints2dmul_bxfx6, tfcolors_bxfx3d, \
                               dldp2, dldc, dldp2_prob, \
                               debug_im, multiplier, delta)

        return None, dldp2 + dldp2_prob, None, dldc


############################################################### 
Example #18
Source File: crop_and_resize.py    From medicaldetectiontoolkit with Apache License 2.0
def backward(self, grad_outputs):
        boxes, box_ind = self.saved_tensors

        grad_outputs = grad_outputs.contiguous()
        grad_image = torch.zeros_like(grad_outputs).resize_(*self.im_size)

        if grad_outputs.is_cuda:
            _backend.crop_and_resize_gpu_backward(
                grad_outputs, boxes, box_ind, grad_image
            )
        else:
            _backend.crop_and_resize_backward(
                grad_outputs, boxes, box_ind, grad_image
            )

        return grad_image, None, None 
Example #19
Source File: sisr.py    From pyfilter with MIT License
def _filter(self, y):
        # ===== Resample among old ===== #
        # TODO: Not optimal as we normalize in several other functions, fix this
        old_normw = normalize(self._w_old)

        inds, mask = self._resample_state(self._w_old)
        to_prop = choose(self._x_cur, inds)
        self._proposal = self.proposal.construct(y, to_prop)

        # ===== Propagate ===== #
        self._x_cur = self.proposal.draw(self._rsample)
        weights = self.proposal.weight(y, self._x_cur, to_prop)

        self.proposal.resample(inds)

        # ===== Update weights ===== #
        tw = torch.zeros_like(weights)
        tw[~mask] = self._w_old[~mask]

        self._w_old = weights + tw

        normw = normalize(self._w_old)
        if self._sumaxis < -1:
            normw.unsqueeze_(-1)

        return (normw * self._x_cur).sum(self._sumaxis), loglikelihood(weights, old_normw) 
Example #20
Source File: math_util.py    From ConvLab with MIT License
def calc_returns(rewards, dones, gamma):
    '''
    Calculate the simple returns (full rollout), i.e. the sum of discounted rewards up to termination
    '''
    T = len(rewards)
    rets = torch.zeros_like(rewards)
    future_ret = torch.tensor(0.0, dtype=rewards.dtype)
    not_dones = 1 - dones
    for t in reversed(range(T)):
        rets[t] = future_ret = rewards[t] + gamma * future_ret * not_dones[t]
    return rets 
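
A worked call with made-up inputs, assuming calc_returns above is in scope: with gamma = 0.9 and termination at the last step, the returns unroll to 1.0, then 1 + 0.9 * 1 = 1.9, then 1 + 0.9 * 1.9 = 2.71.

import torch

rewards = torch.tensor([1.0, 1.0, 1.0])
dones = torch.tensor([0.0, 0.0, 1.0])
print(calc_returns(rewards, dones, gamma=0.9))
# tensor([2.7100, 1.9000, 1.0000])
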
Example #21
Source File: bert_basic_layer.py    From mrc-for-flat-nested-ner with Apache License 2.0
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast
        # dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers, attns = self.encoder(embedding_output,
                                             extended_attention_mask,
                                             output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        attn = attns[-1]
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output, attn 
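
The mask arithmetic in isolation (a sketch): ones mark positions to attend, and after the transform kept positions add 0.0 to the raw attention scores while masked positions add -10000.0.

import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])   # 1 = attend, 0 = masked
ext = attention_mask.unsqueeze(1).unsqueeze(2)  # [batch, 1, 1, seq_len]
ext = (1.0 - ext.float()) * -10000.0
print(ext)  # tensor([[[[-0., -0., -0., -10000.]]]])
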
Example #22
Source File: bert_basic_layer.py    From mrc-for-flat-nested-ner with Apache License 2.0
def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings 
Example #23
Source File: set2set.py    From LanczosNetwork with MIT License
def forward(self, input_set):
    """
      Args:
        input_set: shape N X D

      Returns:
        output_set: shape N X 1
    """
    num_element = input_set.shape[0]
    element_dim = input_set.shape[1]
    assert element_dim == self.element_dim
    hidden = torch.zeros(1, 2 * self.element_dim).to(input_set.device)
    memory = torch.zeros(1, self.element_dim).to(input_set.device)

    # encoding
    for tt in range(self.num_step_encoder):
      hidden, memory = self.LSTM_encoder(hidden, memory)
      energy = torch.tanh(torch.mm(hidden, self.W_1) + input_set).mm(self.W_2)
      att_weight = F.softmax(energy, dim=0)
      read = (input_set * att_weight).sum(dim=0, keepdim=True)
      hidden = torch.cat([hidden, read], dim=1)

    # decoding
    memory = torch.zeros_like(memory)
    output_set = []
    for tt in range(num_element):
      hidden, memory = self.LSTM_decoder(hidden, memory)
      energy = torch.tanh(torch.mm(hidden, self.W_3) + input_set).mm(self.W_4)
      att_weight = F.softmax(energy, dim=0)
      read = (input_set * att_weight).sum(dim=0, keepdim=True)
      hidden = torch.cat([hidden, read], dim=1)
      energy = torch.tanh(torch.mm(read, self.W_5) + torch.mm(
          input_set, self.W_6)).mm(self.W_7)
      output_set += [torch.argmax(energy)]

    return torch.stack(output_set) 
Example #24
Source File: ada_lanczos_net.py    From LanczosNetwork with MIT License
def _get_graph_laplacian(self, node_feat, adj_mask):
    """ Compute graph Laplacian

      Args:
        node_feat: float tensor, shape B X N X D
        adj_mask: float tensor, shape B X N X N, binary mask; should contain self-loops

      Returns:
        L: float tensor, shape B X N X N
    """
    batch_size = node_feat.shape[0]
    num_node = node_feat.shape[1]
    dim_feat = node_feat.shape[2]

    # compute pairwise distance
    idx_row, idx_col = np.meshgrid(range(num_node), range(num_node))
    idx_row, idx_col = torch.Tensor(idx_row.reshape(-1)).long().to(node_feat.device), torch.Tensor(
        idx_col.reshape(-1)).long().to(node_feat.device)

    diff = node_feat[:, idx_row, :] - node_feat[:, idx_col, :]  # shape B X N^2 X D
    dist2 = (diff * diff).sum(dim=2)  # shape B X N^2
    
    # sigma2, _ = torch.median(dist2, dim=1, keepdim=True) # median is sometimes 0
    # sigma2 = sigma2 + 1.0e-7

    sigma2 = torch.mean(dist2, dim=1, keepdim=True)

    A = torch.exp(-dist2 / sigma2)  # shape B X N^2
    A = A.reshape(batch_size, num_node, num_node) * adj_mask  # shape B X N X N
    row_sum = torch.sum(A, dim=2, keepdim=True)
    pad_row_sum = torch.zeros_like(row_sum)
    pad_row_sum[row_sum == 0.0] = 1.0    
    alpha = 0.5
    D = 1.0 / (row_sum + pad_row_sum).pow(alpha)  # shape B X N X 1
    L = D * A * D.transpose(1, 2)  # shape B X N X N

    return L 
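
The zeros_like-based padding above guards the degree normalization against isolated nodes (rows of A that sum to zero). In isolation (sketch with made-up values):

import torch

row_sum = torch.tensor([[2.0], [0.0], [3.0]])  # middle node is isolated
pad = torch.zeros_like(row_sum)
pad[row_sum == 0.0] = 1.0
d = 1.0 / (row_sum + pad).pow(0.5)             # no division by zero
print(d.squeeze())  # tensor([0.7071, 1.0000, 0.5774])
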
Example #25
Source File: so3.py    From pointnet-registration-framework with MIT License
def log(g):
    eps = 1.0e-7
    R = g.view(-1, 3, 3)
    tr = btrace(R)
    c = (tr - 1) / 2
    t = torch.acos(c)
    sc = sinc1(t)
    idx0 = (torch.abs(sc) <= eps)
    idx1 = (torch.abs(sc) > eps)
    sc = sc.view(-1, 1, 1)

    X = torch.zeros_like(R)
    if idx1.any():
        X[idx1] = (R[idx1] - R[idx1].transpose(1, 2)) / (2*sc[idx1])

    if idx0.any():
        # t[idx0] == math.pi
        t2 = t[idx0] ** 2
        A = (R[idx0] + torch.eye(3).type_as(R).unsqueeze(0)) * t2.view(-1, 1, 1) / 2
        aw1 = torch.sqrt(A[:, 0, 0])
        aw2 = torch.sqrt(A[:, 1, 1])
        aw3 = torch.sqrt(A[:, 2, 2])
        sgn_3 = torch.sign(A[:, 0, 2])
        sgn_3[sgn_3 == 0] = 1
        sgn_23 = torch.sign(A[:, 1, 2])
        sgn_23[sgn_23 == 0] = 1
        sgn_2 = sgn_23 * sgn_3
        w1 = aw1
        w2 = aw2 * sgn_2
        w3 = aw3 * sgn_3
        w = torch.stack((w1, w2, w3), dim=-1)
        W = mat(w)
        X[idx0] = W

    x = vec(X.view_as(g))
    return x 
Example #26
Source File: so3.py    From pointnet-registration-framework with MIT License
def mat(x):
    # size: [*, 3] -> [*, 3, 3]
    x_ = x.view(-1, 3)
    x1, x2, x3 = x_[:, 0], x_[:, 1], x_[:, 2]
    O = torch.zeros_like(x1)

    X = torch.stack((
        torch.stack((O, -x3, x2), dim=1),
        torch.stack((x3, O, -x1), dim=1),
        torch.stack((-x2, x1, O), dim=1)), dim=1)
    return X.view(*(x.size()[0:-1]), 3, 3) 
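
mat builds the skew-symmetric (hat) matrix of each 3-vector, with zeros_like supplying the diagonal, so that mat(w) @ v equals the cross product w x v. For a single vector (sketch, assuming mat above is in scope):

import torch

w = torch.tensor([1.0, 2.0, 3.0])
print(mat(w))
# tensor([[ 0., -3.,  2.],
#         [ 3.,  0., -1.],
#         [-2.,  1.,  0.]])
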
Example #27
Source File: sinc.py    From pointnet-registration-framework with MIT License
def sinc4(t):
    """ sinc4: t -> 1/t^2 * (1/2 - sinc2(t))
                  = 1/t^2 * (1/2 - (1 - cos(t))/t^2)
    """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = 1/24*(1-t2[s]/30*(1-t2[s]/56*(1-t2[s]/90)))  # Taylor series O(t^8)
    r[c] = (0.5 - (1 - cos(t[c]))/t2[c]) / t2[c]

    return r
Example #28
Source File: sinc.py    From pointnet-registration-framework with MIT License
def sinc3_dt(t):
    """ d/dt(sinc3) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = -t[s]/60*(1 - t2/21*(1 - t2/24*(1.0/2 - t2/165)))  # Taylor series O(t^8)
    r[c] = (3*sin(t[c]) - t[c]*(cos(t[c]) + 2))/(t[c]**4)

    return r 
Example #29
Source File: sinc.py    From pointnet-registration-framework with MIT License
def sinc3(t):
    """ sinc3: t -> (t - sin(t)) / (t**3) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = 1/6*(1-t2/20*(1-t2/42*(1-t2/72)))  # Taylor series O(t^8)
    r[c] = (t[c]-sin(t[c]))/(t[c]**3)

    return r 
Example #30
Source File: sinc.py    From pointnet-registration-framework with MIT License
def sinc2(t):
    """ sinc2: t -> (1 - cos(t)) / (t**2) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = 1/2*(1-t2[s]/12*(1-t2[s]/30*(1-t2[s]/56)))  # Taylor series O(t^8)
    r[c] = (1-cos(t[c]))/t2[c]

    return r
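
A quick numeric check of the two branches, assuming sinc2 above is in scope (and that cos refers to torch.cos, e.g. via a module-level from torch import sin, cos): values just below the e = 0.01 cutoff take the Taylor branch, values just above it take the closed form, and the two agree.

import torch

t = torch.tensor([0.0099, 0.0101], dtype=torch.float64)
exact = (1 - torch.cos(t)) / t**2
print(torch.allclose(sinc2(t), exact))  # True
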