Python torch.nn.functional.l1_loss() Examples

The following are code examples of torch.nn.functional.l1_loss(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
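For reference, F.l1_loss(input, target) computes the mean absolute error between two tensors of the same shape; the reduction argument switches between 'mean' (the default), 'sum', and 'none'. A minimal, self-contained sketch (the tensor shapes here are arbitrary):

import torch
import torch.nn.functional as F

pred = torch.randn(4, 3, requires_grad=True)
target = torch.randn(4, 3)

mean_loss = F.l1_loss(pred, target)                    # scalar mean absolute error
sum_loss = F.l1_loss(pred, target, reduction='sum')    # summed absolute error
per_elem = F.l1_loss(pred, target, reduction='none')   # element-wise |pred - target|
mean_loss.backward()                                   # gradients flow back to pred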
Example #1
Source File: Srmd.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    for opt in self.opts.values():
      if learning_rate:
        for param_group in opt.param_groups:
          param_group["lr"] = learning_rate
    lr = inputs[0]
    batch = lr.shape[0]
    noise, stddev = self.gen_random_noise(lr.shape)
    kernel = [self.gen_random_kernel() for _ in range(batch)]
    degpar = torch.tensor([pca.get_degradation(k) for k in kernel],
                          dtype=lr.dtype, device=lr.device)
    kernel = torch.tensor(kernel, dtype=lr.dtype, device=lr.device)
    noise = torch.tensor(noise, dtype=lr.dtype, device=lr.device)
    stddev = torch.tensor(stddev, dtype=lr.dtype, device=lr.device)
    lr = imfilter(lr, kernel) + noise
    sr = self.srmd(lr, degpar, stddev)
    loss = F.l1_loss(sr, labels[0])
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {
      'loss': loss.detach().cpu().numpy()
    } 
Example #2
Source File: loss.py    From oft with MIT License
def heatmap_loss(scores, labels, pos_weight=100):
    labels = labels.float()
    # loss = F.binary_cross_entropy_with_logits(scores, labels, reduction='none')
    loss = F.l1_loss(scores, labels, reduction='none')
    weighted = loss * (1. + (pos_weight - 1.) * labels)

    return weighted.sum()


# def uncertainty_loss(logvar, sqr_dists):
#     sqr_dists = sqr_dists.clamp(min=1.+1e-6)
#     c = (1 + torch.log(sqr_dists)) / sqr_dists
#     loss = torch.log1p(logvar.exp()) / sqr_dists + torch.sigmoid(-logvar) - c
#     print('dists', float(sqr_dists.min()), float(sqr_dists.max()))
#     print('logvar', float(logvar.min()), float(logvar.max()))
#     print('loss', float(loss.min()), float(loss.max()))

#     def hook(grad):
#         print('grad', float(grad.min()), float(grad.max()), float(grad.sum()))
#     logvar.register_hook(hook)

#     return loss.mean() 
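A hedged usage sketch for the heatmap_loss defined above (the tensor shapes and pos_weight value are illustrative assumptions): positions where labels is 1 contribute pos_weight times more to the summed L1 error than background positions.

import torch

scores = torch.rand(2, 1, 64, 64)                    # predicted heatmap
labels = (torch.rand(2, 1, 64, 64) > 0.95).float()   # sparse ground-truth positives
loss = heatmap_loss(scores, labels, pos_weight=100)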
Example #3
Source File: cbhg.py    From espnet with Apache License 2.0
def forward(self, cbhg_outs, spcs, olens):
        """Calculate forward propagation.

        Args:
            cbhg_outs (Tensor): Batch of CBHG outputs (B, Lmax, spc_dim).
            spcs (Tensor): Batch of groundtruth of spectrogram (B, Lmax, spc_dim).
            olens (LongTensor): Batch of the lengths of each sequence (B,).

        Returns:
            Tensor: L1 loss value
            Tensor: Mean square error loss value.

        """
        # perform masking for padded values
        if self.use_masking:
            mask = make_non_pad_mask(olens).unsqueeze(-1).to(spcs.device)
            spcs = spcs.masked_select(mask)
            cbhg_outs = cbhg_outs.masked_select(mask)

        # calculate loss
        cbhg_l1_loss = F.l1_loss(cbhg_outs, spcs)
        cbhg_mse_loss = F.mse_loss(cbhg_outs, spcs)

        return cbhg_l1_loss, cbhg_mse_loss 
Example #4
Source File: mlp_dropout.py    From pytorch_DGCNN with MIT License
def forward(self, x, y = None):
        h1 = self.h1_weights(x)
        h1 = F.relu(h1)

        if self.with_dropout:
            h1 = F.dropout(h1, training=self.training)
        pred = self.h2_weights(h1)[:, 0]

        if y is not None:
            y = Variable(y)
            mse = F.mse_loss(pred, y)
            mae = F.l1_loss(pred, y)
            mae = mae.cpu().detach()
            return pred, mae, mse
        else:
            return pred 
Example #5
Source File: trainer.py    From GCA-Matting with MIT License
def regression_loss(logit, target, loss_type='l1', weight=None):
        """
        Alpha reconstruction loss
        :param logit:
        :param target:
        :param loss_type: "l1" or "l2"
        :param weight: tensor with shape [N,1,H,W] weights for each pixel
        :return:
        """
        if weight is None:
            if loss_type == 'l1':
                return F.l1_loss(logit, target)
            elif loss_type == 'l2':
                return F.mse_loss(logit, target)
            else:
                raise NotImplementedError("NotImplemented loss type {}".format(loss_type))
        else:
            if loss_type == 'l1':
                return F.l1_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
            elif loss_type == 'l2':
                return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
            else:
                raise NotImplementedError("NotImplemented loss type {}".format(loss_type)) 
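A hedged usage sketch for the regression_loss defined above (shapes and the weight map are illustrative assumptions): the weighted form restricts the L1/L2 error to the pixels selected by weight and normalizes by the total weight rather than by the number of elements.

import torch

pred = torch.rand(2, 1, 64, 64)                      # predicted alpha matte
target = torch.rand(2, 1, 64, 64)                    # ground-truth alpha matte
weight = (torch.rand(2, 1, 64, 64) > 0.5).float()    # e.g. unknown-region mask

full_loss = regression_loss(pred, target, loss_type='l1')
masked_loss = regression_loss(pred, target, loss_type='l1', weight=weight)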
Example #6
Source File: network_utils.py    From 3d-vehicle-tracking with BSD 3-Clause "New" or "Revised" License
def linear_motion_loss(outputs, mask):
    #batch_size = outputs.shape[0]
    s_len = outputs.shape[1]

    loss = outputs.new_zeros(1)
    for idx in range(2, s_len, 1):
        # mask loss to valid outputs
        # motion_mask: (B, 1), the mask of current frame
        motion_mask = mask[:, idx].view(mask.shape[0], 1)

        # Loss: |(loc_t - loc_t-1), (loc_t-1, loc_t-2)|_1 for t = [2, s_len]
        # If loc_t is empty, mask it out by motion_mask
        curr_motion = (outputs[:, idx] - outputs[:, idx - 1]) * motion_mask
        past_motion = (outputs[:, idx - 1] - outputs[:, idx - 2]) * motion_mask
        loss += torch.mean(1.0 - F.cosine_similarity(past_motion, curr_motion))
        loss += F.l1_loss(past_motion, curr_motion)
    return loss / (torch.sum(mask)) 
Example #7
Source File: Ffdnet.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    for opt in self.opts.values():
      if learning_rate:
        for param_group in opt.param_groups:
          param_group["lr"] = learning_rate
    lr = inputs[0]
    sigma = torch.rand(1, device=lr.device) * 75 / 255
    noise = torch.randn_like(lr) * sigma
    hr = self.ffdnet((lr + noise).clamp(0, 1), sigma)
    loss = F.l1_loss(hr, labels[0])
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {
      'loss': loss.detach().cpu().numpy()
    } 
Example #8
Source File: utility.py    From hmd with MIT License
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray, 
                        mask, lighting_est, device, K, thres):
    
    N,C,H,W = colorImg_gray.size()
    
    # color loss
    normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
    SHs     = lighting.normalToSHBatch(normals,device)
    
    SHs    = torch.reshape(SHs, (N, H*W, 9))
    lighting_est = torch.reshape(lighting_est, (N, 9, 1))
    
    #SHs to [B, H*W,9] lighting [B, 9, 1] --[N, H*W] --[B,H,W,1]             
    color_shading = torch.bmm(SHs, lighting_est) # N H*W 1   
    color_shading = torch.reshape(color_shading, (N, H, W))
    
    mask1 = torch.reshape(mask[:,0,:,:], (N,H,W)) # one layer mask
    color_pre  = mask1 * (color_shading * albedoImg_gray) # N*H*W
    colorImg_gray_mask = mask1 * colorImg_gray # mask
    
    colorloss = F.l1_loss(color_pre, colorImg_gray_mask) # NHW size directly
        
    return colorloss, color_pre

# come from hmr-src/util/image.py 
Example #9
Source File: losses.py    From TTS with Mozilla Public License 2.0
def forward(self, x, target, length):
        """
        Args:
            x: A Variable containing a FloatTensor of size
                (batch, max_len, dim) which contains the
                unnormalized probability for each class.
            target: A Variable containing a LongTensor of size
                (batch, max_len, dim) which contains the index of the true
                class for each corresponding step.
            length: A Variable containing a LongTensor of size (batch,)
                which contains the length of each data in a batch.
        Returns:
            loss: An average loss value in range [0, 1] masked by the length.
        """
        # mask: (batch, max_len, 1)
        target.requires_grad = False
        mask = sequence_mask(
            sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
        if self.seq_len_norm:
            norm_w = mask / mask.sum(dim=1, keepdim=True)
            out_weights = norm_w.div(target.shape[0] * target.shape[2])
            mask = mask.expand_as(x)
            loss = functional.l1_loss(
                x * mask, target * mask, reduction='none')
            loss = loss.mul(out_weights.to(loss.device)).sum()
        else:
            mask = mask.expand_as(x)
            loss = functional.l1_loss(
                x * mask, target * mask, reduction='sum')
            loss = loss / mask.sum()
        return loss 
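The sequence_mask helper is imported from elsewhere in the TTS code base and is not shown on this page; an equivalent implementation might look like the following sketch (an assumption, not the project's actual code):

import torch

def sequence_mask(sequence_length, max_len=None):
    # Boolean mask of shape (batch, max_len); True marks valid (non-padded) steps.
    if max_len is None:
        max_len = int(sequence_length.max().item())
    positions = torch.arange(max_len, device=sequence_length.device)
    return positions.unsqueeze(0) < sequence_length.unsqueeze(1)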
Example #10
Source File: Edsr.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    # TODO
    self.edsr.set_scale(2)
    sr = self.edsr(inputs[0] * self.rgb_range) / self.rgb_range
    loss = F.l1_loss(sr, labels[0])
    if learning_rate:
      for param_group in self.opt.param_groups:
        param_group["lr"] = learning_rate
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {'l1': loss.detach().cpu().numpy()} 
Example #11
Source File: main.py    From PSMNet with MIT License
def test(imgL,imgR,disp_true):

        model.eval()
  
        if args.cuda:
            imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
        #---------
        mask = disp_true < 192
        #----

        if imgL.shape[2] % 16 != 0:
            times = imgL.shape[2]//16       
            top_pad = (times+1)*16 -imgL.shape[2]
        else:
            top_pad = 0

        if imgL.shape[3] % 16 != 0:
            times = imgL.shape[3]//16                       
            right_pad = (times+1)*16-imgL.shape[3]
        else:
            right_pad = 0  

        imgL = F.pad(imgL,(0,right_pad, top_pad,0))
        imgR = F.pad(imgR,(0,right_pad, top_pad,0))

        with torch.no_grad():
            output3 = model(imgL,imgR)
            output3 = torch.squeeze(output3)
        
        if top_pad !=0:
            img = output3[:,top_pad:,:]
        else:
            img = output3

        if len(disp_true[mask])==0:
           loss = 0
        else:
           loss = F.l1_loss(img[mask],disp_true[mask]) #torch.mean(torch.abs(img[mask]-disp_true[mask]))  # end-point-error

        return loss.data.cpu() 
Example #12
Source File: Crdn.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    sr = self.rsr(inputs[0])
    loss = F.l1_loss(sr, labels[0])
    if learning_rate:
      for param_group in self.opt.param_groups:
        param_group["lr"] = learning_rate
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {'l1': loss.detach().cpu().numpy()} 
Example #13
Source File: Edsr.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    sr = self.edsr(inputs[0] * self.rgb_range) / self.rgb_range
    loss = F.l1_loss(sr, labels[0])
    if learning_rate:
      for param_group in self.opt.param_groups:
        param_group["lr"] = learning_rate
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {'l1': loss.detach().cpu().numpy()} 
Example #14
Source File: Esrgan.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    sr = self.rrdb(inputs[0])
    for opt in self.opts.values():
      if learning_rate:
        for param_group in opt.param_groups:
          param_group["lr"] = learning_rate
    image_loss = F.l1_loss(sr, labels[0])
    loss = image_loss * self.w[0]
    if self.use_vgg:
      feature_loss = F.l1_loss(self.vgg[0](sr)[0], self.vgg[0](labels[0])[0])
      loss += feature_loss * self.w[1]
    if self.use_gan:
      # update G
      self.optg.zero_grad()
      fake = self.dnet(sr)
      gan_loss_g = gan_bce_loss(fake, True)
      loss += gan_loss_g * self.w[2]
      loss.backward()
      self.optg.step()
      # update D
      self.optd.zero_grad()
      real = self.dnet(labels[0])
      fake = self.dnet(sr.detach())
      loss_d = gan_bce_loss(real, True) + gan_bce_loss(fake, False)
      loss_d.backward()
      self.optd.step()
      return {
        'loss': loss.detach().cpu().numpy(),
        'image': image_loss.detach().cpu().numpy(),
        'loss_g': gan_loss_g.detach().cpu().numpy(),
        'loss_d': loss_d.detach().cpu().numpy()
      }
    else:
      self.optg.zero_grad()
      loss.backward()
      self.optg.step()
      return {
        'loss': loss.detach().cpu().numpy(),
        'image': image_loss.detach().cpu().numpy()
      } 
Example #15
Source File: Rcan.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    sr = self.rcan(inputs[0] * self.rgb_range) / self.rgb_range
    loss = F.l1_loss(sr, labels[0])
    if learning_rate:
      for param_group in self.opt.param_groups:
        param_group["lr"] = learning_rate
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    return {'l1': loss.detach().cpu().numpy()} 
Example #16
Source File: Drn.py    From VideoSuperResolution with MIT License
def train(self, inputs, labels, learning_rate=None):
    x0 = inputs[0]
    metrics = {}
    if self.noise > 0:
      stddev = torch.rand(1) * self.noise / 255
      stddev = stddev.reshape([1, 1, 1, 1])
      noise_map = torch.randn(*x0.shape) * stddev
      noise_map = noise_map.to(x0.device)
      x0 = (x0 + noise_map).clamp(0, 1)
      noise = self.ne(x0)
      l2_noise = F.mse_loss(noise, noise_map)
      metrics['noise'] = l2_noise.detach().cpu().numpy()
    elif self.noise < 0:
      stddev = self.ns(x0)
      noise_map = torch.randn(*x0.shape, device=x0.device) * stddev
      noise = self.ne(x0) + noise_map
      l2_noise = 0
    else:
      noise = None
      l2_noise = 0

    y = self.drn(x0, noise)
    l1_image = F.l1_loss(y, labels[0])
    loss = l1_image + 10 * l2_noise
    if self.noise != 0:
      tv = total_variance(noise)
      loss += tv * 1.0e-3
      metrics['tv'] = tv.detach().cpu().numpy()
    if learning_rate:
      for param_group in self.opt.param_groups:
        param_group["lr"] = learning_rate
    self.opt.zero_grad()
    loss.backward()
    self.opt.step()
    metrics['loss'] = loss.detach().cpu().numpy()
    metrics['image'] = l1_image.detach().cpu().numpy()
    return metrics 
Example #17
Source File: back_projection_loss.py    From srntt-pytorch with Apache License 2.0
def forward(self, x, y):
        assert x.shape[2] == y.shape[2] * self.scale_factor
        assert x.shape[3] == y.shape[3] * self.scale_factor
        x = F.interpolate(x, y.size()[-2:], mode='bicubic', align_corners=True)
        return F.l1_loss(x, y) 
Example #18
Source File: regression.py    From pytorch-lightning with Apache License 2.0
def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Actual metric computation

        Args:
            pred: predicted labels
            target: ground truth labels

        Return:
            A Tensor with the mae loss.
        """
        # pass reduction by keyword; the third positional argument of F.l1_loss is the deprecated size_average flag
        return F.l1_loss(pred, target, reduction=self.reduction)
Example #19
Source File: glo.py    From minimal_glo with MIT License
def forward(self, input, target):
        if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
            self._gauss_kernel = build_gauss_kernel(
                size=self.k_size, sigma=self.sigma, 
                n_channels=input.shape[1], cuda=input.is_cuda
            )
        pyr_input  = laplacian_pyramid( input, self._gauss_kernel, self.max_levels)
        pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
        return sum(fnn.l1_loss(a, b) for a, b in zip(pyr_input, pyr_target)) 
Example #20
Source File: batch_metrics.py    From poutyne with GNU Lesser General Public License v3.0
def l1(y_pred, y_true):
    return F.l1_loss(y_pred, y_true) 
Example #21
Source File: mlp_dropout.py    From GPF with MIT License
def forward(self, x, y = None):
        h1 = self.h1_weights(x)
        h1 = F.relu(h1)

        pred = self.h2_weights(h1)

        if y is not None:
            y = Variable(y)
            mse = F.mse_loss(pred, y)
            mae = F.l1_loss(pred, y)
            return pred, mae, mse
        else:
            return pred 
Example #22
Source File: mlp.py    From GPF with MIT License
def forward(self, x, y = None):
        h1 = self.h1_weights(x)
        h1 = F.relu(h1)

        pred = self.h2_weights(h1)
        
        if y is not None:
            y = Variable(y)
            mse = F.mse_loss(pred, y)
            mae = F.l1_loss(pred, y)
            return pred, mae, mse
        else:
            return pred 
Example #23
Source File: loss.py    From oft with MIT License
def masked_l1_loss(input, target, mask):
    return (F.l1_loss(input, target, reduction='none') * mask.float()).sum()
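The masked_l1_loss defined above returns a sum over the masked elements rather than a mean. A hedged usage sketch (the tensor shapes are illustrative assumptions) that also normalizes by the number of masked elements:

import torch

pred = torch.randn(2, 1, 32, 32)
target = torch.randn(2, 1, 32, 32)
mask = torch.rand(2, 1, 32, 32) > 0.5

summed = masked_l1_loss(pred, target, mask)
mean_over_mask = summed / mask.float().sum().clamp(min=1.0)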