Python torch.nn.functional.upsample_bilinear() Examples
The following are 29 code examples of torch.nn.functional.upsample_bilinear(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all other available functions and classes of the torch.nn.functional module.
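Note that upsample_bilinear() is deprecated in newer PyTorch releases in favor of torch.nn.functional.interpolate() with mode='bilinear' and align_corners=True. A minimal sketch of the equivalence (the input shape here is assumed purely for illustration):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16, 16)  # assumed NCHW input
y_legacy = F.upsample_bilinear(x, scale_factor=2)                                 # legacy call used in the examples below
y_modern = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)  # modern equivalent
assert y_legacy.shape == y_modern.shape == (1, 3, 32, 32)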
Example #1
Source File: UNet.py From PytorchConverter with BSD 2-Clause "Simplified" License | 6 votes |
def forward(self, x):
    dec1 = self.dec1(x)
    dec2 = self.dec2(dec1)
    dec3 = self.dec3(dec2)
    dec4 = self.dec4(dec3)
    center = self.center(dec4)
    enc4 = self.enc4(torch.cat([
        center,
        F.upsample_bilinear(dec4, scale_factor=center.size()[2] / dec4.size()[2])], 1))
    enc3 = self.enc3(torch.cat([
        enc4,
        F.upsample_bilinear(dec3, scale_factor=enc4.size()[2] / dec3.size()[2])], 1))
    enc2 = self.enc2(torch.cat([
        enc3,
        F.upsample_bilinear(dec2, scale_factor=enc3.size()[2] / dec2.size()[2])], 1))
    enc1 = self.enc1(torch.cat([
        enc2,
        F.upsample_bilinear(dec1, scale_factor=enc2.size()[2] / dec1.size()[2])], 1))
    return self.final(enc1)
Example #2
Source File: fcn.py From PLARD with MIT License | 6 votes |
def forward(self, x):
    conv1 = self.conv_block1(x)
    conv2 = self.conv_block2(conv1)
    conv3 = self.conv_block3(conv2)
    conv4 = self.conv_block4(conv3)
    conv5 = self.conv_block5(conv4)

    score = self.classifier(conv5)
    score_pool4 = self.score_pool4(conv4)
    score_pool3 = self.score_pool3(conv3)

    score = F.upsample_bilinear(score, score_pool4.size()[2:])
    score += score_pool4
    score = F.upsample_bilinear(score, score_pool3.size()[2:])
    score += score_pool3

    out = F.upsample_bilinear(score, x.size()[2:])
    return out
Example #3
Source File: fcn.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def forward(self, x):
    feats = self.feats(x)
    feat3 = self.feat3(feats)
    feat4 = self.feat4(feat3)
    feat5 = self.feat5(feat4)
    fconn = self.fconn(feat5)
    score_feat3 = self.score_feat3(feat3)
    score_feat4 = self.score_feat4(feat4)
    score_fconn = self.score_fconn(fconn)
    score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
    score += score_feat4
    score = F.upsample_bilinear(score, score_feat3.size()[2:])
    score += score_feat3
    return F.upsample_bilinear(score, x.size()[2:])
Example #4
Source File: pspnet.py From binseg_pytoch with Apache License 2.0 | 6 votes |
def forward(self, x):
    input_size = x.size()
    x = self.layer0(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.ppm(x)
    x = self.final(x)
    upsample = F.upsample_bilinear(x, input_size[2:])
    return self.activation(upsample)
Example #5
Source File: unet.py From binseg_pytoch with Apache License 2.0 | 6 votes |
def forward(self, x):
    en1 = self.down1(x)
    po1 = self.pool1(en1)
    en2 = self.down2(po1)
    po2 = self.pool2(en2)
    en3 = self.down3(po2)
    po3 = self.pool3(en3)
    en4 = self.down4(po3)
    po4 = self.pool4(en4)

    c1 = self.center(po4)

    dec1 = self.up1(torch.cat([c1, F.upsample_bilinear(en4, c1.size()[2:])], 1))
    dec2 = self.up2(torch.cat([dec1, F.upsample_bilinear(en3, dec1.size()[2:])], 1))
    dec3 = self.up3(torch.cat([dec2, F.upsample_bilinear(en2, dec2.size()[2:])], 1))
    dec4 = self.up4(torch.cat([dec3, F.upsample_bilinear(en1, dec3.size()[2:])], 1))

    out = self.output(dec4)
    return self.final(out)

# The improved version of the UNet model, which replaces all poolings with convolutions;
# skip connections go through convolutions, and residual convolutions are used.
Example #6
Source File: segnet.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def forward(self, x):
    '''
    Attention: the input size should be a multiple of 32.
    '''
    dec1 = self.dec1(x)
    dec2 = self.dec2(dec1)
    dec3 = self.dec3(dec2)
    dec4 = self.dec4(dec3)
    dec5 = self.dec5(dec4)
    enc5 = self.enc5(dec5)
    enc4 = self.enc4(torch.cat([dec4, enc5], 1))
    enc3 = self.enc3(torch.cat([dec3, enc4], 1))
    enc2 = self.enc2(torch.cat([dec2, enc3], 1))
    enc1 = self.enc1(torch.cat([dec1, enc2], 1))
    return F.upsample_bilinear(self.final(enc1), x.size()[2:])
Example #7
Source File: unet.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def forward(self, x):
    dec1 = self.dec1(x)
    dec2 = self.dec2(dec1)
    dec3 = self.dec3(dec2)
    dec4 = self.dec4(dec3)
    center = self.center(dec4)
    enc4 = self.enc4(torch.cat([
        center, F.upsample_bilinear(dec4, center.size()[2:])], 1))
    enc3 = self.enc3(torch.cat([
        enc4, F.upsample_bilinear(dec3, enc4.size()[2:])], 1))
    enc2 = self.enc2(torch.cat([
        enc3, F.upsample_bilinear(dec2, enc3.size()[2:])], 1))
    enc1 = self.enc1(torch.cat([
        enc2, F.upsample_bilinear(dec1, enc2.size()[2:])], 1))
    return F.upsample_bilinear(self.final(enc1), x.size()[2:])
Example #8
Source File: network.py From piwise with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    feats = self.feats(x)
    feat3 = self.feat3(feats)
    feat4 = self.feat4(feat3)
    feat5 = self.feat5(feat4)
    fconn = self.fconn(feat5)
    score_feat3 = self.score_feat3(feat3)
    score_feat4 = self.score_feat4(feat4)
    score_fconn = self.score_fconn(fconn)
    score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
    score += score_feat4
    score = F.upsample_bilinear(score, score_feat3.size()[2:])
    score += score_feat3
    return F.upsample_bilinear(score, x.size()[2:])
Example #9
Source File: gcn.py From binseg_pytoch with Apache License 2.0 | 6 votes |
def forward(self, x):
    # Assuming input of size 240x320
    x = self.layer0(x)            ## 120x160x64
    layer1 = self.layer1(x)       ## 60x80x256
    layer2 = self.layer2(layer1)  ## 30x40x512
    layer3 = self.layer3(layer2)  ## 15x20x1024
    layer4 = self.layer4(layer3)  ## 7x10x2048

    enc1 = self.br256(self.gcn256(layer1))
    enc2 = self.br512(self.gcn512(layer2))
    enc3 = self.br1024(self.gcn1024(layer3))
    enc4 = self.br2048(self.gcn2048(layer4))  ## 8x10x1

    dec1 = self.br1(F.upsample_bilinear(enc4, size=enc3.size()[2:]) + enc3)
    dec2 = self.br2(F.upsample_bilinear(dec1, enc2.size()[2:]) + enc2)
    dec3 = self.br3(F.upsample_bilinear(dec2, enc1.size()[2:]) + enc1)
    dec4 = self.br4(self.deconv1(dec3))

    score_map = self.br5(self.deconv2(dec4))
    return self.activation(score_map)
Example #10
Source File: fcn.py From binseg_pytoch with Apache License 2.0 | 6 votes |
def forward(self, x):
    feats = self.feats(x)
    pool3 = self.pool3(feats)
    pool4 = self.pool4(pool3)
    pool5 = self.pool5(pool4)
    fconn = self.fconn(pool5)
    score_pool3 = self.score_pool3(pool3)
    score_pool4 = self.score_pool4(pool4)

    resized_score_pool4 = F.upsample_bilinear(score_pool4, pool3.size()[2:])
    resized_score_fconn = F.upsample_bilinear(fconn, pool3.size()[2:])

    prediction = resized_score_pool4 + resized_score_fconn + score_pool3
    upsample = F.upsample_bilinear(prediction, x.size()[2:])
    return self.activation(upsample)
Example #11
Source File: network.py From piwise with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    dec1 = self.dec1(x)
    dec2 = self.dec2(dec1)
    dec3 = self.dec3(dec2)
    dec4 = self.dec4(dec3)
    center = self.center(dec4)
    enc4 = self.enc4(torch.cat([
        center, F.upsample_bilinear(dec4, center.size()[2:])], 1))
    enc3 = self.enc3(torch.cat([
        enc4, F.upsample_bilinear(dec3, enc4.size()[2:])], 1))
    enc2 = self.enc2(torch.cat([
        enc3, F.upsample_bilinear(dec2, enc3.size()[2:])], 1))
    enc1 = self.enc1(torch.cat([
        enc2, F.upsample_bilinear(dec1, enc2.size()[2:])], 1))
    return F.upsample_bilinear(self.final(enc1), x.size()[2:])
Example #12
Source File: network.py From piwise with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    # Keep the input spatial size so the final score map can be upsampled back to it.
    input_size = x.size()[2:]
    print('x', x.size())
    x = self.conv1(x)
    print('conv1', x.size())
    x = self.layer1(x)
    print('layer1', x.size())
    x = self.layer2(x)
    print('layer2', x.size())
    x = self.layer3(x)
    print('layer3', x.size())
    x = self.layer4(x)
    print('layer4', x.size())
    x = self.final(torch.cat([
        x,
        self.layer5a(x),
        self.layer5b(x),
        self.layer5c(x),
        self.layer5d(x),
    ], 1))
    print('final', x.size())
    return F.upsample_bilinear(x, input_size)
Example #13
Source File: gcn.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def forward(self, x):
    # if x: 512
    fm0 = self.layer0(x)    # 256
    fm1 = self.layer1(fm0)  # 128
    fm2 = self.layer2(fm1)  # 64
    fm3 = self.layer3(fm2)  # 32
    fm4 = self.layer4(fm3)  # 16

    gcfm1 = self.brm1(self.gcm1(fm4))  # 16
    gcfm2 = self.brm2(self.gcm2(fm3))  # 32
    gcfm3 = self.brm3(self.gcm3(fm2))  # 64
    gcfm4 = self.brm4(self.gcm4(fm1))  # 128

    fs1 = self.brm5(F.upsample_bilinear(gcfm1, fm3.size()[2:]) + gcfm2)  # 32
    fs2 = self.brm6(F.upsample_bilinear(fs1, fm2.size()[2:]) + gcfm3)    # 64
    fs3 = self.brm7(F.upsample_bilinear(fs2, fm1.size()[2:]) + gcfm4)    # 128
    fs4 = self.brm8(F.upsample_bilinear(fs3, fm0.size()[2:]))            # 256
    out = self.brm9(F.upsample_bilinear(fs4, self.input_size))           # 512

    return out
Example #14
Source File: deeplab_v2.py From SceneChangeDet with MIT License | 6 votes |
def forward(self, x):
    x = self.conv1(x)
    x = self.conv2(x)
    conv3_feature = self.conv3(x)
    conv4_feature = self.conv4(conv3_feature)
    conv5_feature = self.conv5(conv4_feature)
    fc6_1 = self.fc6_1(conv5_feature)
    fc7_1 = self.fc7_1(fc6_1)
    fc6_2 = self.fc6_2(conv5_feature)
    fc7_2 = self.fc7_2(fc6_2)
    fc6_3 = self.fc6_3(conv5_feature)
    fc7_3 = self.fc7_3(fc6_3)
    fc6_4 = self.fc6_4(conv5_feature)
    fc7_4 = self.fc7_4(fc6_4)
    fc_feature = fc7_1 + fc7_2 + fc7_3 + fc7_4
    #conv5_feature = self.fc8(x)
    #fc7_feature = self.fc8(fc)
    embedding_feature = self.embedding_layer(fc_feature)
    #score_final_up = F.upsample_bilinear(score_final, size[2:])
    #return conv4_feature, conv5_feature, fc_feature, embedding_feature
    return conv5_feature, fc_feature, embedding_feature
    #return fc_feature, embedding_feature
    #return embedding_feature
Example #15
Source File: upsampling_bilinear.py From pytorch2keras with MIT License | 5 votes |
def forward(self, x):
    from torch.nn import functional as F
    return F.upsample_bilinear(x, scale_factor=2)
Example #16
Source File: deeplab_msc_coco.py From SceneChangeDet with MIT License | 5 votes |
def forward(self, x):
    input_size = x.size()[2]
    self.interp1 = nn.Upsample(size=(int(input_size * 0.75) + 1, int(input_size * 0.75) + 1), mode='bilinear')
    self.interp2 = nn.Upsample(size=(int(input_size * 0.5) + 1, int(input_size * 0.5) + 1), mode='bilinear')
    self.interp3 = nn.Upsample(size=(outS(input_size), outS(input_size)), mode='bilinear')

    out = []
    x75 = self.interp1(x)
    x50 = self.interp2(x)
    fc7_x = self.truck_branch(x)
    fc7_x75 = self.truck_branch(x75)
    fc7_x50 = self.truck_branch(x50)
    out.append(fc7_x)
    out.append(self.interp3(fc7_x75))
    out.append(self.interp3(fc7_x50))
    out_cat = torch.cat(out, dim=1)
    #out_cat = torch.stack(out, dim=1)
    #print out_cat.size()

    scale_att_mask = F.softmax(self.scale_attention_branch(out_cat))
    score_x = self.fc8(fc7_x)
    score_x50 = self.interp3(self.fc8(fc7_x50))
    score_x75 = self.interp3(self.fc8(fc7_x75))
    assert score_x.size() == score_x50.size()

    score_att_x = torch.mul(score_x, scale_att_mask[:, 0, :, :].expand_as(score_x))
    score_att_x_075 = torch.mul(score_x75, scale_att_mask[:, 1, :, :].expand_as(score_x75))
    score_att_x_050 = torch.mul(score_x50, scale_att_mask[:, 2, :, :].expand_as(score_x50))
    score_final = score_att_x + score_att_x_075 + score_att_x_050
    #out_final = F.upsample_bilinear(score_final, x.size()[2:])
    return score_final, scale_att_mask
Example #17
Source File: fcn.py From binseg_pytoch with Apache License 2.0 | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    fconn = self.fconn(feats)
    upsample = F.upsample_bilinear(fconn, x.size()[2:])
    out = self.activation(upsample)
    return out
Example #18
Source File: wsdan.py From WS-DAN.PyTorch with MIT License | 5 votes |
def forward(self, features, attentions):
    B, C, H, W = features.size()
    _, M, AH, AW = attentions.size()

    # match size
    if AH != H or AW != W:
        attentions = F.upsample_bilinear(attentions, size=(H, W))

    # feature_matrix: (B, M, C) -> (B, M * C)
    if self.pool is None:
        feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
    else:
        feature_matrix = []
        for i in range(M):
            AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
            feature_matrix.append(AiF)
        feature_matrix = torch.cat(feature_matrix, dim=1)

    # sign-sqrt
    feature_matrix = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)

    # l2 normalization along dimension M and C
    feature_matrix = F.normalize(feature_matrix, dim=-1)
    return feature_matrix

# WS-DAN: Weakly Supervised Data Augmentation Network for FGVC
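The einsum in the branch above implements bilinear attention pooling: for each of the M attention maps, an attention-weighted spatial average of the C feature channels. A minimal sketch of that equivalence, with shapes assumed purely for illustration:

import torch

B, C, M, H, W = 2, 8, 4, 7, 7
features = torch.randn(B, C, H, W)
attentions = torch.rand(B, M, H, W)

# einsum form, as in the example above: result has shape (B, M, C)
fm_einsum = torch.einsum('imjk,injk->imn', attentions, features) / float(H * W)

# explicit loop over the M attention maps
fm_loop = torch.stack(
    [(attentions[:, i:i + 1] * features).mean(dim=(2, 3)) for i in range(M)], dim=1)

assert torch.allclose(fm_einsum, fm_loop, atol=1e-5)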
Example #19
Source File: DBCNN.py From DBCNN-PyTorch with MIT License | 5 votes |
def forward(self, X):
    """Forward pass of the network."""
    N = X.size()[0]
    X1 = self.features1(X)
    H = X1.size()[2]
    W = X1.size()[3]
    assert X1.size()[1] == 512
    X2 = self.features2(X)
    H2 = X2.size()[2]
    W2 = X2.size()[3]
    assert X2.size()[1] == 128
    if (H != H2) | (W != W2):
        X2 = F.upsample_bilinear(X2, (H, W))

    X1 = X1.view(N, 512, H * W)
    X2 = X2.view(N, 128, H * W)
    X = torch.bmm(X1, torch.transpose(X2, 1, 2)) / (H * W)  # Bilinear pooling
    assert X.size() == (N, 512, 128)
    X = X.view(N, 512 * 128)
    X = torch.sqrt(X + 1e-8)
    X = torch.nn.functional.normalize(X)
    X = self.fc(X)
    assert X.size() == (N, 1)
    return X
Example #20
Source File: RANet_lib.py From RANet with Apache License 2.0 | 5 votes |
def bbox_uncrop(img, bbox, size, crop_size):  # 4D input
    img = F.upsample_bilinear(img, size=crop_size[2::])
    # F.pad with a 4-tuple pads the last two dimensions in (left, right, top, bottom) order
    msk = F.pad(img, (bbox[1], 864 - bbox[3], bbox[0], 480 - bbox[2]))
    return msk
Example #21
Source File: ops.py From Semi-supervised-segmentation-cycleGAN with MIT License | 5 votes |
def forward(self, x):
    output_conv33 = self.conv_33(x)
    output_conv55 = self.conv_55(output_conv33)
    output_conv77 = self.conv_77(output_conv55)

    output_conv77_toC = self.conv_77_toC(output_conv77)
    output_conv77_upsample = self.upsample_times2(output_conv77_toC)

    output_conv55_toC = self.conv_55_toC(output_conv55)
    output_conv55_toC = output_conv55_toC + output_conv77_upsample
    output_conv55_upsample = self.upsample_times2(output_conv55_toC)

    output_conv33_toC = self.conv_33_toC(output_conv33)
    output_conv33_toC = output_conv33_toC + output_conv55_upsample
    output_conv33_upsample = self.upsample_times2(output_conv33_toC)

    output_conv11 = self.conv_11(x)

    output_global_pool = self.global_pool(x)
    output_pool_conv_toC = self.pool_conv_toC(output_global_pool)
    output_pool_upsample = self.upsample_globalPool(output_pool_conv_toC)

    output_conv11 = output_conv11 * output_conv33_upsample
    output_conv11 = output_conv11 + output_pool_upsample

    final_output = F.upsample_bilinear(output_conv11, scale_factor=8)
    return final_output
Example #22
Source File: fcn.py From binseg_pytoch with Apache License 2.0 | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    pool4 = self.pool4(feats)
    pool5 = self.pool5(pool4)
    score_fconn = self.fconn(pool5)
    score_pool4 = self.score_pool4(pool4)

    resize_score_fconn = F.upsample_bilinear(score_fconn, score_pool4.size()[2:])
    prediction = resize_score_fconn + score_pool4
    upsample = F.upsample_bilinear(prediction, x.size()[2:])
    return self.activation(upsample)
Example #23
Source File: fcn.py From PLARD with MIT License | 5 votes |
def forward(self, x):
    conv1 = self.conv_block1(x)
    conv2 = self.conv_block2(conv1)
    conv3 = self.conv_block3(conv2)
    conv4 = self.conv_block4(conv3)
    conv5 = self.conv_block5(conv4)
    score = self.classifier(conv5)
    out = F.upsample_bilinear(score, x.size()[2:])
    return out
Example #24
Source File: network.py From piwise with BSD 3-Clause "New" or "Revised" License | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    fconn = self.fconn(feats)
    score = self.score(fconn)
    return F.upsample_bilinear(score, x.size()[2:])
Example #25
Source File: network.py From piwise with BSD 3-Clause "New" or "Revised" License | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    feat4 = self.feat4(feats)
    feat5 = self.feat5(feat4)
    fconn = self.fconn(feat5)
    score_feat4 = self.score_feat4(feat4)
    score_fconn = self.score_fconn(fconn)
    score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
    score += score_feat4
    return F.upsample_bilinear(score, x.size()[2:])
Example #26
Source File: fcn.py From pytorch-semantic-segmentation with MIT License | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    fconn = self.fconn(feats)
    score = self.score(fconn)
    return F.upsample_bilinear(score, x.size()[2:])
Example #27
Source File: fcn.py From pytorch-semantic-segmentation with MIT License | 5 votes |
def forward(self, x):
    feats = self.feats(x)
    feat4 = self.feat4(feats)
    feat5 = self.feat5(feat4)
    fconn = self.fconn(feat5)
    score_feat4 = self.score_feat4(feat4)
    score_fconn = self.score_fconn(fconn)
    score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
    score += score_feat4
    return F.upsample_bilinear(score, x.size()[2:])
Example #28
Source File: fcn.py From PLARD with MIT License | 5 votes |
def forward(self, x):
    conv1 = self.conv_block1(x)
    conv2 = self.conv_block2(conv1)
    conv3 = self.conv_block3(conv2)
    conv4 = self.conv_block4(conv3)
    conv5 = self.conv_block5(conv4)
    score = self.classifier(conv5)
    score_pool4 = self.score_pool4(conv4)
    score = F.upsample_bilinear(score, score_pool4.size()[2:])
    score += score_pool4
    out = F.upsample_bilinear(score, x.size()[2:])
    return out
Example #29
Source File: utils.py From WS-DAN.PyTorch with MIT License | 4 votes |
def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):
    batches, _, imgH, imgW = images.size()

    if mode == 'crop':
        crop_images = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_c = random.uniform(*theta) * atten_map.max()
            else:
                theta_c = theta * atten_map.max()

            crop_mask = F.upsample_bilinear(atten_map, size=(imgH, imgW)) >= theta_c
            nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])
            height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0)
            height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH)
            width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0)
            width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW)

            crop_images.append(
                F.upsample_bilinear(
                    images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],
                    size=(imgH, imgW)))
        crop_images = torch.cat(crop_images, dim=0)
        return crop_images

    elif mode == 'drop':
        drop_masks = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_d = random.uniform(*theta) * atten_map.max()
            else:
                theta_d = theta * atten_map.max()
            drop_masks.append(F.upsample_bilinear(atten_map, size=(imgH, imgW)) < theta_d)
        drop_masks = torch.cat(drop_masks, dim=0)
        drop_images = images * drop_masks.float()
        return drop_images

    else:
        raise ValueError("Expected mode in ['crop', 'drop'], but received unsupported augmentation method %s" % mode)

##################################
# transform in dataset
##################################
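A hypothetical call site for batch_augment (the tensor shapes, theta ranges, and variable names here are assumptions for illustration and are not taken from the original file):

import torch

images = torch.randn(8, 3, 448, 448)      # assumed batch of input images
attention_map = torch.rand(8, 1, 14, 14)  # one selected attention map per image (assumed shape)

# attention-guided cropping and dropping, as defined above
crop_images = batch_augment(images, attention_map, mode='crop', theta=(0.4, 0.6), padding_ratio=0.1)
drop_images = batch_augment(images, attention_map, mode='drop', theta=(0.2, 0.5))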