Python torch.nn.Upsample() Examples
The following are 30 code examples of torch.nn.Upsample().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
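Before the project examples, here is a minimal, self-contained sketch of how nn.Upsample is typically constructed and applied; the tensor shapes are illustrative and not taken from any of the projects below.

import torch
import torch.nn as nn

# A 4D input (N, C, H, W); spatial dims are doubled by scale_factor=2.
x = torch.randn(1, 3, 16, 16)

up_nearest = nn.Upsample(scale_factor=2, mode='nearest')
up_bilinear = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
up_fixed = nn.Upsample(size=(64, 64), mode='bilinear', align_corners=False)

print(up_nearest(x).shape)   # torch.Size([1, 3, 32, 32])
print(up_bilinear(x).shape)  # torch.Size([1, 3, 32, 32])
print(up_fixed(x).shape)     # torch.Size([1, 3, 64, 64])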
Example #1
Source File: base_networks.py From STARnet with MIT License | 6 votes |
def __init__(self, input_size, output_size, bias=True, upsample='deconv', activation='relu', norm='batch'):
    super(Upsample2xBlock, self).__init__()
    scale_factor = 2
    # 1. Deconvolution (Transposed convolution)
    if upsample == 'deconv':
        self.upsample = DeconvBlock(input_size, output_size,
                                    kernel_size=4, stride=2, padding=1,
                                    bias=bias, activation=activation, norm=norm)
    # 2. Sub-pixel convolution (Pixel shuffler)
    elif upsample == 'ps':
        self.upsample = PSBlock(input_size, output_size, scale_factor=scale_factor,
                                bias=bias, activation=activation, norm=norm)
    # 3. Resize and Convolution
    elif upsample == 'rnc':
        self.upsample = nn.Sequential(
            nn.Upsample(scale_factor=scale_factor, mode='nearest'),
            ConvBlock(input_size, output_size, kernel_size=3, stride=1, padding=1,
                      bias=bias, activation=activation, norm=norm)
        )
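This block offers three interchangeable 2x upsampling strategies. DeconvBlock, PSBlock and ConvBlock are project-specific helpers not shown here; the sketch below rebuilds the 'rnc' (resize and convolution) and sub-pixel options from built-in modules only, as a rough illustration rather than the project's code.

import torch
import torch.nn as nn

# Resize-then-convolve: nearest upsample followed by a 3x3 conv.
rnc = nn.Sequential(
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
)
# Sub-pixel convolution: expand channels by scale**2, then nn.PixelShuffle.
ps = nn.Sequential(
    nn.Conv2d(64, 64 * 2 ** 2, kernel_size=3, padding=1),
    nn.PixelShuffle(2),
)
x = torch.randn(1, 64, 32, 32)
assert rnc(x).shape == ps(x).shape == (1, 64, 64, 64)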
Example #2
Source File: senet_fpn.py From argus-tgs-salt with MIT License | 6 votes |
def __init__(self, in_channels, n_filters, is_deconv=False, scale=True):
    super().__init__()

    # B, C, H, W -> B, C/4, H, W
    self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
    self.norm1 = nn.BatchNorm2d(in_channels // 4)
    self.relu1 = nonlinearity(inplace=True)

    if scale:
        # B, C/4, H, W -> B, C/4, H, W
        if is_deconv:
            self.upscale = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3,
                                              stride=2, padding=1, output_padding=1)
        else:
            self.upscale = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    else:
        self.upscale = nn.Conv2d(in_channels // 4, in_channels // 4, 3, padding=1)
    self.norm2 = nn.BatchNorm2d(in_channels // 4)
    self.relu2 = nonlinearity(inplace=True)

    # B, C/4, H, W -> B, C, H, W
    self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
    self.norm3 = nn.BatchNorm2d(n_filters)
    self.relu3 = nonlinearity(inplace=True)
Example #3
Source File: layers.py From argus-tgs-salt with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, scale_factor=2, xavier=False):
    super(FPNBlock, self).__init__()
    mid_channels = int(round((in_channels + out_channels) / 2))
    self.conv1 = conv3x3(in_channels, mid_channels)
    self.relu1 = nonlinearity(inplace=True)
    self.bn1 = nn.BatchNorm2d(mid_channels)
    self.conv2 = conv3x3(mid_channels, out_channels)
    self.relu2 = nonlinearity(inplace=True)
    self.bn2 = nn.BatchNorm2d(out_channels)
    if scale_factor > 1:
        self.upscale = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True)
    else:
        self.upscale = lambda x: x
    if xavier:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
Example #4
Source File: senet.py From argus-tgs-salt with MIT License | 6 votes |
def __init__(self, in_channels, n_filters, is_deconv=False, scale=True):
    super().__init__()

    # B, C, H, W -> B, C/4, H, W
    self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
    self.norm1 = nn.BatchNorm2d(in_channels // 4)
    self.relu1 = nonlinearity(inplace=True)

    if scale:
        # B, C/4, H, W -> B, C/4, H, W
        if is_deconv:
            self.upscale = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3,
                                              stride=2, padding=1, output_padding=1)
        else:
            self.upscale = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    else:
        self.upscale = nn.Conv2d(in_channels // 4, in_channels // 4, 3, padding=1)
    self.norm2 = nn.BatchNorm2d(in_channels // 4)
    self.relu2 = nonlinearity(inplace=True)

    # B, C/4, H, W -> B, C, H, W
    self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
    self.norm3 = nn.BatchNorm2d(n_filters)
    self.relu3 = nonlinearity(inplace=True)
Example #5
Source File: retinanet.py From EfficientDet-PyTorch with Apache License 2.0 | 6 votes |
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
    super(PyramidFeatures, self).__init__()

    # upsample C5 to get P5 from the FPN paper
    self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
    self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
    self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    # add P5 elementwise to C4
    self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
    self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
    self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    # add P4 elementwise to C3
    self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
    self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    # "P6 is obtained via a 3x3 stride-2 conv on C5"
    self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)

    # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
    self.P7_1 = nn.ReLU()
    self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
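The forward pass is omitted from this excerpt, but the role of the nn.Upsample modules is to bring each coarser pyramid level to the resolution of the next finer one before the elementwise sum of the FPN top-down pathway. A minimal shape-only sketch of that pattern (illustrative values, not code from the project):

import torch
import torch.nn as nn

feature_size = 256
p5 = torch.randn(1, feature_size, 8, 8)    # coarse level after its 1x1 lateral conv
c4 = torch.randn(1, feature_size, 16, 16)  # next finer level after its 1x1 lateral conv

upsample = nn.Upsample(scale_factor=2, mode='nearest')
p4 = c4 + upsample(p5)                     # elementwise sum, as in the FPN top-down pathway
print(p4.shape)                            # torch.Size([1, 256, 16, 16])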
Example #6
Source File: helper.py From torchscope with Apache License 2.0 | 6 votes |
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out) // 2
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
        return compute_Pool2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_flops(module, inp, out) // 2
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out) // 2
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out) // 2
    else:
        return 0
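The per-module helpers referenced here (compute_Conv2d_flops, compute_Upsample_flops, and so on) live elsewhere in torchscope and are not part of this snippet. As a rough illustration of the idea only, upsampling cost is often estimated from the number of output elements; the sketch below is a hypothetical stand-in, not the library's actual implementation.

import torch

def approx_upsample_flops(out: torch.Tensor) -> int:
    # Rough convention: one operation per produced output element.
    # Bilinear/trilinear modes cost a few multiply-adds per element, so real
    # counters may scale this by a mode-dependent constant.
    batch_size = out.size(0)
    return batch_size * out[0].numel()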
Example #7
Source File: bifpn.py From EfficientDet-PyTorch with Apache License 2.0 | 6 votes |
def forward(self, inputs):
    p3_x, p4_x, p5_x, p6_x, p7_x = inputs

    # Calculate Top-Down Pathway
    w1 = self.w1_relu(self.w1)
    w1 /= torch.sum(w1, dim=0) + self.epsilon
    w2 = self.w2_relu(self.w2)
    w2 /= torch.sum(w2, dim=0) + self.epsilon

    p7_td = p7_x
    p6_td = self.p6_td(w1[0, 0] * p6_x + w1[1, 0] * F.interpolate(p7_td, scale_factor=2))
    p5_td = self.p5_td(w1[0, 1] * p5_x + w1[1, 1] * F.interpolate(p6_td, scale_factor=2))
    p4_td = self.p4_td(w1[0, 2] * p4_x + w1[1, 2] * F.interpolate(p5_td, scale_factor=2))
    p3_td = self.p3_td(w1[0, 3] * p3_x + w1[1, 3] * F.interpolate(p4_td, scale_factor=2))

    # Calculate Bottom-Up Pathway
    p3_out = p3_td
    p4_out = self.p4_out(w2[0, 0] * p4_x + w2[1, 0] * p4_td + w2[2, 0] * nn.Upsample(scale_factor=0.5)(p3_out))
    p5_out = self.p5_out(w2[0, 1] * p5_x + w2[1, 1] * p5_td + w2[2, 1] * nn.Upsample(scale_factor=0.5)(p4_out))
    p6_out = self.p6_out(w2[0, 2] * p6_x + w2[1, 2] * p6_td + w2[2, 2] * nn.Upsample(scale_factor=0.5)(p5_out))
    p7_out = self.p7_out(w2[0, 3] * p7_x + w2[1, 3] * p7_td + w2[2, 3] * nn.Upsample(scale_factor=0.5)(p6_out))

    return [p3_out, p4_out, p5_out, p6_out, p7_out]
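Note that the bottom-up pathway constructs a fresh nn.Upsample(scale_factor=0.5) on every call to halve the resolution; since nn.Upsample has no parameters this works, but the functional form expresses the same operation without building a module each time. A small equivalence check (illustrative shapes):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)

# Both resample to half resolution with the default nearest-neighbour mode.
y1 = nn.Upsample(scale_factor=0.5)(x)
y2 = F.interpolate(x, scale_factor=0.5)
assert torch.equal(y1, y2) and y1.shape == (1, 64, 16, 16)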
Example #8
Source File: Decoder_networks.py From Talking-Face-Generation-DAVS with MIT License | 6 votes |
def __init__(self, opt):
    super(Decoder, self).__init__()
    self.opt = opt
    self.relu = nn.ReLU()
    self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    self.deconv1_1_new = nn.ConvTranspose2d(512, 512, (4, 4), 1, 0)
    self.deconv1_1_bn = nn.BatchNorm2d(512)
    self.convblock1 = ConvBlock(512, 512, "1", nums=2)
    self.convblock2 = ConvBlock(512, 512, "2", nums=3)
    self.convblock3 = ConvBlock(512, 256, "3", nums=4)
    self.convblock4 = ConvBlock(256 + 160, 256, "4", nums=4)
    self.convblock5 = ConvBlock(256, 128, "5", nums=3)
    self.convblock6 = ConvBlock(128, 64, "6", nums=2)
    self.conv7_1 = nn.ConvTranspose2d(64, 32, 3, 1, 1)
    self.conv7_1_bn = nn.BatchNorm2d(32)
    self.conv7_2 = nn.ConvTranspose2d(32, 3, 3, 1, 1)
    self.tanh = nn.Tanh()
Example #9
Source File: networks.py From 2D-Motion-Retargeting with MIT License | 6 votes |
def __init__(self, channels, kernel_size=7):
    super(Decoder, self).__init__()

    model = []
    pad = (kernel_size - 1) // 2
    acti = nn.LeakyReLU(0.2)

    for i in range(len(channels) - 1):
        model.append(nn.Upsample(scale_factor=2, mode='nearest'))
        model.append(nn.ReflectionPad1d(pad))
        model.append(nn.Conv1d(channels[i], channels[i + 1],
                               kernel_size=kernel_size, stride=1))
        if i == 0 or i == 1:
            model.append(nn.Dropout(p=0.2))
        if not i == len(channels) - 2:
            model.append(acti)  # whether to add tanh a last?
            # model.append(nn.Dropout(p=0.2))

    self.model = nn.Sequential(*model)
Example #10
Source File: segnet.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, num_layers):
    super().__init__()

    layers = [
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn.Conv2d(in_channels, in_channels // 2, 3, padding=1),
        nn.BatchNorm2d(in_channels // 2),
        nn.ReLU(inplace=True),
    ]
    layers += [
        nn.Conv2d(in_channels // 2, in_channels // 2, 3, padding=1),
        nn.BatchNorm2d(in_channels // 2),
        nn.ReLU(inplace=True),
    ] * num_layers
    layers += [
        nn.Conv2d(in_channels // 2, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    self.encode = nn.Sequential(*layers)
Example #11
Source File: sadecoder.py From ACAN with MIT License | 6 votes |
def __init__(self, in_channels=2048, key_channels=512, value_channels=2048, height=224, width=304):
    super(SADecoder, self).__init__()
    out_channels = 512
    self.saconv = SelfAttentionBlock_(in_channels, key_channels, value_channels)
    self.image_context = nn.Sequential(OrderedDict([
        ('avgpool', nn.AvgPool2d((height // 8, width // 8), padding=0)),
        ('dropout', nn.Dropout2d(0.5, inplace=True)),
        ('reshape1', Reshape(2048)),
        ('linear1', nn.Linear(2048, 512)),
        ('relu1', nn.ReLU(inplace=True)),
        ('linear2', nn.Linear(512, 512)),
        ('relu2', nn.ReLU(inplace=True)),
        ('reshape2', Reshape(512, 1, 1)),
        ('upsample', nn.Upsample(size=(height // 8, width // 8), mode='bilinear', align_corners=True))]))
    self.merge = nn.Sequential(OrderedDict([
        ('dropout1', nn.Dropout2d(0.5, inplace=True)),
        ('conv1', nn.Conv2d(value_channels + out_channels, value_channels, kernel_size=1, stride=1)),
        ('relu', nn.ReLU(inplace=True)),
        ('dropout2', nn.Dropout2d(0.5, inplace=False))]))
Example #12
Source File: BEV_Unet.py From PolarSeg with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, in_ch, out_ch, circular_padding, bilinear=True, group_conv=False):
    super(up, self).__init__()

    # would be a nice idea if the upsampling could be learned too,
    # but my machine do not have enough memory to handle all those weights
    if bilinear:
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    elif group_conv:
        self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2, groups=in_ch // 2)
    else:
        self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)

    if circular_padding:
        self.conv = double_conv_circular(in_ch, out_ch, group_conv=group_conv)
    else:
        self.conv = double_conv(in_ch, out_ch, group_conv=group_conv)
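The matching forward is not part of this excerpt; in most U-Net style implementations the up block upsamples the decoder feature, pads it to the skip connection's size, concatenates the two, and applies the double convolution. A hypothetical standalone sketch of that pattern (not the PolarSeg code itself, and using a plain conv as a stand-in for the project's double_conv):

import torch
import torch.nn as nn
import torch.nn.functional as F

def unet_up_step(x_low, x_skip, up, conv):
    # x_low: decoder feature at half resolution; x_skip: encoder skip connection.
    x_low = up(x_low)
    # Pad in case odd input sizes left the two maps a pixel apart.
    dh = x_skip.size(2) - x_low.size(2)
    dw = x_skip.size(3) - x_low.size(3)
    x_low = F.pad(x_low, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))
    return conv(torch.cat([x_skip, x_low], dim=1))

up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
conv = nn.Conv2d(96, 32, 3, padding=1)   # stand-in for the project's double_conv
out = unet_up_step(torch.randn(1, 64, 16, 16), torch.randn(1, 32, 32, 32), up, conv)
print(out.shape)  # torch.Size([1, 32, 32, 32])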
Example #13
Source File: layers.py From pytorch_stacked_hourglass with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, n, f, bn=None, increase=0):
    super(Hourglass, self).__init__()
    nf = f + increase
    self.up1 = Residual(f, f)
    # Lower branch
    self.pool1 = Pool(2, 2)
    self.low1 = Residual(f, nf)
    self.n = n
    # Recursive hourglass
    if self.n > 1:
        self.low2 = Hourglass(n - 1, nf, bn=bn)
    else:
        self.low2 = Residual(nf, nf)
    self.low3 = Residual(nf, f)
    self.up2 = nn.Upsample(scale_factor=2, mode='nearest')
Example #14
Source File: gan.py From MoePhoto with Apache License 2.0 | 5 votes |
def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
                 pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
    # Up conv
    # described in https://distill.pub/2016/deconv-checkerboard/
    upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
    conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
                      pad_type=pad_type, norm_type=norm_type, act_type=act_type)
    return sequential(upsample, conv)
Example #15
Source File: models.py From ACME with GNU General Public License v3.0 | 5 votes |
def upBlock(in_planes, out_planes):
    block = nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU()
    )
    return block

# Keep the spatial size
Example #16
Source File: hg.py From StarMap with GNU General Public License v3.0 | 5 votes |
def __init__(self, n, nModules, nFeats):
    super(Hourglass, self).__init__()
    self.n = n
    self.nModules = nModules
    self.nFeats = nFeats

    _up1_, _low1_, _low2_, _low3_ = [], [], [], []
    for j in range(self.nModules):
        _up1_.append(Residual(self.nFeats, self.nFeats))
    self.low1 = nn.MaxPool2d(kernel_size=2, stride=2)
    for j in range(self.nModules):
        _low1_.append(Residual(self.nFeats, self.nFeats))

    if self.n > 1:
        self.low2 = Hourglass(n - 1, self.nModules, self.nFeats)
    else:
        for j in range(self.nModules):
            _low2_.append(Residual(self.nFeats, self.nFeats))
        self.low2_ = nn.ModuleList(_low2_)

    for j in range(self.nModules):
        _low3_.append(Residual(self.nFeats, self.nFeats))

    self.up1_ = nn.ModuleList(_up1_)
    self.low1_ = nn.ModuleList(_low1_)
    self.low3_ = nn.ModuleList(_low3_)

    # self.up2 = nn.Upsample(scale_factor = 2)
    self.up2 = nn.UpsamplingNearest2d(scale_factor=2)
Example #17
Source File: unet_models.py From open-solution-salt-identification with MIT License | 5 votes |
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
    super(DecoderBlockV2, self).__init__()
    self.is_deconv = is_deconv

    self.deconv = nn.Sequential(
        ConvBnRelu(in_channels, middle_channels),
        nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )

    self.upsample = nn.Sequential(
        ConvBnRelu(in_channels, out_channels),
        nn.Upsample(scale_factor=2, mode='bilinear'),
    )
Example #18
Source File: utils.py From torchbench with Apache License 2.0 | 5 votes |
def __init__(self, device):
    super().__init__()
    self.inception_model = inception_v3(pretrained=True).to(device=device)
    self.up = nn.Upsample(size=(299, 299), mode="bilinear").to(device=device)
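Here nn.Upsample is given a target size instead of a scale_factor, so inputs of any resolution are resized to the 299x299 input expected by Inception v3. A minimal, self-contained illustration (shapes only, not from the project):

import torch
import torch.nn as nn

up = nn.Upsample(size=(299, 299), mode="bilinear", align_corners=False)
small = torch.randn(4, 3, 64, 64)
large = torch.randn(4, 3, 512, 512)
print(up(small).shape, up(large).shape)  # both torch.Size([4, 3, 299, 299])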
Example #19
Source File: autoencoder_v4.py From STARnet with MIT License | 5 votes |
def __init__(self, in_ch, out_ch, bilinear=True):
    super(up, self).__init__()

    # would be a nice idea if the upsampling could be learned too,
    # but my machine do not have enough memory to handle all those weights
    if bilinear:
        self.up = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
    else:
        self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)

    self.conv = double_conv(in_ch, out_ch)
Example #20
Source File: layers.py From argus-tgs-salt with MIT License | 5 votes |
def __init__(self, in_channels, n_filters, is_deconv=True, deflation=4, xavier=False):
    super(DecoderBlock, self).__init__()

    # B, C, H, W -> B, C/4, H, W
    assert in_channels % deflation == 0, "Incorrect deflation"
    self.conv1 = nn.Conv2d(in_channels, in_channels // deflation, 1)
    self.norm1 = nn.BatchNorm2d(in_channels // deflation)
    self.relu1 = nonlinearity(inplace=True)

    # B, C/4, H, W -> B, C/4, H, W
    if is_deconv:
        self.upscale = nn.ConvTranspose2d(in_channels // deflation, in_channels // deflation, 3,
                                          stride=2, padding=1, output_padding=1)
    else:
        self.upscale = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    self.norm2 = nn.BatchNorm2d(in_channels // deflation)
    self.relu2 = nonlinearity(inplace=True)

    # B, C/4, H, W -> B, C, H, W
    self.conv3 = nn.Conv2d(in_channels // deflation, n_filters, 1)
    self.norm3 = nn.BatchNorm2d(n_filters)
    self.relu3 = nonlinearity(inplace=True)

    if xavier:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
Example #21
Source File: deprecated.py From open-solution-salt-identification with MIT License | 5 votes |
def __init__(self, in_channels, middle_channels, out_channels):
    super(DecoderBlock, self).__init__()
    self.conv1 = Conv2dBnRelu(in_channels, middle_channels)
    self.conv2 = Conv2dBnRelu(middle_channels, out_channels)
    self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    self.relu = nn.ReLU(inplace=True)
    self.channel_se = ChannelSELayer(out_channels, reduction=16)
    self.spatial_se = SpatialSELayer(out_channels)
Example #22
Source File: base.py From open-solution-salt-identification with MIT License | 5 votes |
def __init__(self, in_channels, middle_channels, out_channels):
    super(DecoderBlock, self).__init__()
    self.conv1 = Conv2dBnRelu(in_channels, middle_channels)
    self.conv2 = Conv2dBnRelu(middle_channels, out_channels)
    self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    self.relu = nn.ReLU(inplace=True)
    self.channel_se = ChannelSELayer(out_channels, reduction=16)
    self.spatial_se = SpatialSELayer(out_channels)
Example #23
Source File: darknet.py From SlowFast-Network-pytorch with MIT License | 5 votes |
def __init__(self, stride=2):
    super(Upsample, self).__init__()
    self.stride = stride
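Only the constructor of this custom Upsample module is shown; the forward pass lives elsewhere in the file. Darknet/YOLO ports typically implement this kind of stride-based upsample by repeating each pixel, which is equivalent to nearest-neighbour interpolation. The self-contained sketch below illustrates that pattern under that assumption; the class name and body are hypothetical, not the repository's exact code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class NearestUpsample(nn.Module):
    """Repeat every pixel `stride` times along H and W (nearest-neighbour upsampling)."""
    def __init__(self, stride=2):
        super().__init__()
        self.stride = stride

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.stride, mode='nearest')

x = torch.randn(1, 256, 13, 13)
print(NearestUpsample(2)(x).shape)  # torch.Size([1, 256, 26, 26])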
Example #24
Source File: ops.py From Self-Supervised-Gans-Pytorch with MIT License | 5 votes |
def __init__(self, in_channels, out_channels=256, kernel_size=3, stride=1,
             spectral_normed=False, up_sampling=False):
    super(Residual_G, self).__init__()
    self.up_sampling = up_sampling
    self.relu = nn.ReLU()
    self.batch_norm1 = nn.BatchNorm2d(in_channels)
    self.batch_norm2 = nn.BatchNorm2d(out_channels)
    self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
    self.conv1 = conv2d(in_channels, out_channels, spectral_normed=spectral_normed,
                        kernel_size=kernel_size, stride=stride, padding=1)
    self.conv2 = conv2d(out_channels, out_channels, spectral_normed=spectral_normed,
                        kernel_size=kernel_size, stride=stride, padding=1)
Example #25
Source File: modules.py From waveglow with Apache License 2.0 | 5 votes |
def __init__(self, upsample_factor, upsample_method='duplicate', squeeze_factor=8):
    super(UpsampleNet, self).__init__()
    self.upsample_factor = upsample_factor
    self.upsample_method = upsample_method
    self.squeeze_factor = squeeze_factor

    if upsample_method == 'duplicate':
        upsample_factor = int(np.prod(upsample_factor))
        # Repeat for upsampling.
        self.upsample = nn.Upsample(scale_factor=upsample_factor, mode='nearest')
    elif upsample_method == 'transposed_conv2d':
        if not isinstance(upsample_factor, list):
            raise ValueError("You must specify upsample_factor as a list "
                             "when used with transposed_conv2d")
        freq_axis_kernel_size = 3
        self.upsample_conv = nn.ModuleList()
        for s in upsample_factor:
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            # The filter sizes (in time) are doubled from strides
            # to avoid the checkerboard artifacts.
            conv = nn.ConvTranspose2d(1, 1, (freq_axis_kernel_size, 2 * s),
                                      padding=(freq_axis_padding, s // 2),
                                      dilation=1, stride=(1, s))
            self.upsample_conv.append(conv)
            self.upsample_conv.append(nn.LeakyReLU(negative_slope=0.4, inplace=True))
    else:
        raise ValueError('{} upsampling is not supported'.format(self.upsample_method))

    self.squeeze_layer = SqueezeLayer(squeeze_factor)
Example #26
Source File: darknet.py From pytorch-0.4-yolov3 with MIT License | 5 votes |
def __init__(self, stride=2):
    super(Upsample, self).__init__()
    self.stride = stride
Example #27
Source File: gcnet.py From DSMnet with Apache License 2.0 | 5 votes |
def __init__(self, num_F=32):
    super(feature3d, self).__init__()
    self.F = num_F
    self.l19 = conv3d_bn(self.F*2, self.F, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l20 = conv3d_bn(self.F, self.F, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)

    self.l21 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l22 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l23 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)

    self.l24 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l25 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l26 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)

    self.l27 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l28 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l29 = conv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)

    self.l30 = conv3d_bn(self.F*2, self.F*4, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l31 = conv3d_bn(self.F*4, self.F*4, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l32 = conv3d_bn(self.F*4, self.F*4, kernel_size=3, stride=1, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)

    self.l33 = deconv3d_bn(self.F*4, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l34 = deconv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l35 = deconv3d_bn(self.F*2, self.F*2, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l36 = deconv3d_bn(self.F*2, self.F, kernel_size=3, stride=2, flag_bias=flag_bias_t, bn=flag_bn, activefun=activefun_t)
    self.l37 = deconv3d_bn(self.F, 1, kernel_size=3, stride=2, bn=False, activefun=None)

    self.softmax = nn.Softmax2d()
    # self.m = nn.Upsample(scale_factor=2, mode='bilinear')
Example #28
Source File: segnet.py From pytorch-semantic-segmentation with MIT License | 5 votes |
def __init__(self, num_classes):
    super().__init__()
    decoders = list(models.vgg16(pretrained=True).features.children())

    self.dec1 = nn.Sequential(*decoders[:5])
    self.dec2 = nn.Sequential(*decoders[5:10])
    self.dec3 = nn.Sequential(*decoders[10:17])
    self.dec4 = nn.Sequential(*decoders[17:24])
    self.dec5 = nn.Sequential(*decoders[24:])

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            m.requires_grad = False

    self.enc5 = SegNetEnc(512, 512, 1)
    self.enc4 = SegNetEnc(1024, 256, 1)
    self.enc3 = SegNetEnc(512, 128, 1)
    self.enc2 = SegNetEnc(256, 64, 0)
    self.enc1 = nn.Sequential(
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn.Conv2d(128, 64, 3, padding=1),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),
    )
    self.final = nn.Conv2d(64, num_classes, 3, padding=1)
Example #29
Source File: unet_utils.py From UnsupervisedGeometryAwareRepresentationLearning with GNU General Public License v3.0 | 5 votes |
def __init__(self, in_size, out_size, is_deconv, padding):
    super(unetUpNoSKipXXXXXXXX, self).__init__()
    self.conv = unetConv2(in_size, out_size, False, padding)  # note, changed to out_size, out_size
    if is_deconv:
        self.up = nn.ConvTranspose2d(in_size, in_size, kernel_size=2, stride=2)
    else:
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # nn.Sequential(
        #     nn.UpsamplingBilinear2d(scale_factor=2),
        #     nn.Conv2d(in_size, out_size, 3, stride=1, padding=1),
        #     nn.BatchNorm2d(out_size),
        #     nn.ReLU()
        # )
    # self.upX = nn.ConvTranspose2d(in_size, in_size, kernel_size=2, stride=2)
Example #30
Source File: unet_utils.py From UnsupervisedGeometryAwareRepresentationLearning with GNU General Public License v3.0 | 5 votes |
def __init__(self, in_size, out_size, is_deconv, padding):
    super(unetUpNoSKip, self).__init__()
    self.conv = unetConv2(out_size, out_size, False, padding)  # note, changed to out_size, out_size for no skip
    if is_deconv:
        self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
    else:
        # self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            # nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(in_size, out_size, 3, stride=1, padding=1),
            nn.BatchNorm2d(out_size),
            nn.ReLU()
        )