Python torch.nn.PixelShuffle() Examples
The following are 30 code examples of torch.nn.PixelShuffle(). Each snippet is an excerpt from a real project; the reference above each example names the original source file, project, and license so you can follow it back for full context. You may also want to check out the other available functions and classes of the torch.nn module.
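Before the examples, a quick orientation: nn.PixelShuffle(r) rearranges a tensor of shape (B, C*r^2, H, W) into (B, C, H*r, W*r), trading channels for spatial resolution. A minimal sketch (the shapes here are illustrative choices, not taken from any example below):

import torch
import torch.nn as nn

ps = nn.PixelShuffle(upscale_factor=2)
x = torch.randn(1, 16, 8, 8)   # (B, C*r^2, H, W) with r=2, C=4
y = ps(x)
print(y.shape)                 # torch.Size([1, 4, 16, 16]) -- (B, C, H*r, W*r)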
Example #1
Source File: common.py From 3D_Appearance_SR with MIT License
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
    m = []
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(conv(n_feat, 4 * n_feat, 3, bias))
            m.append(nn.PixelShuffle(2))
            if bn:
                m.append(nn.BatchNorm2d(n_feat))
            if act:
                m.append(nn.PReLU())
    elif scale == 3:
        m.append(conv(n_feat, 9 * n_feat, 3, bias))
        m.append(nn.PixelShuffle(3))
        if bn:
            m.append(nn.BatchNorm2d(n_feat))
        if act:
            m.append(nn.PReLU())
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
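Most of the Upsampler variants on this page repeat the sub-pixel convolution pattern shown above: a convolution expands the channel count by r^2, then nn.PixelShuffle(r) folds those channels into an r-times larger spatial grid, so the feature count is preserved. A standalone sketch of one x2 stage (n_feat=64 is an arbitrary assumption):

import torch
import torch.nn as nn

n_feat = 64
up2 = nn.Sequential(
    nn.Conv2d(n_feat, 4 * n_feat, 3, padding=1),  # expand channels by r^2 = 4
    nn.PixelShuffle(2),                           # fold channels into 2x spatial size
)
x = torch.randn(1, n_feat, 32, 32)
print(up2(x).shape)  # torch.Size([1, 64, 64, 64]) -- same channel count, 2x resolution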
Example #2
Source File: network_ffdnet.py From KAIR with MIT License
def __init__(self, in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R'):
    """
    in_nc: channel number of input
    out_nc: channel number of output
    nc: channel number
    nb: total number of conv layers
    act_mode: batch norm + activation function; 'BR' means BN+ReLU
    """
    super(FFDNet, self).__init__()
    assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
    bias = True
    sf = 2

    self.m_down = B.PixelUnShuffle(upscale_factor=sf)

    m_head = B.conv(in_nc*sf*sf+1, nc, mode='C'+act_mode[-1], bias=bias)
    m_body = [B.conv(nc, nc, mode='C'+act_mode, bias=bias) for _ in range(nb-2)]
    m_tail = B.conv(nc, out_nc*sf*sf, mode='C', bias=bias)
    self.model = B.sequential(m_head, *m_body, m_tail)

    self.m_up = nn.PixelShuffle(upscale_factor=sf)
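FFDNet pairs a project-specific pixel unshuffle (B.PixelUnShuffle) on the way down with nn.PixelShuffle on the way up. As a side note, PyTorch 1.8+ ships the inverse operation as a built-in, so a sketch of the same down/up pair without the custom module could look like:

import torch.nn as nn

sf = 2
down = nn.PixelUnshuffle(downscale_factor=sf)  # (B, C, H, W) -> (B, C*sf^2, H/sf, W/sf)
up = nn.PixelShuffle(upscale_factor=sf)        # the exact inverse rearrangement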
Example #3
Source File: base_networks.py From STARnet with MIT License
def __init__(self, input_size, output_size, scale_factor, kernel_size=3, stride=1, padding=1,
             bias=True, activation='prelu', norm='batch'):
    super(PSBlock, self).__init__()
    self.conv = nn.Conv2d(input_size, output_size * scale_factor**2, kernel_size, stride, padding, bias=bias)
    self.ps = nn.PixelShuffle(scale_factor)

    self.norm = norm
    if self.norm == 'batch':
        self.bn = nn.BatchNorm2d(output_size)
    elif norm == 'instance':
        self.bn = nn.InstanceNorm2d(output_size)

    self.activation = activation
    if self.activation == 'relu':
        self.act = nn.ReLU(True)
    elif self.activation == 'prelu':
        self.act = nn.PReLU()
    elif self.activation == 'lrelu':
        self.act = nn.LeakyReLU(0.1, True)
    elif self.activation == 'tanh':
        self.act = nn.Tanh()
    elif self.activation == 'sigmoid':
        self.act = nn.Sigmoid()
Example #4
Source File: base_networks.py From STARnet with MIT License
def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
    super(Upsampler, self).__init__()
    modules = []
    for _ in range(int(math.log(scale, 2))):
        modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
        modules.append(nn.PixelShuffle(2))
        if bn:
            modules.append(nn.BatchNorm2d(n_feat))
    self.up = nn.Sequential(*modules)

    self.activation = act
    if self.activation == 'relu':
        self.act = nn.ReLU(True)
    elif self.activation == 'prelu':
        self.act = nn.PReLU()
    elif self.activation == 'lrelu':
        self.act = nn.LeakyReLU(0.1, True)
    elif self.activation == 'tanh':
        self.act = nn.Tanh()
    elif self.activation == 'sigmoid':
        self.act = nn.Sigmoid()
Example #5
Source File: rdn.py From ai-platform with MIT License
def __init__(self, channel=1, growth_rate=64, rdb_number=16, rdb_conv_layers=8, upscale_factor=3):
    super(RDN, self).__init__()
    self.SFF1 = nn.Conv2d(in_channels=channel, out_channels=64, kernel_size=3, padding=1, stride=1)
    self.SFF2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1, stride=1)
    rdb_layers = []
    for _ in range(rdb_number):
        rdb_layers.append(RDB(nb_layers=rdb_conv_layers, input_dim=64, growth_rate=64))
    self.RDB_layers = nn.ModuleList(rdb_layers)
    self.GFF1 = nn.Conv2d(in_channels=64*rdb_number, out_channels=64, kernel_size=1, padding=0)
    self.GFF2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
    self.upconv = nn.Conv2d(in_channels=64, out_channels=64*upscale_factor*upscale_factor,
                            kernel_size=3, padding=1)
    self.pixelshuffle = nn.PixelShuffle(upscale_factor)
    self.conv2 = nn.Conv2d(in_channels=64, out_channels=channel, kernel_size=3, padding=1)
Example #6
Source File: sft_arch.py From BasicSR with Apache License 2.0
def __init__(self):
    super(SFT_Net, self).__init__()
    self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)

    sft_branch = []
    for i in range(16):
        sft_branch.append(ResBlock_SFT())
    sft_branch.append(SFTLayer())
    sft_branch.append(nn.Conv2d(64, 64, 3, 1, 1))
    self.sft_branch = nn.Sequential(*sft_branch)

    self.HR_branch = nn.Sequential(
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
        nn.Conv2d(64, 3, 3, 1, 1))

    self.CondNet = nn.Sequential(
        nn.Conv2d(8, 128, 4, 4), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 32, 1))
Example #7
Source File: common.py From TENet with MIT License
def __init__(self, scale, n_feats, norm_type=False, act_type='relu', bias=False):
    m = []
    act = act_layer(act_type) if act_type else None
    norm = norm_layer(norm_type, n_feats) if norm_type else None
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(default_conv(n_feats, 4 * n_feats, 3, bias=bias))
            m.append(nn.PixelShuffle(2))
            if norm:
                m.append(norm)
            if act is not None:
                m.append(act)
    elif scale == 3:
        m.append(default_conv(n_feats, 9 * n_feats, 3, bias=bias))
        m.append(nn.PixelShuffle(3))
        if norm:
            m.append(norm)
        if act is not None:
            m.append(act)
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
Example #8
Source File: DuRN_U.py From DualResidualNetworks with MIT License
def __init__(self, in_dim, out_dim, res_dim, f_size=3, dilation=1, norm_type="instance", with_relu=True):
    super(DualUpDownLayer, self).__init__()
    self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
    self.norm1 = FeatNorm(norm_type, in_dim)
    self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
    self.norm2 = FeatNorm(norm_type, in_dim)

    # T^{l}_{1}: (up + conv + instance norm)
    # -- up --
    self.conv_pre = ConvLayer(in_dim, 2*in_dim, 1, 1)
    self.norm_pre = FeatNorm(norm_type, 2*in_dim)
    self.upsamp = nn.PixelShuffle(2)
    # --------
    self.up_conv = ConvLayer(res_dim, res_dim, kernel_size=f_size, stride=1, dilation=dilation)
    self.up_norm = FeatNorm(norm_type, res_dim)

    # T^{l}_{2}: (conv + instance norm), stride=2 for down-scaling
    self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=3, stride=2)
    self.down_norm = FeatNorm(norm_type, out_dim)

    self.with_relu = with_relu
    self.relu = nn.ReLU()
Example #9
Source File: sft_arch.py From real-world-sr with MIT License
def __init__(self):
    super(SFT_Net, self).__init__()
    self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)

    sft_branch = []
    for i in range(16):
        sft_branch.append(ResBlock_SFT())
    sft_branch.append(SFTLayer())
    sft_branch.append(nn.Conv2d(64, 64, 3, 1, 1))
    self.sft_branch = nn.Sequential(*sft_branch)

    self.HR_branch = nn.Sequential(
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
        nn.Conv2d(64, 3, 3, 1, 1))

    self.CondNet = nn.Sequential(
        nn.Conv2d(8, 128, 4, 4), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 32, 1))
Example #10
Source File: network.py From DMIT with MIT License
def __init__(self, in_dim, out_dim, type='transpose', norm_layer=None, nl_layer=None):
    # note: the original default was type='Trp', which matches none of the
    # branches below; 'transpose' is used here so the default is actually valid
    super(Upsampling2dBlock, self).__init__()
    if type == 'transpose':
        self.upsample = TrConv2dBlock(in_dim, out_dim, kernel_size=4, stride=2,
                                      padding=1, bias=False, norm_layer=norm_layer, nl_layer=nl_layer)
    elif type == 'nearest':
        self.upsample = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='nearest'),
            Conv2dBlock(in_dim, out_dim, kernel_size=3, stride=1, padding=1, pad_type='reflect',
                        bias=False, norm_layer=norm_layer, nl_layer=nl_layer)
        )
    elif type == 'pixelshuffle':
        self.upsample = nn.Sequential(
            Conv2dBlock(in_dim, out_dim*4, kernel_size=3, stride=1, padding=1, pad_type='reflect',
                        bias=False, norm_layer=norm_layer, nl_layer=nl_layer),
            nn.PixelShuffle(2)
        )
    else:
        raise NotImplementedError('Upsampling layer [%s] is not found' % type)
Example #11
Source File: sft_arch.py From IKC with Apache License 2.0
def __init__(self):
    super(SFT_Net, self).__init__()
    self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)

    sft_branch = []
    for i in range(16):
        sft_branch.append(ResBlock_SFT())
    sft_branch.append(SFTLayer())
    sft_branch.append(nn.Conv2d(64, 64, 3, 1, 1))
    self.sft_branch = nn.Sequential(*sft_branch)

    self.HR_branch = nn.Sequential(
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
        nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
        nn.Conv2d(64, 3, 3, 1, 1))

    self.CondNet = nn.Sequential(
        nn.Conv2d(8, 128, 4, 4), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
        nn.Conv2d(128, 32, 1))
Example #12
Source File: layers01.py From packnet-sfm with MIT License
def packing(x, r=2):
    """
    Takes a [B,C,H,W] tensor and returns a [B,(r^2)C,H/r,W/r] tensor by
    concatenating neighboring spatial pixels as extra channels.
    It is the inverse of nn.PixelShuffle (applying both sequentially
    returns the original tensor).

    Parameters
    ----------
    x : torch.Tensor [B,C,H,W]
        Input tensor
    r : int
        Packing ratio

    Returns
    -------
    out : torch.Tensor [B,(r^2)C,H/r,W/r]
        Packed tensor
    """
    b, c, h, w = x.shape
    out_channel = c * (r ** 2)
    out_h, out_w = h // r, w // r
    x = x.contiguous().view(b, c, out_h, r, out_w, r)
    return x.permute(0, 1, 3, 5, 2, 4).contiguous().view(b, out_channel, out_h, out_w)
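Since the docstring above claims packing is the inverse of nn.PixelShuffle, a quick sanity check (reusing the packing function just defined) would be:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)
packed = packing(x, r=2)                # [2, 12, 4, 4]
restored = F.pixel_shuffle(packed, 2)   # back to [2, 3, 8, 8]
assert torch.equal(restored, x)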
Example #13
Source File: layers01.py From packnet-sfm with MIT License
def __init__(self, in_channels, out_channels, kernel_size, r=2):
    """
    Initializes a UnpackLayerConv2d object.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    kernel_size : int
        Kernel size
    r : int
        Packing ratio
    """
    super().__init__()
    self.conv = Conv2D(in_channels, out_channels * (r ** 2), kernel_size, 1)
    self.unpack = nn.PixelShuffle(r)
Example #14
Source File: layers01.py From packnet-sfm with MIT License
def __init__(self, in_channels, out_channels, kernel_size, r=2, d=8):
    """
    Initializes a UnpackLayerConv3d object.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    kernel_size : int
        Kernel size
    r : int
        Packing ratio
    d : int
        Number of 3D features
    """
    super().__init__()
    self.conv = Conv2D(in_channels, out_channels * (r ** 2) // d, kernel_size, 1)
    self.unpack = nn.PixelShuffle(r)
    self.conv3d = nn.Conv3d(1, d, kernel_size=(3, 3, 3),
                            stride=(1, 1, 1), padding=(1, 1, 1))
Example #15
Source File: ops.py From 3D_Appearance_SR with MIT License
def __init__(self, n_channels, scale, group=1):
    super(_UpsampleBlock, self).__init__()
    modules = []
    if scale == 2 or scale == 4 or scale == 8:
        for _ in range(int(math.log(scale, 2))):
            modules += [nn.Conv2d(n_channels, 4*n_channels, 3, 1, 1, groups=group),
                        nn.ReLU(inplace=True)]
            modules += [nn.PixelShuffle(2)]
    elif scale == 3:
        modules += [nn.Conv2d(n_channels, 9*n_channels, 3, 1, 1, groups=group),
                    nn.ReLU(inplace=True)]
        modules += [nn.PixelShuffle(3)]
    self.body = nn.Sequential(*modules)
    init_weights(self.modules)
Example #16
Source File: models.py From PyTorch-GAN with MIT License
def __init__(self, channels, filters=64, num_res_blocks=16, num_upsample=2):
    super(GeneratorRRDB, self).__init__()
    # First layer
    self.conv1 = nn.Conv2d(channels, filters, kernel_size=3, stride=1, padding=1)
    # Residual blocks
    self.res_blocks = nn.Sequential(*[ResidualInResidualDenseBlock(filters) for _ in range(num_res_blocks)])
    # Second conv layer after residual blocks
    self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
    # Upsampling layers
    upsample_layers = []
    for _ in range(num_upsample):
        upsample_layers += [
            nn.Conv2d(filters, filters * 4, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.PixelShuffle(upscale_factor=2),
        ]
    self.upsampling = nn.Sequential(*upsample_layers)
    # Final output block
    self.conv3 = nn.Sequential(
        nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(),
        nn.Conv2d(filters, channels, kernel_size=3, stride=1, padding=1),
    )
Example #17
Source File: DuRN_US.py From DualResidualNetworks with MIT License
def __init__(self, in_dim, out_dim, res_dim, f_size=3, dilation=1, norm_type="instance", with_relu=True):
    super(DualUpDownLayer, self).__init__()
    self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
    self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)

    # T^{l}_{1}: (up + conv.)
    # -- up --
    self.conv_pre = ConvLayer(in_dim, 4*in_dim, 3, 1)
    self.upsamp = nn.PixelShuffle(2)
    # --------
    self.up_conv = ConvLayer(res_dim, res_dim, kernel_size=f_size, stride=1, dilation=dilation)

    # T^{l}_{2}: (SE + conv.), stride=2 for down-scaling
    self.se = se_nets.SEBasicBlock(res_dim, res_dim, reduction=32)
    self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=3, stride=2)

    self.with_relu = with_relu
    self.relu = nn.ReLU()
Example #18
Source File: model.py From ICCV2019-LearningToPaint with MIT License
def __init__(self):
    super(FCN, self).__init__()
    self.fc1 = nn.Linear(10, 512)
    self.fc2 = nn.Linear(512, 1024)
    self.fc3 = nn.Linear(1024, 2048)
    self.fc4 = nn.Linear(2048, 4096)
    self.conv1 = nn.Conv2d(16, 32, 3, 1, 1)
    self.conv2 = nn.Conv2d(32, 32, 3, 1, 1)
    self.conv3 = nn.Conv2d(8, 16, 3, 1, 1)
    self.conv4 = nn.Conv2d(16, 16, 3, 1, 1)
    self.conv5 = nn.Conv2d(4, 8, 3, 1, 1)
    self.conv6 = nn.Conv2d(8, 4, 3, 1, 1)
    self.pixel_shuffle = nn.PixelShuffle(2)
Example #19
Source File: model_illNet.py From DocProj with MIT License
def __init__(self, in_channels, up_scale):
    super(UpsampleBLock, self).__init__()
    self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)
    self.pixel_shuffle = nn.PixelShuffle(up_scale)
    self.prelu = nn.PReLU()
Example #20
Source File: network_msrresnet.py From KAIR with MIT License
def __init__(self, in_nc=3, out_nc=3, nc=64, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
    super(MSRResNet1, self).__init__()
    self.upscale = upscale

    self.conv_first = nn.Conv2d(in_nc, nc, 3, 1, 1, bias=True)
    basic_block = functools.partial(ResidualBlock_noBN, nc=nc)
    self.recon_trunk = make_layer(basic_block, nb)

    # upsampling
    if self.upscale == 2:
        self.upconv1 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
    elif self.upscale == 3:
        self.upconv1 = nn.Conv2d(nc, nc * 9, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(3)
    elif self.upscale == 4:
        self.upconv1 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

    self.HRconv = nn.Conv2d(nc, nc, 3, 1, 1, bias=True)
    self.conv_last = nn.Conv2d(nc, out_nc, 3, 1, 1, bias=True)

    # activation function
    self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    # initialization
    initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
    if self.upscale == 4:
        initialize_weights(self.upconv2, 0.1)
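The constructor above only registers the modules; for upscale == 4 the network applies two x2 sub-pixel stages in its forward pass. A hedged sketch of just that upsampling step (this is not the project's actual forward method, only an illustration of how the registered modules compose):

def upsample_x4(out, upconv1, upconv2, pixel_shuffle, lrelu):
    # two successive conv + PixelShuffle(2) stages give an overall x4 upscale
    out = lrelu(pixel_shuffle(upconv1(out)))
    out = lrelu(pixel_shuffle(upconv2(out)))
    return out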
Example #21
Source File: common.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
    m = []
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(conv(n_feats, 4 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(2))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
    elif scale == 3:
        m.append(conv(n_feats, 9 * n_feats, 3, bias))
        m.append(nn.PixelShuffle(3))
        if bn:
            m.append(nn.BatchNorm2d(n_feats))
        if act == 'relu':
            m.append(nn.ReLU(True))
        elif act == 'prelu':
            m.append(nn.PReLU(n_feats))
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
Example #22
Source File: SRResNet_arch.py From IKC with Apache License 2.0
def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
    super(MSRResNet, self).__init__()
    self.upscale = upscale

    self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
    basic_block = functools.partial(mutil.ResidualBlock_noBN, nf=nf)
    self.recon_trunk = mutil.make_layer(basic_block, nb)

    # upsampling
    if self.upscale == 2:
        self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
    elif self.upscale == 3:
        self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(3)
    elif self.upscale == 4:
        self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

    self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
    self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

    # activation function
    self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    # initialization
    mutil.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
    if self.upscale == 4:
        mutil.initialize_weights(self.upconv2, 0.1)
Example #23
Source File: common.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
    m = []
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(conv(n_feats, 4 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(2))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
    elif scale == 3:
        m.append(conv(n_feats, 9 * n_feats, 3, bias))
        m.append(nn.PixelShuffle(3))
        if bn:
            m.append(nn.BatchNorm2d(n_feats))
        if act == 'relu':
            m.append(nn.ReLU(True))
        elif act == 'prelu':
            m.append(nn.PReLU(n_feats))
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
Example #24
Source File: models.py From PyTorch-GAN with MIT License
def __init__(self, in_channels=3, out_channels=3, n_residual_blocks=16):
    super(GeneratorResNet, self).__init__()

    # First layer
    self.conv1 = nn.Sequential(nn.Conv2d(in_channels, 64, kernel_size=9, stride=1, padding=4), nn.PReLU())

    # Residual blocks
    res_blocks = []
    for _ in range(n_residual_blocks):
        res_blocks.append(ResidualBlock(64))
    self.res_blocks = nn.Sequential(*res_blocks)

    # Second conv layer after residual blocks
    self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64, 0.8))

    # Upsampling layers (two x2 sub-pixel stages, x4 overall)
    upsampling = []
    for _ in range(2):
        upsampling += [
            nn.Conv2d(64, 256, 3, 1, 1),
            nn.BatchNorm2d(256),
            nn.PixelShuffle(upscale_factor=2),
            nn.PReLU(),
        ]
    self.upsampling = nn.Sequential(*upsampling)

    # Final output layer
    self.conv3 = nn.Sequential(nn.Conv2d(64, out_channels, kernel_size=9, stride=1, padding=4), nn.Tanh())
Example #25
Source File: edsr_arch.py From SRFBN_CVPR19 with MIT License
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
    m = []
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(conv(n_feats, 4 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(2))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
    elif scale == 3:
        m.append(conv(n_feats, 9 * n_feats, 3, bias))
        m.append(nn.PixelShuffle(3))
        if bn:
            m.append(nn.BatchNorm2d(n_feats))
        if act == 'relu':
            m.append(nn.ReLU(True))
        elif act == 'prelu':
            m.append(nn.PReLU(n_feats))
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
Example #26
Source File: common.py From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
    m = []
    if (scale & (scale - 1)) == 0:  # is scale a power of 2?
        for _ in range(int(math.log(scale, 2))):
            m.append(conv(n_feats, 4 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(2))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
    elif scale == 3:
        m.append(conv(n_feats, 9 * n_feats, 3, bias))
        m.append(nn.PixelShuffle(3))
        if bn:
            m.append(nn.BatchNorm2d(n_feats))
        if act == 'relu':
            m.append(nn.ReLU(True))
        elif act == 'prelu':
            m.append(nn.PReLU(n_feats))
    else:
        raise NotImplementedError
    super(Upsampler, self).__init__(*m)
Example #27
Source File: duc.py From binseg_pytoch with Apache License 2.0
def __init__(self, in_channels, factor, num_classes=1):
    super(DUC, self).__init__()
    self.layer = nn.Sequential(
        nn.Conv2d(in_channels, (factor**2) * num_classes, kernel_size=3, padding=1),
        nn.PixelShuffle(factor))
Example #28
Source File: SRResNet_arch.py From real-world-sr with MIT License
def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
    super(MSRResNet, self).__init__()
    self.upscale = upscale

    self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
    basic_block = functools.partial(mutil.ResidualBlock_noBN, nf=nf)
    self.recon_trunk = mutil.make_layer(basic_block, nb)

    # upsampling
    if self.upscale == 2:
        self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
    elif self.upscale == 3:
        self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(3)
    elif self.upscale == 4:
        self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

    self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
    self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

    # activation function
    self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    # initialization
    mutil.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
    if self.upscale == 4:
        mutil.initialize_weights(self.upconv2, 0.1)
Example #29
Source File: models.py From dvdnet with GNU General Public License v3.0
def __init__(self):
    super(DVDnet_spatial, self).__init__()
    self.down_kernel_size = (2, 2)
    self.down_stride = 2
    self.kernel_size = 3
    self.padding = 1
    # RGB image
    self.num_input_channels = 6
    self.middle_features = 96
    self.num_conv_layers = 12
    self.down_input_channels = 12
    self.downsampled_channels = 15
    self.output_features = 12

    self.downscale = nn.Unfold(kernel_size=self.down_kernel_size, stride=self.down_stride)

    layers = []
    layers.append(nn.Conv2d(in_channels=self.downsampled_channels,
                            out_channels=self.middle_features,
                            kernel_size=self.kernel_size,
                            padding=self.padding,
                            bias=False))
    layers.append(nn.ReLU(inplace=True))
    for _ in range(self.num_conv_layers - 2):
        layers.append(nn.Conv2d(in_channels=self.middle_features,
                                out_channels=self.middle_features,
                                kernel_size=self.kernel_size,
                                padding=self.padding,
                                bias=False))
        layers.append(nn.BatchNorm2d(self.middle_features))
        layers.append(nn.ReLU(inplace=True))
    layers.append(nn.Conv2d(in_channels=self.middle_features,
                            out_channels=self.output_features,
                            kernel_size=self.kernel_size,
                            padding=self.padding,
                            bias=False))
    self.conv_relu_bn = nn.Sequential(*layers)
    self.pixelshuffle = nn.PixelShuffle(2)
    # Init weights
    self.reset_params()
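Here nn.Unfold(kernel_size=(2, 2), stride=2) acts as a space-to-depth downscale and nn.PixelShuffle(2) as the matching upscale. A hedged sketch of how the two round-trip (the view() reshape is an assumption about how the unfolded tensor is laid out, not code from the project):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
unfold = nn.Unfold(kernel_size=(2, 2), stride=2)
down = unfold(x).view(1, 3 * 4, 4, 4)  # space-to-depth: (B, C*4, H/2, W/2)
up = F.pixel_shuffle(down, 2)          # restores (B, 3, 8, 8)
assert torch.equal(up, x)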
Example #30
Source File: model.py From FeatureFlow with MIT License
def __init__(self, nf=64, n_res=10):
    super(Reconstruct, self).__init__()
    #### reconstruction
    # note: [ResnetBlock(nf)] * n_res repeats the *same* block instance,
    # so all n_res slots in the trunk share one set of weights
    self.recon_trunk = nn.Sequential(*([ResnetBlock(nf)] * n_res))
    #### upsampling
    self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
    self.upconv2 = nn.Conv2d(nf, 64 * 4, 3, 1, 1, bias=True)
    self.pixel_shuffle = nn.PixelShuffle(2)
    self.down = nn.AvgPool2d(2, 2)
    self.HRconv = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
    self.conv_last = nn.Conv2d(64, 3, 3, 1, 1, bias=True)
    self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
    self.tanh = nn.Tanh()