Python chainer.links.Deconvolution2D() Examples
The following are 30 code examples of chainer.links.Deconvolution2D(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.links, or try the search function.
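Before diving in, a note on the constructor that every example below uses: L.Deconvolution2D(in_channels, out_channels, ksize, stride, pad) performs a 2D transposed convolution, and each spatial dimension grows as out = stride * (in - 1) + ksize - 2 * pad. The minimal sketch below (mine, not taken from any project on this page) shows the ksize=4, stride=2, pad=1 configuration that dominates these examples; it exactly doubles the resolution, since 2 * (n - 1) + 4 - 2 = 2n.

import numpy as np
import chainer.links as L

# ksize=4, stride=2, pad=1 doubles each spatial dimension.
deconv = L.Deconvolution2D(in_channels=16, out_channels=8,
                           ksize=4, stride=2, pad=1)
x = np.zeros((1, 16, 32, 32), dtype=np.float32)  # NCHW input
y = deconv(x)
print(y.shape)  # (1, 8, 64, 64)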
Example #1
Source File: test_caffe_function.py From chainer with MIT License
def test_deconvolution(self):
    self.init_func()
    self.assertEqual(len(self.func.layers), 1)
    f = self.func.l1
    self.assertIsInstance(f, links.Deconvolution2D)
    for i in range(3):  # 3 == group
        in_slice = slice(i * 4, (i + 1) * 4)  # 4 == channels
        out_slice = slice(i * 2, (i + 1) * 2)  # 2 == num / group
        w = f.W.data[out_slice, in_slice]
        numpy.testing.assert_array_equal(
            w.flatten(), range(i * 32, (i + 1) * 32))
    numpy.testing.assert_array_equal(f.b.data, range(12))
    self.call(['x'], ['y'])
    self.mock.assert_called_once_with(self.inputs[0])
Example #2
Source File: sr_model.py From become-yukarin with MIT License
def __init__(self, ch0, ch1, bn=True, sample='down',
             activation=F.relu, dropout=False) -> None:
    super().__init__()
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        if sample == 'down':
            self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif sample == 'up':
            self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        else:
            self.c = L.Convolution2D(ch0, ch1, 1, 1, 0, initialW=w)
        if bn:
            self.batchnorm = L.BatchNormalization(ch1)
Example #3
Source File: video_generator.py From tgan with MIT License
def __init__(self, z_slow_dim, z_fast_dim, out_channels, bottom_width,
             conv_ch=512, wscale=0.02):
    self.ch = conv_ch
    self.bottom_width = bottom_width
    slow_mid_dim = bottom_width * bottom_width * conv_ch // 2
    fast_mid_dim = bottom_width * bottom_width * conv_ch // 2
    super(VideoGeneratorInitDefault, self).__init__()
    w = None
    with self.init_scope():
        self.l0s = L.Linear(z_slow_dim, slow_mid_dim, initialW=w, nobias=True)
        self.l0f = L.Linear(z_fast_dim, fast_mid_dim, initialW=w, nobias=True)
        self.dc1 = L.Deconvolution2D(conv_ch, conv_ch // 2, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc2 = L.Deconvolution2D(conv_ch // 2, conv_ch // 4, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc3 = L.Deconvolution2D(conv_ch // 4, conv_ch // 8, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc4 = L.Deconvolution2D(conv_ch // 8, conv_ch // 16, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc5 = L.Deconvolution2D(conv_ch // 16, out_channels, 3, 1, 1,
                                     initialW=w, nobias=False)
        self.bn0s = L.BatchNormalization(slow_mid_dim)
        self.bn0f = L.BatchNormalization(fast_mid_dim)
        self.bn1 = L.BatchNormalization(conv_ch // 2)
        self.bn2 = L.BatchNormalization(conv_ch // 4)
        self.bn3 = L.BatchNormalization(conv_ch // 8)
        self.bn4 = L.BatchNormalization(conv_ch // 16)
Example #4
Source File: video_generator.py From tgan with MIT License
def __init__(self, z_slow_dim, z_fast_dim, out_channels, bottom_width,
             conv_ch=512, wscale=0.01):
    self.ch = conv_ch
    self.bottom_width = bottom_width
    slow_mid_dim = bottom_width * bottom_width * conv_ch // 2
    fast_mid_dim = bottom_width * bottom_width * conv_ch // 2
    super(VideoGeneratorInitUniform, self).__init__()
    w = chainer.initializers.Uniform(wscale)
    with self.init_scope():
        self.l0s = L.Linear(z_slow_dim, slow_mid_dim, initialW=w, nobias=True)
        self.l0f = L.Linear(z_fast_dim, fast_mid_dim, initialW=w, nobias=True)
        self.dc1 = L.Deconvolution2D(conv_ch, conv_ch // 2, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc2 = L.Deconvolution2D(conv_ch // 2, conv_ch // 4, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc3 = L.Deconvolution2D(conv_ch // 4, conv_ch // 8, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc4 = L.Deconvolution2D(conv_ch // 8, conv_ch // 16, 4, 2, 1,
                                     initialW=w, nobias=True)
        self.dc5 = L.Deconvolution2D(conv_ch // 16, out_channels, 3, 1, 1,
                                     initialW=w, nobias=False)
        self.bn0s = L.BatchNormalization(slow_mid_dim)
        self.bn0f = L.BatchNormalization(fast_mid_dim)
        self.bn1 = L.BatchNormalization(conv_ch // 2)
        self.bn2 = L.BatchNormalization(conv_ch // 4)
        self.bn3 = L.BatchNormalization(conv_ch // 8)
        self.bn4 = L.BatchNormalization(conv_ch // 16)
Example #5
Source File: net.py From chainer-fast-neuralstyle with MIT License
def __init__(self):
    super(FastStyleNet, self).__init__(
        c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
        c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
        c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
        r1=ResidualBlock(128, 128),
        r2=ResidualBlock(128, 128),
        r3=ResidualBlock(128, 128),
        r4=ResidualBlock(128, 128),
        r5=ResidualBlock(128, 128),
        d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
        d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
        d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
        b1=L.BatchNormalization(32),
        b2=L.BatchNormalization(64),
        b3=L.BatchNormalization(128),
        b4=L.BatchNormalization(64),
        b5=L.BatchNormalization(32),
    )
Example #6
Source File: net.py From pixcaler with MIT License
def __init__(self, ch0, ch1, bn=True, sample='down',
             activation=F.relu, dropout=False):
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    layers = {}
    w = chainer.initializers.Normal(0.02)
    if sample == 'down':
        layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'up':
        layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'up-nn':
        layers['c'] = NNConvolution2D(ch0, ch1, 2, 3, 1, 1, initialW=w)
    elif sample == 'none':
        layers['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w)
    elif sample == 'none-5':
        layers['c'] = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
    else:
        assert False, 'unknown sample {}'.format(sample)
    if bn:
        layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)
Example #7
Source File: net.py From chainer-gan-lib with MIT License
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        w = chainer.initializers.Normal(wscale)
        self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                           initialW=w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
        self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
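Only the constructor appears on this page; the matching forward pass is the standard DCGAN chain. The sketch below is my reconstruction of that pattern (reshape the linear output to a 4x4 map, then upsample), not the project's verbatim code:

def forward(self, z):
    h = F.reshape(F.relu(self.bn0(self.l0(z))),
                  (len(z), self.ch, self.bottom_width, self.bottom_width))
    h = F.relu(self.bn1(self.dc1(h)))  # 4x4 -> 8x8
    h = F.relu(self.bn2(self.dc2(h)))  # 8x8 -> 16x16
    h = F.relu(self.bn3(self.dc3(h)))  # 16x16 -> 32x32
    return F.tanh(self.dc4(h))         # ksize=3, stride=1, pad=1 keeps 32x32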
Example #8
Source File: net.py From chainer-gan-lib with MIT License
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02,
             z_distribution="uniform", hidden_activation=F.relu,
             output_activation=F.tanh, use_bn=True):
    super(DCGANGenerator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    self.z_distribution = z_distribution
    self.hidden_activation = hidden_activation
    self.output_activation = output_activation
    self.use_bn = use_bn
    with self.init_scope():
        w = chainer.initializers.Normal(wscale)
        self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                           initialW=w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
        if self.use_bn:
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
Example #9
Source File: test_deconvolution_2d.py From chainer with MIT License
def create_link(self, initializers):
    initialW, initial_bias = initializers
    if self.nobias:
        link = L.Deconvolution2D(
            self.in_channels, self.out_channels, self.ksize,
            stride=self.stride, pad=self.pad, nobias=self.nobias,
            dilate=self.dilate, groups=self.groups,
            initialW=initialW)
    else:
        link = L.Deconvolution2D(
            self.in_channels, self.out_channels, self.ksize,
            stride=self.stride, pad=self.pad, nobias=self.nobias,
            dilate=self.dilate, groups=self.groups,
            initialW=initialW, initial_bias=initial_bias)
    return link
Example #10
Source File: test_caffe.py From chainer with MIT License
def test_caffe_export_model(self):
    class Model(chainer.Chain):

        def __init__(self):
            super(Model, self).__init__()
            with self.init_scope():
                self.l1 = L.Convolution2D(None, 1, 1, 1, 0, groups=1)
                self.b2 = L.BatchNormalization(1, eps=1e-2)
                self.l3 = L.Deconvolution2D(None, 1, 1, 1, 0, groups=1)
                self.l4 = L.Linear(None, 1)

        def forward(self, x):
            h = F.relu(self.l1(x))
            h = self.b2(h)
            h = self.l3(h)
            return self.l4(h)

    assert_export_import_match(Model(), self.x)
Example #11
Source File: net.py From chainer with MIT License
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        w = chainer.initializers.Normal(wscale)
        self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                           initialW=w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
        self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
Example #12
Source File: net.py From chainer with MIT License
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        w = chainer.initializers.Normal(wscale)
        self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                           initialW=w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
        self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
Example #13
Source File: model.py From GP-GAN with MIT License
def __init__(self, isize, nc, ngf, conv_init=None, bn_init=None):
    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2
    layers = []
    # input is Z, going into a convolution
    layers.append(L.Deconvolution2D(None, cngf, ksize=4, stride=1, pad=0,
                                    initialW=conv_init, nobias=True))
    layers.append(L.BatchNormalization(cngf, initial_gamma=bn_init))
    layers.append(ReLU())
    csize, cndf = 4, cngf
    while csize < isize // 2:
        layers.append(L.Deconvolution2D(None, cngf // 2, ksize=4, stride=2,
                                        pad=1, initialW=conv_init,
                                        nobias=True))
        layers.append(L.BatchNormalization(cngf // 2, initial_gamma=bn_init))
        layers.append(ReLU())
        cngf = cngf // 2
        csize = csize * 2
    layers.append(L.Deconvolution2D(None, nc, ksize=4, stride=2, pad=1,
                                    initialW=conv_init, nobias=True))
    layers.append(Tanh())
    super(DCGAN_G, self).__init__(*layers)
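The two while loops derive the channel count and depth from the target image size isize. A standalone check of that arithmetic for isize=64, ngf=64 (my worked example, not project code):

isize, ngf = 64, 64
cngf, tisize = ngf // 2, 4           # (32, 4)
while tisize != isize:
    cngf, tisize = cngf * 2, tisize * 2
print(cngf)                          # 512: the stride-1 head maps z to 4x4x512
csize = 4
while csize < isize // 2:
    cngf, csize = cngf // 2, csize * 2
    print(csize, cngf)               # (8, 256), (16, 128), (32, 64)
# the final stride-2 deconvolution then emits a 64x64, nc-channel image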
Example #14
Source File: net.py From tensorboardX with MIT License
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        w = chainer.initializers.Normal(wscale)
        self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                           initialW=w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
        self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
Example #15
Source File: FCN_32s.py From ssai-cnn with MIT License
def __init__(self):
    super(FCN_32s, self).__init__(
        conv1_1=L.Convolution2D(3, 64, 3, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3),
        conv2_1=L.Convolution2D(64, 128, 3),
        conv2_2=L.Convolution2D(128, 128, 3),
        conv3_1=L.Convolution2D(128, 256, 3),
        conv3_2=L.Convolution2D(256, 256, 3),
        conv4_1=L.Convolution2D(256, 512, 3),
        conv4_2=L.Convolution2D(512, 512, 3),
        conv4_3=L.Convolution2D(512, 512, 3),
        conv5_1=L.Convolution2D(512, 512, 3),
        conv5_2=L.Convolution2D(512, 512, 3),
        conv5_3=L.Convolution2D(512, 512, 3),
        fc6=L.Convolution2D(512, 4096, 7),
        fc7=L.Convolution2D(4096, 4096, 1),
        score_fr=L.Convolution2D(4096, 21, 1),
        upsample=L.Deconvolution2D(21, 21, 64, 32),
    )
    self.train = True
Example #16
Source File: fcn32s.py From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def __init__(self, n_class=21):
    self.train = True
    super(FCN32s, self).__init__(
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
        conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
        fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
        score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                                 nobias=True,
                                 initialW=np.zeros((n_class, 4096, 1, 1))),
        upscore=L.Deconvolution2D(n_class, n_class, 64, stride=32, pad=0,
                                  nobias=True,
                                  initialW=f.bilinear_interpolation_kernel(
                                      n_class, n_class, ksize=64)),
    )
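The upscore layer above (and the upscore layers in Example #25 below) is initialized by a project helper, f.bilinear_interpolation_kernel, whose body is not shown on this page. The sketch below is a reconstruction of the standard FCN bilinear-upsampling kernel that such helpers typically compute; treat the name and signature as assumptions:

import numpy as np

def bilinear_interpolation_kernel(in_channels, out_channels, ksize):
    # Standard FCN-style bilinear upsampling weights (reconstruction).
    factor = (ksize + 1) // 2
    center = factor - 1.0 if ksize % 2 == 1 else factor - 0.5
    og = np.ogrid[:ksize, :ksize]
    kernel = ((1 - abs(og[0] - center) / factor) *
              (1 - abs(og[1] - center) / factor))
    # One bilinear filter per channel, no cross-channel mixing.
    W = np.zeros((in_channels, out_channels, ksize, ksize), dtype=np.float32)
    W[range(in_channels), range(out_channels), :, :] = kernel
    return W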
Example #17
Source File: mask_head.py From chainercv with MIT License
def __init__(self, n_class, scales):
    super(MaskHead, self).__init__()
    initialW = HeNormal(1, fan_option='fan_out')
    with self.init_scope():
        self.conv1 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv2 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv3 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv4 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv5 = L.Deconvolution2D(
            256, 2, pad=0, stride=2, initialW=initialW)
        self.seg = L.Convolution2D(n_class, 1, pad=0, initialW=initialW)

    self._n_class = n_class
    self._scales = scales
Example #18
Source File: srcnn.py From waifu2x-chainer with MIT License
def __init__(self, ch):
    super(UpResNet10, self).__init__()
    with self.init_scope():
        self.conv_pre = L.Convolution2D(ch, 64, 3)
        self.res1 = ResBlock(64, 64, r=4, se=True)
        self.res2 = ResBlock(64, 64, r=4, se=True)
        self.res3 = ResBlock(64, 64, r=4, se=True)
        self.res4 = ResBlock(64, 64, r=4, se=True)
        self.res5 = ResBlock(64, 64, r=4, se=True)
        self.conv_bridge = L.Convolution2D(64, 64, 3)
        self.conv_post = L.Deconvolution2D(64, ch, 4, 2, 3, nobias=True)
    self.ch = ch
    self.offset = 26
    self.inner_scale = 2
Example #19
Source File: models.py From chainer-wasserstein-gan with MIT License
def __init__(self):
    super().__init__(
        dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
        dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
        dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, nobias=True),
        dc4=L.Deconvolution2D(64, 3, 4, stride=2, pad=1, nobias=True),
        bn_dc1=L.BatchNormalization(256),
        bn_dc2=L.BatchNormalization(128),
        bn_dc3=L.BatchNormalization(64),
    )
Example #20
Source File: srcnn.py From waifu2x-chainer with MIT License
def __init__(self, ch):
    super(UpConv7, self).__init__()
    with self.init_scope():
        self.conv1 = L.Convolution2D(ch, 16, 3)
        self.conv2 = L.Convolution2D(16, 32, 3)
        self.conv3 = L.Convolution2D(32, 64, 3)
        self.conv4 = L.Convolution2D(64, 128, 3)
        self.conv5 = L.Convolution2D(128, 256, 3)
        self.conv6 = L.Convolution2D(256, 256, 3)
        self.conv7 = L.Deconvolution2D(256, ch, 4, 2, 3, nobias=True)
    self.ch = ch
    self.offset = 14
    self.inner_scale = 2
Example #21
Source File: pggan.py From chainer-stylegan with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad, nobias=False,
             gain=np.sqrt(2), lrmul=1):
    w = chainer.initializers.Normal(1.0 / lrmul)  # equalized learning rate
    self.inv_c = gain * np.sqrt(1.0 / in_ch)
    self.inv_c = self.inv_c * lrmul
    super(EqualizedDeconv2d, self).__init__()
    with self.init_scope():
        self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad,
                                   initialW=w, nobias=nobias)
Example #22
Source File: mask_rcnn_resnet.py From chainer-mask-rcnn with MIT License
def __init__(self, n_layers, n_class, roi_size, spatial_scale,
             pretrained_model='auto',
             res_initialW=None, loc_initialW=None,
             score_initialW=None, mask_initialW=None,
             pooling_func=functions.roi_align_2d):
    # n_class includes the background
    super(ResNetRoIHead, self).__init__()
    with self.init_scope():
        self.res5 = BuildingBlock(
            3, 1024, 512, 2048, stride=roi_size // 7, initialW=res_initialW)
        self.cls_loc = L.Linear(2048, n_class * 4, initialW=loc_initialW)
        self.score = L.Linear(2048, n_class, initialW=score_initialW)
        # 7 x 7 x 2048 -> 14 x 14 x 256
        self.deconv6 = L.Deconvolution2D(
            2048, 256, 2, stride=2, initialW=mask_initialW)
        # 14 x 14 x 256 -> 14 x 14 x 20
        n_fg_class = n_class - 1
        self.mask = L.Convolution2D(
            256, n_fg_class, 1, initialW=mask_initialW)
    self.n_class = n_class
    self.roi_size = roi_size
    self.spatial_scale = spatial_scale
    self.pooling_func = pooling_func

    _convert_bn_to_affine(self)

    if pretrained_model == 'auto':
        self._copy_imagenet_pretrained_resnet(n_layers)
    else:
        assert pretrained_model is None, \
            'Unsupported pretrained_model: {}'.format(pretrained_model)
Example #23
Source File: vgg.py From chainer-visualization with MIT License
def check_add_deconv_layers(self, nobias=True):
    """Add a deconvolutional layer for each convolutional layer already
    defined in the network."""
    if len(self.deconv_blocks) == len(self.conv_blocks):
        return
    for conv_block in self.conv_blocks:
        deconv_block = []
        for conv in conv_block:
            out_channels, in_channels, kh, kw = conv.W.data.shape
            if isinstance(conv.W.data, cuda.ndarray):
                initialW = cuda.cupy.asnumpy(conv.W.data)
            else:
                initialW = conv.W.data
            deconv = L.Deconvolution2D(out_channels, in_channels, (kh, kw),
                                       stride=conv.stride, pad=conv.pad,
                                       initialW=initialW, nobias=nobias)
            if isinstance(conv.W.data, cuda.ndarray):
                deconv.to_gpu()
            self.add_link('de{}'.format(conv.name), deconv)
            deconv_block.append(deconv)
        self.deconv_blocks.append(deconv_block)
Example #24
Source File: rec_multibp_resnet.py From nips17-adversarial-attack with MIT License
def __init__(self, in_ch_dec, in_ch_enc, out_ch):
    super(UpBlock, self).__init__()
    with self.init_scope():
        self.d0 = L.Deconvolution2D(in_ch_dec, out_ch, 2, 2)
        self.b0 = L.BatchNormalization(out_ch)
        self.c1 = L.Convolution2D(out_ch + in_ch_enc, out_ch, 3, 1, 1)
        self.b1 = L.BatchNormalization(out_ch)
        self.c2 = L.Convolution2D(out_ch, out_ch, 3, 1, 1)
        self.b2 = L.BatchNormalization(out_ch)
Example #25
Source File: fcn16s.py From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def __init__(self, n_class=21):
    self.train = True
    super(FCN16s, self).__init__(
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
        conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
        fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
        score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                                 nobias=True,
                                 initialW=np.zeros((n_class, 4096, 1, 1))),
        score_pool4=L.Convolution2D(512, n_class, 1, stride=1, pad=0,
                                    nobias=True,
                                    initialW=np.zeros((n_class, 512, 1, 1))),
        upscore2=L.Deconvolution2D(n_class, n_class, 4, stride=2,
                                   nobias=True,
                                   initialW=f.bilinear_interpolation_kernel(
                                       n_class, n_class, ksize=4),
                                   use_cudnn=False),
        upscore16=L.Deconvolution2D(n_class, n_class, 32, stride=16,
                                    nobias=True,
                                    initialW=f.bilinear_interpolation_kernel(
                                        n_class, n_class, ksize=32),
                                    use_cudnn=False),
    )
Example #26
Source File: net.py From chainer-gan-lib with MIT License
def __init__(self, ch=512, wscale=0.02):
    w = chainer.initializers.Normal(wscale)
    self.ch = ch
    super(Discriminator, self).__init__()
    with self.init_scope():
        self.c0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
        self.c1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
        self.c2 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
        self.c3 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
        self.l4 = L.Linear(4 * 4 * ch, 128, initialW=w)
        self.l5 = L.Linear(128, 4 * 4 * ch, initialW=w)
        self.dc3 = L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
        self.dc1 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
        self.dc0 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
Example #27
Source File: net.py From chainer-gan-lib with MIT License
def __init__(self, in_ch, out_ch, ksize, stride, pad):
    w = chainer.initializers.Normal(1.0)  # equalized learning rate
    self.inv_c = np.sqrt(2.0 / in_ch)
    super(EqualizedDeconv2d, self).__init__()
    with self.init_scope():
        self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad,
                                   initialW=w)
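The constructor only precomputes the scaling constant inv_c; the scale is applied at call time. A sketch of the matching forward pass (assuming the usual equalized-learning-rate pattern of scaling activations at runtime, not necessarily the project's verbatim code):

def __call__(self, x):
    # Weights stay N(0, 1); rescaling the input by inv_c gives the layer
    # the same effective variance as He initialization, while keeping a
    # uniform dynamic range across all weights during training.
    return self.c(self.inv_c * x)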
Example #28
Source File: common.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad,
             dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5,
             activation=(lambda: F.relu), **kwargs):
    super(DeconvBlock, self).__init__(**kwargs)
    self.activate = (activation is not None)
    self.use_bn = use_bn
    with self.init_scope():
        self.conv = L.Deconvolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=(not use_bias),
            dilate=dilate,
            groups=groups)
        if self.use_bn:
            self.bn = L.BatchNormalization(
                size=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
Example #29
Source File: mask_refine.py From models with MIT License
def __init__(self):
    super(MaskRefine, self).__init__()
    with self.init_scope():
        self.v0 = chainer.Sequential(
            Conv2DActiv(64, 16, ksize=3, pad=1),
            Conv2DActiv(16, 4, ksize=3, pad=1),
        )
        self.v1 = chainer.Sequential(
            Conv2DActiv(256, 64, ksize=3, pad=1),
            Conv2DActiv(64, 16, ksize=3, pad=1),
        )
        self.v2 = chainer.Sequential(
            Conv2DActiv(512, 128, ksize=3, pad=1),
            Conv2DActiv(128, 32, ksize=3, pad=1),
        )
        self.h2 = chainer.Sequential(
            Conv2DActiv(32, 32, ksize=3, pad=1),
            Conv2DActiv(32, 32, ksize=3, pad=1),
        )
        self.h1 = chainer.Sequential(
            Conv2DActiv(16, 16, ksize=3, pad=1),
            Conv2DActiv(16, 16, ksize=3, pad=1),
        )
        self.h0 = chainer.Sequential(
            Conv2DActiv(4, 4, ksize=3, pad=1),
            Conv2DActiv(4, 4, ksize=3, pad=1),
        )
        self.deconv = L.Deconvolution2D(256, 32, ksize=15, stride=15)
        self.post0 = L.Convolution2D(32, 16, ksize=3, pad=1)
        self.post1 = L.Convolution2D(16, 4, ksize=3, pad=1)
        self.post2 = L.Convolution2D(4, 1, ksize=3, pad=1)
Example #30
Source File: net.py From pfio with MIT License
def __init__(self, ch0, ch1, use_bn=True, sample='down',
             activation=F.relu, dropout=False):
    self.use_bn = use_bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    super(ConvBNR, self).__init__()
    with self.init_scope():
        if sample == 'down':
            self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        else:
            self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        if use_bn:
            self.bn = L.BatchNormalization(ch1)