Python chainer.functions.deconvolution_2d() Examples
The following are 15 code examples of chainer.functions.deconvolution_2d(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or browse all available functions and classes of the chainer.functions module.
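For orientation before the examples, here is a minimal, self-contained call; the shapes and values are illustrative assumptions, not taken from any example below. The output size follows out = stride * (in - 1) + ksize - 2 * pad:

import numpy as np
import chainer.functions as F

# x: (batch, in_channels, h, w); W: (in_channels, out_channels, kh, kw)
x = np.random.rand(1, 3, 4, 4).astype(np.float32)
W = np.random.rand(3, 8, 3, 3).astype(np.float32)

y = F.deconvolution_2d(x, W, stride=2, pad=1)
print(y.shape)  # (1, 8, 7, 7), since 2 * (4 - 1) + 3 - 2 * 1 = 7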
Example #1
Source File: test_deconvolution_nd.py From chainer with MIT License
def check_forward_consistency_regression(self, backend_config):
    inputs = self.generate_inputs()
    if self.nobias:
        x, W = inputs
        b = None
    else:
        x, W, b = inputs

    x = chainer.Variable(backend_config.get_array(x))
    W = chainer.Variable(backend_config.get_array(W))
    if b is not None:
        b = chainer.Variable(backend_config.get_array(b))

    use_cudnn = backend_config.use_cudnn
    with chainer.using_config('use_cudnn', use_cudnn):
        y_nd = F.deconvolution_nd(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate)
        y_2d = F.deconvolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate)

    testing.assert_allclose(
        y_nd.array, y_2d.array, **self.check_forward_options)
Example #2
Source File: test_deconvolution_2d.py From chainer with MIT License
def forward_expected(self, inputs):
    """The current forward_expected implementation depends on
    F.deconvolution_2d itself and thus is only capable of checking
    consistency between backends, not absolute correctness of the
    computations."""
    if self.nobias:
        x, W = inputs
        b = None
    else:
        x, W, b = inputs
    y_expected = F.deconvolution_2d(
        x, W, b, stride=self.stride, pad=self.pad,
        outsize=self.outsize, dilate=self.dilate, groups=self.groups)
    return y_expected.array,
Example #3
Source File: visual_backprop.py From see with GNU General Public License v3.0
def scale_layer(self, feature_map, node):
    input_data = node.inputs[0].data
    _, _, in_height, in_width = input_data.shape
    _, _, feature_height, feature_width = feature_map.shape
    kernel_height = in_height + 2 * node.ph - node.sy * (feature_height - 1)
    kernel_width = in_width + 2 * node.pw - node.sx * (feature_width - 1)

    scaled_feature = F.deconvolution_2d(
        feature_map,
        self.xp.ones((1, 1, kernel_height, kernel_width)),
        stride=(node.sy, node.sx),
        pad=(node.ph, node.pw),
        outsize=(in_height, in_width),
    )
    averaged_feature_map = F.average(input_data, axis=1, keepdims=True)
    feature_map = scaled_feature * averaged_feature_map
    return feature_map
Example #4
Source File: visual_backprop.py From kiss with GNU General Public License v3.0
def scale_layer(self, feature_map, node):
    input_data = node.inputs[0].data
    _, _, in_height, in_width = input_data.shape
    _, _, feature_height, feature_width = feature_map.shape
    kernel_height = in_height + 2 * node.ph - node.sy * (feature_height - 1)
    kernel_width = in_width + 2 * node.pw - node.sx * (feature_width - 1)

    scaled_feature = F.deconvolution_2d(
        feature_map,
        self.xp.ones((1, 1, kernel_height, kernel_width)),
        stride=(node.sy, node.sx),
        pad=(node.ph, node.pw),
        outsize=(in_height, in_width),
    )
    averaged_feature_map = F.average(input_data, axis=1, keepdims=True)
    feature_map = scaled_feature * averaged_feature_map
    return feature_map
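Both scale_layer variants invert the deconvolution output-size relation out = stride * (feat - 1) + ksize - 2 * pad, solving for the kernel size so that an all-ones deconvolution upsamples the feature map exactly back to the input resolution. A quick arithmetic check with assumed, illustrative values:

# Check the kernel-size inversion used above (values are assumptions):
# out = s * (f - 1) + k - 2 * p  =>  k = out + 2 * p - s * (f - 1)
in_size, stride, pad, feat_size = 32, 2, 1, 16
kernel = in_size + 2 * pad - stride * (feat_size - 1)  # 32 + 2 - 30 = 4
assert stride * (feat_size - 1) + kernel - 2 * pad == in_size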
Example #5
Source File: gen_extra_test.py From chainer-compiler with MIT License
def gen_convtranspose_bn(test_name):
    gb = onnx_script.GraphBuilder(test_name)
    bsize = 2
    ichan = 3
    ochan = 4
    ksize = 3
    isize = 7

    x = aranges(bsize, ochan, isize, isize)
    w = aranges(ochan, ichan, ksize, ksize) * 0.01
    scale = aranges(ichan) * 0.1 + 1
    bias = aranges(ichan) * 0.1 + 2
    mean = aranges(ichan) * 0.1 + 3
    var = aranges(ichan) * 0.1 + 4

    conv = F.deconvolution_2d(x, w, pad=1, outsize=(isize, isize))
    y = F.fixed_batch_normalization(conv, scale, bias, mean, var)

    x_v = gb.input('x', x)
    w_v = gb.param('w', w)
    scale_v = gb.param('scale', scale)
    bias_v = gb.param('bias', bias)
    mean_v = gb.param('mean', mean)
    var_v = gb.param('var', var)

    conv_v = gb.ConvTranspose([x_v, w_v],
                              kernel_shape=[ksize, ksize],
                              pads=[1, 1, 1, 1],
                              output_shape=[isize, isize])
    y_v = gb.BatchNormalization([conv_v, scale_v, bias_v, mean_v, var_v])
    gb.output(y_v, y)

    gb.gen_test()
Example #6
Source File: test_deconvolution_2d.py From chainer with MIT License
def forward_expected(self, link, inputs):
    x, = inputs
    W = link.W
    if self.nobias:
        y = F.deconvolution_2d(
            x, W, stride=self.stride, pad=self.pad,
            dilate=self.dilate, groups=self.groups)
    else:
        b = link.b
        y = F.deconvolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            dilate=self.dilate, groups=self.groups)
    return y.array,
Example #7
Source File: test_deconvolution_2d.py From chainer with MIT License
def forward(self, inputs, device):
    if self.nobias:
        x, W = inputs
        b = None
    else:
        x, W, b = inputs
    y = F.deconvolution_2d(
        x, W, b, stride=self.stride, pad=self.pad,
        outsize=self.outsize, dilate=self.dilate, groups=self.groups)
    return y,
Example #8
Source File: test_deconvolution_2d.py From chainer with MIT License
def forward(self):
    x = chainer.Variable(self.x)
    W = chainer.Variable(self.W)
    return F.deconvolution_2d(
        x, W, None, stride=1, pad=1, groups=self.groups)
Example #9
Source File: test_deconvolution_2d.py From chainer with MIT License
def check_invalid_dilation(self, x_data, w_data):
    x = chainer.Variable(x_data)
    w = chainer.Variable(w_data)
    F.deconvolution_2d(x, w, dilate=self.dilate)
Example #10
Source File: net.py From chainer-gan-lib with MIT License
def backward_convolution(x_in, x, l):
    y = F.deconvolution_2d(x, l.W, None, l.stride, l.pad,
                           (x_in.data.shape[2], x_in.data.shape[3]))
    return y
Example #11
Source File: backwards.py From chainer-gan-experiments with MIT License
def backward_convolution(x_in, x, l):
    # outsize is left as None here; passing
    # (x_in.data.shape[2], x_in.data.shape[3]) instead (as in the
    # commented-out original) would pin the output to the input resolution.
    y = F.deconvolution_2d(x, l.W, None, l.stride, l.pad, None)
    return y
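A hedged usage sketch for these helpers: the Convolution2D link, shapes, and values below are assumptions for illustration, not from either source. Deconvolving a feature map with the forward convolution's own weights maps it back to the input resolution (using the outsize-passing variant from Example #10):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

conv = L.Convolution2D(3, 8, ksize=4, stride=2, pad=1)
x_in = chainer.Variable(np.zeros((1, 3, 32, 32), dtype=np.float32))
h = conv(x_in)                           # (1, 8, 16, 16)
y = backward_convolution(x_in, h, conv)  # deconvolve with conv's own W
assert y.shape == x_in.shape             # back to (1, 3, 32, 32)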
Example #12
Source File: gpu_test_deconv2d.py From deep-learning-from-scratch-3 with MIT License
def test_forward1(self):
    n, c_i, c_o = 10, 1, 3
    h_i, w_i = 5, 10
    h_k, w_k = 10, 10
    h_p, w_p = 5, 5
    s_y, s_x = 5, 5
    x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
    W = np.random.uniform(0, 1, (c_i, c_o, h_k, w_k)).astype(np.float32)
    b = np.random.uniform(0, 1, c_o).astype(np.float32)

    expected = CF.deconvolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    y = F.deconv2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    self.assertTrue(array_allclose(expected.data, y.data))
Example #13
Source File: gpu_test_deconv2d.py From deep-learning-from-scratch-3 with MIT License
def test_forward2(self):
    n, c_i, c_o = 10, 1, 3
    h_i, w_i = 5, 10
    h_k, w_k = 10, 10
    h_p, w_p = 5, 5
    s_y, s_x = 5, 5
    x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
    W = np.random.uniform(0, 1, (c_i, c_o, h_k, w_k)).astype(np.float32)
    b = None

    expected = CF.deconvolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    y = F.deconv2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    self.assertTrue(array_allclose(expected.data, y.data))
Example #14
Source File: test_deconv2d.py From deep-learning-from-scratch-3 with MIT License
def test_forward1(self):
    n, c_i, c_o = 10, 1, 3
    h_i, w_i = 5, 10
    h_k, w_k = 10, 10
    h_p, w_p = 5, 5
    s_y, s_x = 5, 5
    x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
    W = np.random.uniform(0, 1, (c_i, c_o, h_k, w_k)).astype(np.float32)
    b = np.random.uniform(0, 1, c_o).astype(np.float32)

    expected = CF.deconvolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    y = F.deconv2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    self.assertTrue(array_allclose(expected.data, y.data))
Example #15
Source File: test_deconv2d.py From deep-learning-from-scratch-3 with MIT License
def test_forward2(self):
    n, c_i, c_o = 10, 1, 3
    h_i, w_i = 5, 10
    h_k, w_k = 10, 10
    h_p, w_p = 5, 5
    s_y, s_x = 5, 5
    x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
    W = np.random.uniform(0, 1, (c_i, c_o, h_k, w_k)).astype(np.float32)
    b = None

    expected = CF.deconvolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    y = F.deconv2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
    self.assertTrue(array_allclose(expected.data, y.data))
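For reference, Examples #12 through #15 all share the same geometry, and the expected output size follows the deconvolution formula out = stride * (in - 1) + ksize - 2 * pad. A quick sanity check using the shapes from these tests:

# out = s * (in - 1) + k - 2 * p, with the tests' sizes:
def deconv_out_size(in_size, k, s, p):
    return s * (in_size - 1) + k - 2 * p

assert deconv_out_size(5, 10, 5, 5) == 20    # output height
assert deconv_out_size(10, 10, 5, 5) == 45   # output width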