Python chainer.functions.pad() Examples
The following are 30 code examples of chainer.functions.pad().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module chainer.functions.
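Before the project examples, here is a minimal usage sketch (the array values and shapes are illustrative, not taken from any project below): chainer.functions.pad mirrors numpy.pad, taking a per-axis pad_width, a mode, and mode-specific keywords such as constant_values.

import numpy as np
import chainer.functions as F

# Pad one zero row/column on each side of the two spatial axes of an
# NCHW tensor; the batch and channel axes are left untouched.
x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)
y = F.pad(x, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
print(y.shape)  # (1, 1, 5, 5)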
Example #1
Source File: chainer_functions.py From chainer-compiler with MIT License
def infer_return(self, x_type, ksize, stride, pad):
    pad = make_pair(pad)
    ksize = make_pair(ksize)
    stride = make_pair(stride)
    shape_0 = x_type.shape[0]
    shape_1 = x_type.shape[1]

    if self.cover_all:
        shape_2 = math.ceil((x_type.shape[2] + pad[0] * 2 - ksize[0]) / stride[0]) + 1
        shape_3 = math.ceil((x_type.shape[3] + pad[1] * 2 - ksize[1]) / stride[1]) + 1
    else:
        shape_2 = (x_type.shape[2] + pad[0] * 2 - ksize[0]) // stride[0] + 1
        shape_3 = (x_type.shape[3] + pad[1] * 2 - ksize[1]) // stride[1] + 1
    return TyChainerVariable(x_type.dtype,
                             shape=(shape_0, shape_1, shape_2, shape_3))
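A quick sanity check of the two branches above on illustrative numbers (an 8-pixel axis, 3-pixel kernel, stride 2, no padding): the cover_all case rounds up, so it can produce one more output position than the default floor-division formula.

import math

in_size, ksize, stride, pad = 8, 3, 2, 0  # illustrative values
floor_out = (in_size + pad * 2 - ksize) // stride + 1           # 3
ceil_out = math.ceil((in_size + pad * 2 - ksize) / stride) + 1  # 4
print(floor_out, ceil_out)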
Example #2
Source File: mask_refine.py From models with MIT License
def forward(self, f, corr_feature, pos):
    p0 = F.pad(f[0], ((0, 0), (0, 0), (16, 16), (16, 16)), 'constant')
    p0 = p0[:, :, 4*pos[0]:4*pos[0]+61, 4*pos[1]:4*pos[1]+61]
    p1 = F.pad(f[1], ((0, 0), (0, 0), (8, 8), (8, 8)), 'constant')
    p1 = p1[:, :, 2*pos[0]:2*pos[0]+31, 2*pos[1]:2*pos[1]+31]
    p2 = F.pad(f[2], ((0, 0), (0, 0), (4, 4), (4, 4)), 'constant')
    p2 = p2[:, :, pos[0]:pos[0]+15, pos[1]:pos[1]+15]

    p3 = corr_feature[:, :, pos[0], pos[1]].reshape((-1, 256, 1, 1))

    out = self.deconv(p3)
    # NOTE: In the original Torch, resize_images uses 'nearest' interpolation
    out = self.h2(out) + self.v2(p2)
    out = self.post0(resize_images(
        out, (31, 31), align_corners=False, mode='nearest'))
    out = self.h1(out) + self.v1(p1)
    out = self.post1(
        resize_images(out, (61, 61), align_corners=False, mode='nearest'))
    out = self.h0(out) + self.v0(p0)
    out = self.post2(
        resize_images(out, (127, 127), align_corners=False, mode='nearest'))
    return out.reshape((-1, 127 ** 2))
Example #3
Source File: nasnet.py From imgclsmob with MIT License
def dws_branch_k5_s1_p2(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    5x5/1/2 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=5,
        stride=1,
        pad=2,
        extra_padding=extra_padding)
Example #4
Source File: nasnet.py From imgclsmob with MIT License
def dws_branch_k3_s1_p1(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    3x3/1/1 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=3,
        stride=1,
        pad=1,
        extra_padding=extra_padding)
Example #5
Source File: nasnet.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             ksize,
             stride,
             pad,
             extra_padding=False):
    super(NasDwsConv, self).__init__()
    self.extra_padding = extra_padding

    with self.init_scope():
        self.activ = F.relu
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            use_bias=False)
        self.bn = nasnet_batch_norm(channels=out_channels)
Example #6
Source File: nasnet.py From imgclsmob with MIT License
def nas_conv1x1(in_channels, out_channels):
    """
    1x1 version of the NASNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return NasConv(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=1,
        stride=1,
        pad=0,
        groups=1)
Example #7
Source File: nasnet.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             ksize,
             stride,
             pad,
             groups):
    super(NasConv, self).__init__()
    with self.init_scope():
        self.activ = F.relu
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True,
            groups=groups)
        self.bn = nasnet_batch_norm(channels=out_channels)
Example #8
Source File: nasnet.py From imgclsmob with MIT License
def process_with_padding(x,
                         process=(lambda x: x),
                         pad_width=((0, 0), (0, 0), (1, 0), (1, 0))):
    """
    Auxiliary decorator for a layer with NASNet specific extra padding.

    Parameters:
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input tensor.
    process : function, default (lambda x: x)
        A decorated layer.
    pad_width : tuple of tuple of int, default ((0, 0), (0, 0), (1, 0), (1, 0))
        Pad width for the input tensor.

    Returns
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulting tensor.
    """
    x = F.pad(x, pad_width=pad_width, mode="constant", constant_values=0)
    x = process(x)
    x = x[:, :, 1:, 1:]
    return x
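The pad-then-crop trick above shifts a layer's sampling grid one pixel up and to the left without changing its output shape. A minimal sketch, assuming process_with_padding from the example above is in scope and using F.average_pooling_2d as a stand-in for a NASNet layer:

import numpy as np
import chainer.functions as F

x = np.random.rand(1, 8, 14, 14).astype(np.float32)
pool = lambda t: F.average_pooling_2d(t, ksize=3, stride=2, pad=1)

y = process_with_padding(x, process=pool)
print(y.shape)  # (1, 8, 7, 7): same shape as pool(x), sampled one pixel up-left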
Example #9
Source File: pyramidnet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels):
    super(PyrInitBlock, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=7,
            stride=2,
            pad=3,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
        self.activ = F.relu
        self.pool = partial(
            F.max_pooling_2d,
            ksize=3,
            stride=2,
            pad=1,
            cover_all=False)
Example #10
Source File: wrn1bit_cifar.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             stride,
             binarized=False):
    super(PreResUnit1bit, self).__init__()
    self.resize_identity = (stride != 1)

    with self.init_scope():
        self.body = PreResBlock1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            binarized=binarized)
        if self.resize_identity:
            self.identity_pool = partial(
                F.average_pooling_2d,
                ksize=3,
                stride=2,
                pad=1)
Example #11
Source File: efficientnet.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             bn_eps,
             activation,
             tf_mode):
    super(EffiInitBlock, self).__init__()
    self.tf_mode = tf_mode

    with self.init_scope():
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            pad=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation)
Example #12
Source File: irevnet.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             stride,
             preactivate):
    super(IRevUnit, self).__init__()
    if not preactivate:
        in_channels = in_channels // 2

    padding = 2 * (out_channels - in_channels)
    self.do_padding = (padding != 0) and (stride == 1)
    self.do_downscale = (stride != 1)

    with self.init_scope():
        if self.do_padding:
            self.pad = IRevInjectivePad(padding)
        self.bottleneck = IRevBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            preactivate=preactivate)
        if self.do_downscale:
            self.psi = IRevDownscale(stride)
Example #13
Source File: chainer_functions.py From chainer-compiler with MIT License
def __call__(self, ty_args, ty_kwargs):
    x_type, pad_width_type, mode_type = ty_args

    assert isinstance(mode_type, TyString), \
        "chainer.functions.pad: mode_type should be string"
    self.check_type_forward(make_multiple_tc_variable(ty_args[:1], ('x',)))

    if lacks_value(pad_width_type):
        return TyChainerVariable(x_type.dtype, ndim=x_type.ndim)

    assert pad_width_type.size() > 0, \
        "chainer.functions.pad: pad_width is not specified"

    pad_width = extract_value_from_ty(pad_width_type)
    if isinstance(pad_width, int):
        pad_width = make_pair(pad_width)
    if isinstance(pad_width[0], int):
        pad_width = pad_width * x_type.ndim
    for pad in pad_width:
        assert len(pad) == 2, "chainer.functions.pad: pad_width is invalid"
    return self.infer_return(x_type, pad_width)
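The int/pair normalization above matches numpy.pad's broadcasting rules, which chainer.functions.pad follows. A small illustrative check with numpy alone:

import numpy as np

# A scalar pad_width of 1 behaves like ((1, 1),) * x.ndim.
x = np.zeros((2, 3))
assert np.pad(x, 1, mode='constant').shape == (4, 5)
assert np.pad(x, ((1, 1), (1, 1)), mode='constant').shape == (4, 5)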
Example #14
Source File: test_pad.py From chainer with MIT License
def forward(self, inputs, device):
    x, = inputs
    y = functions.pad(x, self.pad_width, self.mode)
    return y,
Example #15
Source File: irevnet.py From imgclsmob with MIT License
def __call__(self, x):
    return F.pad(x, pad_width=((0, 0), (0, self.padding), (0, 0), (0, 0)),
                 mode="constant", constant_values=0)
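A minimal sketch of what this injective padding does, with illustrative shapes: self.padding zero channels are appended on axis 1, so the block enlarges (rather than discards) information and the i-RevNet mapping stays invertible.

import numpy as np
import chainer.functions as F

padding = 4  # stands in for self.padding
x = np.random.rand(2, 12, 8, 8).astype(np.float32)
y = F.pad(x, pad_width=((0, 0), (0, padding), (0, 0), (0, 0)),
          mode="constant", constant_values=0)
print(y.shape)  # (2, 16, 8, 8)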
Example #16
Source File: nasnet.py From imgclsmob with MIT License
def __init__(self,
             in_channels,
             out_channels,
             ksize,
             stride,
             pad,
             extra_padding=False,
             stem=False):
    super(DwsBranch, self).__init__()
    assert (not stem) or (not extra_padding)
    mid_channels = out_channels if stem else in_channels

    with self.init_scope():
        self.conv1 = NasDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            extra_padding=extra_padding)
        self.conv2 = NasDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=1,
            pad=pad)
Example #17
Source File: chainer_functions.py From chainer-compiler with MIT License
def infer_return(self, conv, x_type):
    ksize = make_pair(conv.ksize)
    stride = make_pair(conv.stride)
    pad = make_pair(conv.pad)
    dilate = make_pair(conv.dilate)

    shape_2 = get_conv_outsize(
        x_type.shape[2], ksize[0], stride[0], pad[0], d=dilate[0])
    shape_3 = get_conv_outsize(
        x_type.shape[3], ksize[1], stride[1], pad[1], d=dilate[1])
    ret_shape = (x_type.shape[0], conv.out_channels, shape_2, shape_3)
    return TyChainerVariable(x_type.dtype, shape=ret_shape)
Example #18
Source File: nasnet.py From imgclsmob with MIT License
def dws_branch_k5_s2_p2(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False):
    """
    5x5/2/2 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in the stem block.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=5,
        stride=2,
        pad=2,
        extra_padding=extra_padding,
        stem=stem)
Example #19
Source File: nasnet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels):
    super(NASNetInitBlock, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=3,
            stride=2,
            pad=0,
            nobias=True)
        self.bn = nasnet_batch_norm(channels=out_channels)
Example #20
Source File: ntsnet_cub.py From imgclsmob with MIT License
def __call__(self, x):
    raw_pre_features = self.backbone(x)

    rpn_score = self.navigator_unit(raw_pre_features)
    rpn_score.to_cpu()
    all_cdds = [
        np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1)
        for y in rpn_score.array]
    top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds]
    top_n_cdds = np.array(top_n_cdds)
    top_n_index = top_n_cdds[:, :, -1].astype(np.int64)
    top_n_index = np.array(top_n_index, dtype=np.int64)
    top_n_prob = np.take_along_axis(rpn_score.array, top_n_index, axis=1)

    batch = x.shape[0]
    x_pad = F.pad(x, pad_width=self.pad_width, mode="constant", constant_values=0)
    part_imgs = []
    for i in range(batch):
        for j in range(self.top_n):
            y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64))
            x_res = F.resize_images(
                x_pad[i:i + 1, :, y0:y1, x0:x1],
                output_shape=(224, 224))
            part_imgs.append(x_res)
    part_imgs = F.concat(tuple(part_imgs), axis=0)
    part_features = self.backbone_tail(self.backbone(part_imgs))

    part_feature = part_features.reshape((batch, self.top_n, -1))
    part_feature = part_feature[:, :self.num_cat, :]
    part_feature = part_feature.reshape((batch, -1))

    raw_features = self.backbone_tail(raw_pre_features)

    concat_out = F.concat((part_feature, raw_features), axis=1)
    concat_logits = self.concat_net(concat_out)

    if self.aux:
        raw_logits = self.backbone_classifier(raw_features)
        part_logits = self.partcls_net(part_features).reshape((batch, self.top_n, -1))
        return concat_logits, raw_logits, part_logits, top_n_prob
    else:
        return concat_logits
Example #21
Source File: tf_convolution_2d.py From chainercv with MIT License
def __init__(self,
             in_channels,
             out_channels,
             ksize=None,
             stride=1,
             pad='SAME',
             nobias=False,
             initialW=None,
             initial_bias=None,
             **kwargs):
    super(TFConvolution2D, self).__init__()

    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None

    if pad in ('SAME', 'VALID'):  # TF compatible pad
        self.padding = lambda x: _tf_padding(
            x, _pair(self.conv.ksize), _pair(self.conv.stride), pad)
        conv_pad = 0
    else:
        self.padding = None
        assert isinstance(pad, int)
        conv_pad = pad

    with self.init_scope():
        self.conv = Convolution2D(in_channels, out_channels, ksize, stride,
                                  conv_pad, nobias, initialW, initial_bias,
                                  **kwargs)
Example #22
Source File: test_pad.py From chainer with MIT License
def forward_expected(self, inputs):
    x, = inputs
    y_expected = numpy.pad(x, self.pad_width, self.mode)
    return y_expected.astype(self.dtype),
Example #23
Source File: test_pad.py From chainer with MIT License
def forward_expected(self, inputs):
    x, = inputs
    y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
                           constant_values=self.constant_values)
    return y_expected,
Example #24
Source File: test_pad.py From chainer with MIT License
def forward(self, inputs, device):
    x, = inputs
    y = functions.pad(x, self.pad_width, mode=self.mode,
                      constant_values=self.constant_values)
    return y,
Example #25
Source File: mask_refine.py From models with MIT License
def __init__(self):
    super(MaskRefine, self).__init__()
    with self.init_scope():
        self.v0 = chainer.Sequential(
            Conv2DActiv(64, 16, ksize=3, pad=1),
            Conv2DActiv(16, 4, ksize=3, pad=1),
        )
        self.v1 = chainer.Sequential(
            Conv2DActiv(256, 64, ksize=3, pad=1),
            Conv2DActiv(64, 16, ksize=3, pad=1),
        )
        self.v2 = chainer.Sequential(
            Conv2DActiv(512, 128, ksize=3, pad=1),
            Conv2DActiv(128, 32, ksize=3, pad=1),
        )
        self.h2 = chainer.Sequential(
            Conv2DActiv(32, 32, ksize=3, pad=1),
            Conv2DActiv(32, 32, ksize=3, pad=1),
        )
        self.h1 = chainer.Sequential(
            Conv2DActiv(16, 16, ksize=3, pad=1),
            Conv2DActiv(16, 16, ksize=3, pad=1),
        )
        self.h0 = chainer.Sequential(
            Conv2DActiv(4, 4, ksize=3, pad=1),
            Conv2DActiv(4, 4, ksize=3, pad=1),
        )

        self.deconv = L.Deconvolution2D(256, 32, ksize=15, stride=15)
        self.post0 = L.Convolution2D(32, 16, ksize=3, pad=1)
        self.post1 = L.Convolution2D(16, 4, ksize=3, pad=1)
        self.post2 = L.Convolution2D(4, 1, ksize=3, pad=1)
Example #26
Source File: chainer_functions.py From chainer-compiler with MIT License
def __call__(self, ty_args, ty_kwargs):
    self.check_type_forward(make_multiple_tc_variable(ty_args, ('x', 'ksize')))

    x_type, ksize_type = ty_args
    ksize = extract_value_from_ty(ksize_type)
    stride, _ = get_kwarg(ty_kwargs, 'stride', default=ksize)
    pad, _ = get_kwarg(ty_kwargs, 'pad', default=0)
    if self.cover_all is None:
        self.cover_all, _ = get_kwarg(ty_kwargs, 'cover_all', default=True)
    return self.infer_return(x_type, ksize, stride, pad)
Example #27
Source File: tf_convolution_2d.py From chainercv with MIT License
def _get_pad(in_size, ksize, stride, tf_padding):
    if tf_padding == 'SAME':
        tf_out_size = int(np.ceil(float(in_size) / stride))
    elif tf_padding == 'VALID':
        tf_out_size = int(np.ceil(float(in_size - ksize + 1) / stride))
    pad = int(stride * tf_out_size - in_size + ksize - stride)
    assert conv.get_conv_outsize(in_size + pad, ksize, stride, 0) == tf_out_size
    return pad
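On illustrative numbers, the 'SAME' branch works out as follows (a standalone re-derivation, not the chainercv code): with in_size=10, ksize=3 and stride=2, TensorFlow wants ceil(10 / 2) = 5 outputs, which requires 2*5 - 10 + 3 - 2 = 1 extra pixel in total. _tf_padding in the next example splits that total as (pad // 2, ceil(pad / 2)), putting the odd pixel on the bottom/right as TensorFlow does.

import numpy as np

def same_pad_total(in_size, ksize, stride):
    # Total padding TensorFlow's 'SAME' mode needs along one axis.
    tf_out_size = int(np.ceil(float(in_size) / stride))
    return stride * tf_out_size - in_size + ksize - stride

total = same_pad_total(10, 3, 2)
print(total, (total // 2, int(np.ceil(float(total) / 2))))  # 1 (0, 1)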
Example #28
Source File: tf_convolution_2d.py From chainercv with MIT License
def _tf_padding(x, ksize, stride, tf_padding):
    pad_2 = _get_pad(x.shape[2], ksize[0], stride[0], tf_padding)
    pad_3 = _get_pad(x.shape[3], ksize[1], stride[1], tf_padding)
    if pad_2 or pad_3:
        return pad(
            x,
            ((0, 0), (0, 0),
             (pad_2 // 2, int(np.ceil(float(pad_2) / 2))),
             (pad_3 // 2, int(np.ceil(float(pad_3) / 2)))),
            mode='constant')
    else:
        return x
Example #29
Source File: efficientnet.py From imgclsmob with MIT License
def __call__(self, x):
    if self.residual:
        identity = x
    x = self.conv1(x)
    if self.tf_mode:
        x = F.pad(x, pad_width=calc_tf_padding(x, kernel_size=self.kernel_size,
                                               stride=self.stride),
                  mode="constant", constant_values=0)
    x = self.conv2(x)
    if self.use_se:
        x = self.se(x)
    x = self.conv3(x)
    if self.residual:
        x = x + identity
    return x
Example #30
Source File: irevnet.py From imgclsmob with MIT License
def __call__(self, x1, x2):
    if self.do_padding:
        x = F.concat((x1, x2), axis=1)
        x = self.pad(x)
        x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
    fx2 = self.bottleneck(x2)
    if self.do_downscale:
        x1 = self.psi(x1)
        x2 = self.psi(x2)
    y1 = fx2 + x1
    return x2, y1