Python chainer.links.BatchNormalization() Examples
The following are 28 code examples of chainer.links.BatchNormalization().
Each example is taken from an open-source project; the original project and source file are named above each example.
You may also want to check out all available functions/classes of the module chainer.links.
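Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: a bias-free convolution, a BatchNormalization link sized to the channel count, and an activation. The class name ConvBNReLU and its hyperparameters are illustrative only, not taken from any project below.

import chainer
import chainer.functions as F
import chainer.links as L

class ConvBNReLU(chainer.Chain):
    """Illustrative conv -> batch norm -> ReLU block."""

    def __init__(self, in_channels, out_channels):
        super(ConvBNReLU, self).__init__()
        with self.init_scope():
            # nobias=True: a convolution bias is redundant because
            # BatchNormalization's beta parameter already provides a
            # per-channel shift.
            self.conv = L.Convolution2D(
                in_channels, out_channels, ksize=3, stride=1, pad=1,
                nobias=True)
            # size must equal the number of channels being normalized.
            self.bn = L.BatchNormalization(size=out_channels, eps=1e-5)

    def __call__(self, x):
        return F.relu(self.bn(self.conv(x)))

During training the link normalizes with batch statistics and updates its avg_mean/avg_var buffers; under chainer.using_config('train', False) it normalizes with those stored statistics instead.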
Example #1
Source File: polynet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad, num_blocks):
    super(PolyConv, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True)
        for i in range(num_blocks):
            setattr(self, "bn{}".format(i + 1), L.BatchNormalization(
                size=out_channels,
                eps=1e-5))
        self.activ = F.relu
Example #2
Source File: darts.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate):
    super(DartsDwsConv, self).__init__()
    with self.init_scope():
        self.activ = F.relu
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            dilate=dilate,
            use_bias=False)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
Example #3
Source File: darts.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, stride=2):
    super(DartsReduceBranch, self).__init__()
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2
    with self.init_scope():
        self.activ = F.relu
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
Example #4
Source File: copy_param.py From chainer-stylegan with MIT License
def soft_copy_param(target_link, source_link, tau, layers_in_use=None):
    """Soft-copy parameters of a link to another link."""
    target_params = dict(target_link.namedparams())
    for param_name, param in source_link.namedparams():
        if layers_in_use is not None:
            skip = True
            for name in layers_in_use:
                if param_name.startswith(name):
                    skip = False
                    break
            if skip:
                continue
        target_params[param_name].data[:] *= (1 - tau)
        target_params[param_name].data[:] += tau * param.data

    # Soft-copy Batch Normalization's statistics
    target_links = dict(target_link.namedlinks())
    for link_name, link in source_link.namedlinks():
        if isinstance(link, L.BatchNormalization):
            target_bn = target_links[link_name]
            target_bn.avg_mean[:] *= (1 - tau)
            target_bn.avg_mean[:] += tau * link.avg_mean
            target_bn.avg_var[:] *= (1 - tau)
            target_bn.avg_var[:] += tau * link.avg_var
Example #5
Source File: xception.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride, pad, activate):
    super(DwsConvBlock, self).__init__()
    self.activate = activate
    with self.init_scope():
        if self.activate:
            self.activ = F.relu
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=kernel_size,
            stride=stride,
            pad=pad)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
Example #6
Source File: copy_param.py From chainerrl with MIT License
def soft_copy_param(target_link, source_link, tau):
    """Soft-copy parameters of a link to another link."""
    target_params = dict(target_link.namedparams())
    for param_name, param in source_link.namedparams():
        if target_params[param_name].array is None:
            raise TypeError(
                'target_link parameter {} is None. Maybe the model params are '
                'not initialized.\nPlease try to forward dummy input '
                'beforehand to determine parameter shape of the model.'.format(
                    param_name))
        target_params[param_name].array[...] *= (1 - tau)
        target_params[param_name].array[...] += tau * param.array

    # Soft-copy Batch Normalization's statistics
    target_links = dict(target_link.namedlinks())
    for link_name, link in source_link.namedlinks():
        if isinstance(link, L.BatchNormalization):
            target_bn = target_links[link_name]
            target_bn.avg_mean[...] *= (1 - tau)
            target_bn.avg_mean[...] += tau * link.avg_mean
            target_bn.avg_var[...] *= (1 - tau)
            target_bn.avg_var[...] += tau * link.avg_var
Example #7
Source File: copy_param.py From chainerrl with MIT License
def copy_param(target_link, source_link):
    """Copy parameters of a link to another link."""
    target_params = dict(target_link.namedparams())
    for param_name, param in source_link.namedparams():
        if target_params[param_name].array is None:
            raise TypeError(
                'target_link parameter {} is None. Maybe the model params are '
                'not initialized.\nPlease try to forward dummy input '
                'beforehand to determine parameter shape of the model.'.format(
                    param_name))
        target_params[param_name].array[...] = param.array

    # Copy Batch Normalization's statistics
    target_links = dict(target_link.namedlinks())
    for link_name, link in source_link.namedlinks():
        if isinstance(link, L.BatchNormalization):
            target_bn = target_links[link_name]
            target_bn.avg_mean[...] = link.avg_mean
            target_bn.avg_var[...] = link.avg_var
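A brief usage sketch of the two chainerrl helpers above; ConvBNReLU is the illustrative block from the top of this page, and the q_func/target_q_func names are made up for the example.

import copy

# Online network and an initially identical target network.
q_func = ConvBNReLU(3, 16)
target_q_func = copy.deepcopy(q_func)

# Hard sync: target <- online (parameters and BN statistics).
copy_param(target_q_func, q_func)

# Soft (Polyak) update: target <- (1 - tau) * target + tau * online.
soft_copy_param(target_q_func, q_func, tau=0.005)

Note that both helpers copy the running avg_mean and avg_var of every BatchNormalization link explicitly: those buffers are not parameters, so they would otherwise be left stale in the target network.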
Example #8
Source File: block_1d.py From Deep_VoiceChanger with MIT License
def __init__(self, in_channels, out_channels, ksize=3, pad=1,
             activation=F.relu, mode='none', bn=True, dr=None):
    super(ResBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    initializer_sc = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
    self.learnable_sc = in_channels != out_channels
    self.dr = dr
    self.bn = bn
    with self.init_scope():
        self.c1 = L.Convolution1D(in_channels, out_channels, ksize=ksize, pad=pad,
                                  initialW=initializer, nobias=bn)
        self.c2 = L.Convolution1D(out_channels, out_channels, ksize=ksize, pad=pad,
                                  initialW=initializer, nobias=bn)
        if bn:
            self.b1 = L.BatchNormalization(out_channels)
            self.b2 = L.BatchNormalization(out_channels)
        if self.learnable_sc:
            # Note: the upstream source constructs an L.Convolution2D here,
            # which cannot be applied to this block's 3-D (N, C, length)
            # input; a 1-D shortcut convolution is assumed intended.
            self.c_sc = L.Convolution1D(in_channels, out_channels, ksize=1, pad=0,
                                        initialW=initializer_sc)
Example #9
Source File: pyramidnet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels):
    super(PyrInitBlock, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=7,
            stride=2,
            pad=3,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
        self.activ = F.relu
        self.pool = partial(
            F.max_pooling_2d,
            ksize=3,
            stride=2,
            pad=1,
            cover_all=False)
Example #10
Source File: block.py From Deep_VoiceChanger with MIT License
def __init__(self, in_channels, out_channels, ksize=3, pad=1,
             activation=F.leaky_relu, mode='none', bn=False, dr=None):
    super(ResBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    initializer_sc = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
    self.learnable_sc = in_channels != out_channels
    self.dr = dr
    self.bn = bn
    with self.init_scope():
        self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad,
                                  initialW=initializer, nobias=bn)
        self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad,
                                  initialW=initializer, nobias=bn)
        if bn:
            self.b1 = L.BatchNormalization(out_channels)
            self.b2 = L.BatchNormalization(out_channels)
        if self.learnable_sc:
            self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0,
                                        initialW=initializer_sc)
Example #11
Source File: inceptionv3.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad):
    super(InceptConv, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-3)
        self.activ = F.relu
Example #12
Source File: train_agent_chainer.py From gym-malware with MIT License
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024, 256]):
    super(QFunction, self).__init__()
    net = []
    inpdim = obs_size
    for i, n_hid in enumerate(n_hidden_channels):
        net += [('l{}'.format(i), L.Linear(inpdim, n_hid))]
        net += [('norm{}'.format(i), L.BatchNormalization(n_hid))]
        net += [('_act{}'.format(i), F.relu)]
        inpdim = n_hid
    net += [('output', L.Linear(inpdim, n_actions))]
    with self.init_scope():
        for n in net:
            if not n[0].startswith('_'):
                setattr(self, n[0], n[1])
    # Keep the ordered (name, layer) list; the forward pass iterates over it.
    self.forward = net
Example #13
Source File: darts.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad, activate=True):
    super(DartsConv, self).__init__()
    self.activate = activate
    with self.init_scope():
        if self.activate:
            self.activ = F.relu
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
Example #14
Source File: rec_multibp_resnet.py From nips17-adversarial-attack with MIT License
def __init__(self):
    super(Mix, self).__init__()
    enc_ch = [3, 64, 256, 512, 1024, 2048]
    ins_ch = [6, 128, 384, 640, 2176, 3072]
    self.conv = [None] * 6
    self.bn = [None] * 6
    for i in range(1, 6):
        c = L.Convolution2D(enc_ch[i] + ins_ch[i], enc_ch[i], 1, nobias=True)
        b = L.BatchNormalization(enc_ch[i])
        self.conv[i] = c
        self.bn[i] = b
        self.add_link('c{}'.format(i), c)
        self.add_link('b{}'.format(i), b)
Example #15
Source File: rec_multibp_resnet.py From nips17-adversarial-attack with MIT License
def __init__(self, out_ch):
    super(Decoder, self).__init__()
    with self.init_scope():
        self.mix = Mix()
        self.bot1 = BottleNeckB(2048, 1024)
        self.bot2 = BottleNeckB(2048, 1024)
        self.bot3 = BottleNeckB(2048, 1024)
        self.b5 = UpBlock(2048, 1024, 1024)
        self.b4 = UpBlock(1024, 512, 512)
        self.b3 = UpBlock(512, 256, 256)
        self.b2 = UpBlock(256, 64, 128)
        self.b1 = UpBlock(128, 3 + (6 + 3 * 13), 64)
        self.last_b = L.BatchNormalization(64)
        self.last_c = L.Convolution2D(64, out_ch * 2, 1, nobias=True)
Example #16
Source File: shufflenet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels):
    super(ShuffleInitBlock, self).__init__()
    with self.init_scope():
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.bn = L.BatchNormalization(size=out_channels)
        self.activ = F.relu
        self.pool = partial(
            F.max_pooling_2d,
            ksize=3,
            stride=2,
            pad=1,
            cover_all=False)
Example #17
Source File: shakeshakeresnet_cifar.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, stride):
    super(ShakeShakeShortcut, self).__init__()
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2
    with self.init_scope():
        self.pool = partial(
            F.average_pooling_2d,
            ksize=1,
            stride=stride)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.bn = L.BatchNormalization(
            size=out_channels,
            eps=1e-5)
Example #18
Source File: initializer.py From chainer-compiler with MIT License
def collect_inits(lk, pathname):
    res = []
    for na, pa in lk.namedparams():
        if isinstance(pa.data, type(None)):
            continue
        if na.count('/') == 1:
            res.append((pathname + na, pa))
    if isinstance(lk, L.BatchNormalization):
        res.append((pathname + '/avg_mean', lk.avg_mean))
        # TODO(satos) As it stands, the node tests pass but the ResNet tests suffer.
        # lk.avg_var = np.ones(lk.avg_var.shape).astype(np.float32) * 4.0
        res.append((pathname + '/avg_var', lk.avg_var))
    elif isinstance(lk, L.NStepLSTM) or isinstance(lk, L.NStepBiLSTM):
        # Collect these here first.
        for i, clk in enumerate(lk.children()):
            for param in clk.params():
                res.append((pathname + '/%d/%s' % (i, param.name), param))
        return res
    for clk in lk.children():
        res += collect_inits(clk, pathname + '/' + clk.name)
    return res
Example #19
Source File: Resnet_with_loss.py From chainer-compiler with MIT License
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
    super(BottleNeckA, self).__init__()
    initialW = initializers.HeNormal()
    with self.init_scope():
        self.conv1 = L.Convolution2D(
            in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
        self.bn1 = L.BatchNormalization(ch)
        self.conv2 = L.Convolution2D(
            ch, ch, 3, 1, 1, initialW=initialW, nobias=True, groups=groups)
        self.bn2 = L.BatchNormalization(ch)
        self.conv3 = L.Convolution2D(
            ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
        self.bn3 = L.BatchNormalization(out_size)
        self.conv4 = L.Convolution2D(
            in_size, out_size, 1, stride, 0, initialW=initialW, nobias=True)
        self.bn4 = L.BatchNormalization(out_size)
Example #20
Source File: inceptionresnetv2.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad):
    super(InceptConv, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            decay=0.1,
            eps=1e-3)
        self.activ = F.relu
Example #21
Source File: conv_2d_bn_activ.py From chainer-compiler with MIT License
def __init__(self, in_channels, out_channels, ksize=None,
             stride=1, pad=0, dilate=1, groups=1, nobias=True,
             initialW=None, initial_bias=None, activ=relu, bn_kwargs={}):
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None

    self.activ = activ
    super(Conv2DBNActiv, self).__init__()
    with self.init_scope():
        self.conv = Convolution2D(
            in_channels, out_channels, ksize, stride, pad,
            nobias, initialW, initial_bias,
            dilate=dilate, groups=groups)
        if 'comm' in bn_kwargs:
            with flags.ignore_branch():
                self.bn = MultiNodeBatchNormalization(
                    out_channels, **bn_kwargs)
        else:
            self.bn = BatchNormalization(out_channels, **bn_kwargs)
Example #22
Source File: fishnet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, reduction=16):
    super(PreSEAttBlock, self).__init__()
    mid_channels = out_channels // reduction
    with self.init_scope():
        self.bn = L.BatchNormalization(
            size=in_channels,
            eps=1e-5)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=True)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True)
Example #23
Source File: resnet50.py From chainer-compiler with MIT License
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
    super(BottleNeckA, self).__init__()
    initialW = initializers.HeNormal()
    with self.init_scope():
        self.conv1 = L.Convolution2D(
            in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
        self.bn1 = L.BatchNormalization(ch)
        self.conv2 = L.Convolution2D(
            ch, ch, 3, 1, 1, initialW=initialW, nobias=True, groups=groups)
        self.bn2 = L.BatchNormalization(ch)
        self.conv3 = L.Convolution2D(
            ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
        self.bn3 = L.BatchNormalization(out_size)
        self.conv4 = L.Convolution2D(
            in_size, out_size, 1, stride, 0, initialW=initialW, nobias=True)
        self.bn4 = L.BatchNormalization(out_size)
Example #24
Source File: inceptionv4.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad):
    super(InceptConv, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True)
        self.bn = L.BatchNormalization(
            size=out_channels,
            decay=0.1,
            eps=1e-3)
        self.activ = F.relu
Example #25
Source File: condensenet.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, ksize, stride, pad, groups):
    super(CondenseSimpleConv, self).__init__()
    with self.init_scope():
        self.bn = L.BatchNormalization(size=in_channels)
        self.activ = F.relu
        self.conv = L.Convolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=True,
            groups=groups)
Example #26
Source File: links.py From chainer-compiler with MIT License
def __init__(self, ch):
    super(Link_BatchNormalization, self).__init__(
        L.BatchNormalization(1))
    self.n_out = ch.beta.shape[0]
    self.scale = helper.make_tensor_value_info(
        '/gamma', TensorProto.FLOAT, [self.n_out])
    self.B = helper.make_tensor_value_info(
        '/beta', TensorProto.FLOAT, [self.n_out])
    self.mean = helper.make_tensor_value_info(
        '/avg_mean', TensorProto.FLOAT, [self.n_out])
    self.var = helper.make_tensor_value_info(
        '/avg_var', TensorProto.FLOAT, [self.n_out])
    self.eps = ch.eps
    self.momentum = ch.decay
Example #27
Source File: condensenet.py From imgclsmob with MIT License
def __init__(self, in_channels):
    super(PostActivation, self).__init__()
    with self.init_scope():
        self.bn = L.BatchNormalization(size=in_channels)
        self.activ = F.relu
Example #28
Source File: voca.py From imgclsmob with MIT License
def __init__(self, audio_features, audio_window_size, base_persons,
             encoder_features, **kwargs):
    super(VocaEncoder, self).__init__(**kwargs)
    self.audio_window_size = audio_window_size
    channels = (32, 32, 64, 64)
    fc1_channels = 128
    with self.init_scope():
        self.bn = L.BatchNormalization(
            size=1,
            eps=1e-5)
        in_channels = audio_features + base_persons
        self.branch = SimpleSequential()
        with self.branch.init_scope():
            for i, out_channels in enumerate(channels):
                setattr(self.branch, "conv{}".format(i + 1), ConvBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    ksize=(3, 1),
                    stride=(2, 1),
                    pad=(1, 0),
                    use_bias=True,
                    use_bn=False))
                in_channels = out_channels
        in_channels += base_persons
        self.fc1 = L.Linear(
            in_size=in_channels,
            out_size=fc1_channels)
        self.fc2 = L.Linear(
            in_size=fc1_channels,
            out_size=encoder_features)