Python chainer.functions.leaky_relu() Examples
The following are 30 code examples of chainer.functions.leaky_relu(). Each example is taken from an open-source project; the source file, project, and license are noted above the code. You may also want to check out all available functions and classes of the module chainer.functions.
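As a quick orientation before the project samples, here is a minimal, self-contained sketch of calling the function directly (the input values are arbitrary, chosen only for illustration):

import numpy as np
import chainer.functions as F

# leaky_relu keeps non-negative inputs unchanged and scales negative
# inputs by `slope` (default 0.2); it returns a chainer.Variable.
x = np.array([[-1.0, 0.0, 2.0]], dtype=np.float32)
y = F.leaky_relu(x, slope=0.2)
print(y.array)  # [[-0.2  0.   2. ]]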
Example #1
Source File: net.py From pfio with MIT License
def __init__(self, in_ch):
    w = chainer.initializers.Normal(0.02)
    super(Encoder, self).__init__()
    with self.init_scope():
        self.c0 = L.Convolution2D(in_ch, 64, 3, 1, 1, initialW=w)
        self.c1 = ConvBNR(64, 128, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c2 = ConvBNR(128, 256, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c3 = ConvBNR(256, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c4 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c5 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c6 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c7 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
Example #2
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x):
    N = x.data.shape[0]
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    feature = F.reshape(F.leaky_relu(self.c3_0(h)), (N, 8192))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    return self.l4(h)
Example #3
Source File: darknet53.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, alpha):
    super(DarkUnit, self).__init__()
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2
    with self.init_scope():
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=partial(F.leaky_relu, slope=alpha))
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=partial(F.leaky_relu, slope=alpha))
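Example #3 binds the slope with functools.partial so each conv block receives a one-argument activation callable. A minimal standalone sketch of that pattern (the input array is arbitrary, for illustration only):

from functools import partial

import numpy as np
import chainer.functions as F

# Bind slope once; the result is still callable as activation(x),
# matching the one-argument interface the conv blocks above expect.
activation = partial(F.leaky_relu, slope=0.1)
y = activation(np.array([-1.0, 1.0], dtype=np.float32))
print(y.array)  # [-0.1  1. ]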
Example #4
Source File: net_pre-trained.py From chainer-partial_convolution_image_inpainting with MIT License
def __init__(self, ch0=3, input_size=256, layer_size=7):
    # input_size=512 (2^9) in the original paper but 256 (2^8) in this implementation
    if 2**(layer_size+1) != input_size:
        raise AssertionError
    enc_layers = {}
    dec_layers = {}
    # encoder layers
    enc_layers['PConv_00'] = PConv(ch0, 64, bn=False, sample='down-8')  # (1/2)^1
    enc_layers['PConv_01'] = PConv(64, 128, sample='down-4')  # (1/2)^2
    enc_layers['PConv_02'] = PConv(128, 256, sample='down-4')  # (1/2)^3
    enc_layers['PConv_03'] = PConv(256, 512, sample='down-4')  # (1/2)^4
    for i in range(4, layer_size):
        enc_layers['PConv_0'+str(i)] = PConv(512, 512, sample='down-4')  # (1/2)^5
    # decoder layers
    for i in range(4, layer_size):
        dec_layers['PConv_1'+str(i)] = PConv(512*2, 512, activation=F.leaky_relu)
    dec_layers['PConv_13'] = PConv(512+256, 256, activation=F.leaky_relu)
    dec_layers['PConv_12'] = PConv(256+128, 128, activation=F.leaky_relu)
    dec_layers['PConv_11'] = PConv(128+64, 64, activation=F.leaky_relu)
    dec_layers['PConv_10'] = PConv(64+ch0, ch0, bn=False, activation=None)
    self.layer_size = layer_size
    self.enc_layers = enc_layers
    self.dec_layers = dec_layers
    super(PartialConvCompletion, self).__init__(**enc_layers, **dec_layers)
Example #5
Source File: net.py From chainer-partial_convolution_image_inpainting with MIT License
def __init__(self, ch0=3, input_size=256, layer_size=7):
    # input_size=512 (2^9) in the original paper but 256 (2^8) in this implementation
    if 2**(layer_size+1) != input_size:
        raise AssertionError
    enc_layers = {}
    dec_layers = {}
    # encoder layers
    enc_layers['PConv_00'] = PConv(ch0, 64, bn=False, sample='down-7')  # (1/2)^1
    enc_layers['PConv_01'] = PConv(64, 128, sample='down-5')  # (1/2)^2
    enc_layers['PConv_02'] = PConv(128, 256, sample='down-5')  # (1/2)^3
    enc_layers['PConv_03'] = PConv(256, 512, sample='down-3')  # (1/2)^4
    for i in range(4, layer_size):
        enc_layers['PConv_0'+str(i)] = PConv(512, 512, sample='down-3')  # (1/2)^5
    # decoder layers
    for i in range(4, layer_size):
        dec_layers['PConv_1'+str(i)] = PConv(512*2, 512, activation=F.leaky_relu)
    dec_layers['PConv_13'] = PConv(512+256, 256, activation=F.leaky_relu)
    dec_layers['PConv_12'] = PConv(256+128, 128, activation=F.leaky_relu)
    dec_layers['PConv_11'] = PConv(128+64, 64, activation=F.leaky_relu)
    dec_layers['PConv_10'] = PConv(64+ch0, ch0, bn=False, activation=None)
    self.layer_size = layer_size
    self.enc_layers = enc_layers
    self.dec_layers = dec_layers
    super(PartialConvCompletion, self).__init__(**enc_layers, **dec_layers)
Example #6
Source File: network.py From chainer-PGGAN with MIT License
def __call__(self, x, alpha=1.0):
    if self.depth > 0 and alpha < 1:
        h1 = self['b%d' % (7 - self.depth)](x, True)
        x2 = F.average_pooling_2d(x, 2, 2)
        h2 = F.leaky_relu(self['b%d' % (7 - self.depth + 1)].fromRGB(x2))
        h = h2 * (1 - alpha) + h1 * alpha
    else:
        h = self['b%d' % (7 - self.depth)](x, True)

    for i in range(self.depth):
        h = self['b%d' % (7 - self.depth + 1 + i)](h)

    h = self.l(h)
    h = F.flatten(h)
    return h
Example #7
Source File: losses.py From EPG with MIT License
def process_trajectory(self, l):
    """This is the time-dependent convolution operation, applied to a
    trajectory (in order).
    """
    shp = l.shape[0]
    # First dim is batchsize=1, then either 1 channel for 2d conv or n_feat channels
    # for 1d conv.
    l = F.expand_dims(l, axis=0)
    l = F.transpose(l, (0, 2, 1))
    l = self.traj_c0(l)
    l = F.leaky_relu(l)
    l = self.traj_c1(l)
    l = F.leaky_relu(l)
    l = F.sum(l, axis=(0, 2)) / l.shape[0] / l.shape[2]
    l = F.expand_dims(l, axis=0)
    l = self.traj_d0(l)
    l = F.tile(l, (shp, 1))
    return l
Example #8
Source File: sr_model.py From become-yukarin with MIT License
def __init__(self, in_ch, base=64, extensive_layers=8) -> None:
    super().__init__()
    w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        if extensive_layers > 0:
            self.c0 = L.Convolution2D(in_ch, base * 1, 3, 1, 1, initialW=w)
        else:
            self.c0 = L.Convolution2D(in_ch, base * 1, 1, 1, 0, initialW=w)

        _choose = lambda i: 'down' if i < extensive_layers else 'same'
        self.c1 = CBR(base * 1, base * 2, bn=True, sample=_choose(1), activation=F.leaky_relu, dropout=False)
        self.c2 = CBR(base * 2, base * 4, bn=True, sample=_choose(2), activation=F.leaky_relu, dropout=False)
        self.c3 = CBR(base * 4, base * 8, bn=True, sample=_choose(3), activation=F.leaky_relu, dropout=False)
        self.c4 = CBR(base * 8, base * 8, bn=True, sample=_choose(4), activation=F.leaky_relu, dropout=False)
        self.c5 = CBR(base * 8, base * 8, bn=True, sample=_choose(5), activation=F.leaky_relu, dropout=False)
        self.c6 = CBR(base * 8, base * 8, bn=True, sample=_choose(6), activation=F.leaky_relu, dropout=False)
        self.c7 = CBR(base * 8, base * 8, bn=True, sample=_choose(7), activation=F.leaky_relu, dropout=False)
Example #9
Source File: net.py From chainer with MIT License
def __init__(self, in_ch):
    w = chainer.initializers.Normal(0.02)
    super(Encoder, self).__init__()
    with self.init_scope():
        self.c0 = L.Convolution2D(in_ch, 64, 3, 1, 1, initialW=w)
        self.c1 = ConvBNR(64, 128, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c2 = ConvBNR(128, 256, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c3 = ConvBNR(256, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c4 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c5 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c6 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c7 = ConvBNR(512, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
Example #10
Source File: block_1d.py From Deep_VoiceChanger with MIT License
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=True, dr=None):
    super(ConvBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.bn = bn
    self.dr = dr
    with self.init_scope():
        if mode == 'none':
            self.c = L.Convolution1D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
        elif mode == 'down':
            self.c = L.Convolution1D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
        elif mode == 'up':
            self.c = L.Deconvolution1D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
        else:
            raise Exception('mode is missing')
        if bn:
            self.b = L.BatchNormalization(out_channels)
Example #11
Source File: model.py From become-yukarin with MIT License
def __init__(self, in_ch, base=64, extensive_layers=8) -> None:
    super().__init__()
    w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        if extensive_layers > 0:
            self.c0 = Convolution1D(in_ch, base * 1, 3, 1, 1, initialW=w)
        else:
            self.c0 = Convolution1D(in_ch, base * 1, 1, 1, 0, initialW=w)

        _choose = lambda i: 'down' if i < extensive_layers else 'same'
        self.c1 = CBR(base * 1, base * 2, bn=True, sample=_choose(1), activation=F.leaky_relu, dropout=False)
        self.c2 = CBR(base * 2, base * 4, bn=True, sample=_choose(2), activation=F.leaky_relu, dropout=False)
        self.c3 = CBR(base * 4, base * 8, bn=True, sample=_choose(3), activation=F.leaky_relu, dropout=False)
        self.c4 = CBR(base * 8, base * 8, bn=True, sample=_choose(4), activation=F.leaky_relu, dropout=False)
        self.c5 = CBR(base * 8, base * 8, bn=True, sample=_choose(5), activation=F.leaky_relu, dropout=False)
        self.c6 = CBR(base * 8, base * 8, bn=True, sample=_choose(6), activation=F.leaky_relu, dropout=False)
        self.c7 = CBR(base * 8, base * 8, bn=True, sample=_choose(7), activation=F.leaky_relu, dropout=False)
Example #12
Source File: test_sequential.py From chainer with MIT License
def test_str(self):
    self.assertEqual(str(chainer.Sequential()), 'Sequential()')

    expected = '''\
  (0): Sequential(
    (0): Linear(in_size=None, out_size=3, nobias=False),
    (1): Linear(in_size=3, out_size=2, nobias=False),
  ),
  (1): Linear(in_size=2, out_size=3, nobias=False),
  (2): lambda x: functions.leaky_relu(x, slope=0.2),
'''
    layers = [
        self.s1,
        self.l3,
        lambda x: functions.leaky_relu(x, slope=0.2),
    ]
    if six.PY3:
        # In Python2, it fails because of different id of the function.
        layer = functools.partial(functions.leaky_relu, slope=0.2)
        layers.append(layer)
        expected += '  (3): %s,\n' % layer
    expected = 'Sequential(\n%s)' % expected
    s = chainer.Sequential(*layers)
    self.assertEqual(str(s), expected)
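Outside the test, the same lambda style is handy for assembling a small model; a minimal sketch (the layer sizes are arbitrary, chosen only for illustration):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

# A two-layer MLP whose hidden activation is leaky_relu with slope 0.2.
model = chainer.Sequential(
    L.Linear(None, 16),  # input size inferred on first call
    lambda x: F.leaky_relu(x, slope=0.2),
    L.Linear(16, 2),
)
y = model(np.zeros((1, 8), dtype=np.float32))  # y.shape == (1, 2)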
Example #13
Source File: basic_cnn_tail.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    # x = Variable(x_data)  # x_data.astype(np.float32)
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #14
Source File: block.py From Deep_VoiceChanger with MIT License
def __call__(self, x):
    if self.dr:
        with chainer.using_config('train', True):
            x = F.dropout(x, self.dr)
    if self.gap:
        x = F.sum(x, axis=(2, 3))
    N = x.shape[0]
    # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
    feature = F.reshape(F.leaky_relu(x), (N, -1))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    h = self.l(h)
    return h
Example #15
Source File: block.py From Deep_VoiceChanger with MIT License
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
    super(ResBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    initializer_sc = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
    self.learnable_sc = in_channels != out_channels
    self.dr = dr
    self.bn = bn
    with self.init_scope():
        self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
        self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
        if bn:
            self.b1 = L.BatchNormalization(out_channels)
            self.b2 = L.BatchNormalization(out_channels)
        if self.learnable_sc:
            self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
Example #16
Source File: net.py From chainer-cyclegan with MIT License
def __init__(self, in_ch=3, n_down_layers=4):
    layers = {}
    w = chainer.initializers.Normal(0.02)
    self.n_down_layers = n_down_layers

    layers['c0'] = CBR(in_ch, 64, bn=False, sample='down', activation=F.leaky_relu, dropout=False, noise=True)
    base = 64
    for i in range(1, n_down_layers):
        layers['c' + str(i)] = CBR(base, base * 2, bn=True, sample='down', activation=F.leaky_relu, dropout=False, noise=True)
        base *= 2
    layers['c' + str(n_down_layers)] = CBR(base, 1, bn=False, sample='none', activation=None, dropout=False, noise=True)

    super(Discriminator, self).__init__(**layers)
Example #17
Source File: net.py From pfio with MIT License
def __init__(self, in_ch, out_ch):
    w = chainer.initializers.Normal(0.02)
    super(Discriminator, self).__init__()
    with self.init_scope():
        self.c0_0 = ConvBNR(in_ch, 32, use_bn=False, sample='down', activation=F.leaky_relu, dropout=False)
        self.c0_1 = ConvBNR(out_ch, 32, use_bn=False, sample='down', activation=F.leaky_relu, dropout=False)
        self.c1 = ConvBNR(64, 128, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c2 = ConvBNR(128, 256, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c3 = ConvBNR(256, 512, use_bn=True, sample='down', activation=F.leaky_relu, dropout=False)
        self.c4 = L.Convolution2D(512, 1, 3, 1, 1, initialW=w)
Example #18
Source File: yolo_v3.py From chainercv with MIT License
def _leaky_relu(x):
    return F.leaky_relu(x, slope=0.1)
Example #19
Source File: yolo_v2.py From chainercv with MIT License
def _leaky_relu(x):
    return F.leaky_relu(x, slope=0.1)
Example #20
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x):
    h = F.unpooling_2d(x, 2, 2, 0, outsize=(x.shape[2] * 2, x.shape[3] * 2))
    h = F.leaky_relu(feature_vector_normalization(self.c0(h)))
    h = F.leaky_relu(feature_vector_normalization(self.c1(h)))
    return h
Example #21
Source File: ssp.py From models with MIT License
def __init__(self, n_class=1):
    self.n_class = n_class
    super(SSPYOLOv2, self).__init__()
    kwargs = {'activ': leaky_relu, 'bn_kwargs': {'eps': 1e-4}}
    with self.init_scope():
        self.conv1 = Conv2DBNActiv(3, 32, 3, 1, 1, **kwargs)
        self.conv2 = Conv2DBNActiv(32, 64, 3, 1, 1, **kwargs)
        self.conv3 = Conv2DBNActiv(64, 128, 3, 1, 1, **kwargs)
        self.conv4 = Conv2DBNActiv(128, 64, 1, 1, 0, **kwargs)
        self.conv5 = Conv2DBNActiv(64, 128, 3, 1, 1, **kwargs)
        self.conv6 = Conv2DBNActiv(128, 256, 3, 1, 1, **kwargs)
        self.conv7 = Conv2DBNActiv(256, 128, 1, 1, 0, **kwargs)
        self.conv8 = Conv2DBNActiv(128, 256, 3, 1, 1, **kwargs)
        self.conv9 = Conv2DBNActiv(256, 512, 3, 1, 1, **kwargs)
        self.conv10 = Conv2DBNActiv(512, 256, 1, 1, 0, **kwargs)
        self.conv11 = Conv2DBNActiv(256, 512, 3, 1, 1, **kwargs)
        self.conv12 = Conv2DBNActiv(512, 256, 1, 1, 0, **kwargs)
        self.conv13 = Conv2DBNActiv(256, 512, 3, 1, 1, **kwargs)
        self.conv14 = Conv2DBNActiv(512, 1024, 3, 1, 1, **kwargs)
        self.conv15 = Conv2DBNActiv(1024, 512, 1, 1, 0, **kwargs)
        self.conv16 = Conv2DBNActiv(512, 1024, 3, 1, 1, **kwargs)
        self.conv17 = Conv2DBNActiv(1024, 512, 1, 1, 0, **kwargs)
        self.conv18 = Conv2DBNActiv(512, 1024, 3, 1, 1, **kwargs)
        self.conv19 = Conv2DBNActiv(1024, 1024, 3, 1, 1, **kwargs)
        self.conv20 = Conv2DBNActiv(1024, 1024, 3, 1, 1, **kwargs)
        self.conv21 = Conv2DBNActiv(512, 64, 1, 1, 0, **kwargs)
        self.conv22 = Conv2DBNActiv(1280, 1024, 3, 1, 1, **kwargs)
        self.conv23 = L.Convolution2D(1024, 20, 1, 1, 0, nobias=False)
Example #22
Source File: net.py From pfio with MIT License
def forward(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 8):
        hs.append(self['c%d' % i](hs[i-1]))
    return hs
Example #23
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, z, stage):
    # stage0: c0->c1->out0
    # stage1: c0->c1-> (1-a)*(up->out0) + (a)*(b1->out1)
    # stage2: c0->c1->b1->out1
    # stage3: c0->c1->b1-> (1-a)*(up->out1) + (a)*(b2->out2)
    # stage4: c0->c1->b2->out2
    # ...
    stage = min(stage, self.max_stage)
    alpha = stage - math.floor(stage)
    stage = math.floor(stage)

    h = F.reshape(z, (len(z), self.n_hidden, 1, 1))
    h = F.leaky_relu(feature_vector_normalization(self.c0(h)))
    h = F.leaky_relu(feature_vector_normalization(self.c1(h)))

    for i in range(1, int(stage//2+1)):
        h = getattr(self, "b%d" % i)(h)

    if int(stage) % 2 == 0:
        out = getattr(self, "out%d" % (stage//2))
        x = out(h)
    else:
        out_prev = getattr(self, "out%d" % (stage//2))
        out_curr = getattr(self, "out%d" % (stage//2+1))
        b_curr = getattr(self, "b%d" % (stage//2+1))

        x_0 = out_prev(F.unpooling_2d(h, 2, 2, 0, outsize=(2*h.shape[2], 2*h.shape[3])))
        x_1 = out_curr(b_curr(h))
        x = (1.0 - alpha) * x_0 + alpha * x_1

    if chainer.configuration.config.train:
        return x
    else:
        scale = int(32 // x.data.shape[2])
        return F.unpooling_2d(x, scale, scale, 0, outsize=(32, 32))
Example #24
Source File: ssp.py From models with MIT License
def leaky_relu(x):
    return F.leaky_relu(x, slope=0.1)
Example #25
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x):
    h = F.leaky_relu((self.c0(x)))
    h = F.leaky_relu((self.c1(h)))
    h = self.pooling_comp * F.average_pooling_2d(h, 2, 2, 0)
    return h
Example #26
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x, stage):
    # stage0: in0->m_std->out0_0->out0_1->out0_2
    # stage1: (1-a)*(down->in0) + (a)*(in1->b1) ->m_std->out0->out1->out2
    # stage2: in1->b1->m_std->out0_0->out0_1->out0_2
    # stage3: (1-a)*(down->in1) + (a)*(in2->b2) ->b1->m_std->out0->out1->out2
    # stage4: in2->b2->b1->m_std->out0->out1->out2
    # ...
    stage = min(stage, self.max_stage)
    alpha = stage - math.floor(stage)
    stage = math.floor(stage)

    if int(stage) % 2 == 0:
        fromRGB = getattr(self, "in%d" % (stage//2))
        h = F.leaky_relu(fromRGB(x))
    else:
        fromRGB0 = getattr(self, "in%d" % (stage//2))
        fromRGB1 = getattr(self, "in%d" % (stage//2+1))
        b1 = getattr(self, "b%d" % (stage//2+1))

        h0 = F.leaky_relu(fromRGB0(self.pooling_comp * F.average_pooling_2d(x, 2, 2, 0)))
        h1 = b1(F.leaky_relu(fromRGB1(x)))
        h = (1 - alpha) * h0 + alpha * h1

    for i in range(int(stage // 2), 0, -1):
        h = getattr(self, "b%d" % i)(h)

    h = minibatch_std(h)
    h = F.leaky_relu((self.out0(h)))
    h = F.leaky_relu((self.out1(h)))
    return self.out2(h)
Example #27
Source File: net.py From chainer-gan-lib with MIT License
def __call__(self, x):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    h = F.leaky_relu(self.bn3_0(self.c3_0(h)))
    return self.l4(h)
Example #28
Source File: sr_model.py From become-yukarin with MIT License
def __call__(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 8):
        hs.append(self['c%d' % i](hs[i - 1]))
    return hs
Example #29
Source File: model.py From become-yukarin with MIT License
def __call__(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 8):
        hs.append(self['c%d' % i](hs[i - 1]))
    return hs
Example #30
Source File: sr_model.py From become-yukarin with MIT License
def __init__(self, in_ch, out_ch, base=32, extensive_layers=5) -> None:
    super().__init__()
    w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        _choose = lambda i: 'down' if i < extensive_layers else 'same'
        self.c0_0 = CBR(in_ch, base * 1, bn=False, sample=_choose(0), activation=F.leaky_relu, dropout=False)
        self.c0_1 = CBR(out_ch, base * 1, bn=False, sample=_choose(0), activation=F.leaky_relu, dropout=False)
        self.c1 = CBR(base * 2, base * 4, bn=True, sample=_choose(1), activation=F.leaky_relu, dropout=False)
        self.c2 = CBR(base * 4, base * 8, bn=True, sample=_choose(2), activation=F.leaky_relu, dropout=False)
        self.c3 = CBR(base * 8, base * 16, bn=True, sample=_choose(3), activation=F.leaky_relu, dropout=False)
        if extensive_layers > 4:
            self.c4 = L.Convolution2D(base * 16, 1, 3, 1, 1, initialW=w)
        else:
            self.c4 = L.Convolution2D(base * 16, 1, 1, 1, 0, initialW=w)