Python mxnet.gluon.nn.LeakyReLU() Examples
The following are 30 code examples of mxnet.gluon.nn.LeakyReLU(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.nn.
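Before the examples, a minimal sketch of the layer itself may help. nn.LeakyReLU(alpha) computes f(x) = x for x > 0 and f(x) = alpha * x otherwise; the block has no parameters, so it can be applied directly:

import mxnet as mx
from mxnet.gluon import nn

# Leaky ReLU: f(x) = x if x > 0 else alpha * x
act = nn.LeakyReLU(alpha=0.1)
x = mx.nd.array([-2.0, -0.5, 0.0, 1.5])
print(act(x))  # [-0.2, -0.05, 0.0, 1.5]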
Example #1
Source File: utils.py From dgl with Apache License 2.0
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
            return nn.Activation(act)
        else:
            raise NotImplementedError
    else:
        return act
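A brief usage sketch (hedged: it assumes `from mxnet.gluon import nn`, as in the source file). Strings map to activation blocks, None maps to the identity, and an existing HybridBlock is returned unchanged:

act = get_activation('leaky')            # nn.LeakyReLU(0.1)
act = get_activation('relu')             # nn.Activation('relu')
act = get_activation(None)               # identity: lambda x: x
act = get_activation(nn.LeakyReLU(0.3))  # returned as-is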
Example #2
Source File: test_gluon.py From SNIPER-mxnet with Apache License 2.0
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3)
Example #3
Source File: dcgan.py From training_results_v0.6 with Apache License 2.0
def get_netD():
    # build the discriminator
    netD = nn.Sequential()
    with netD.name_scope():
        # input is (nc) x 64 x 64
        netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf) x 32 x 32
        netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*2) x 16 x 16
        netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*4) x 8 x 8
        netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*8) x 4 x 4
        netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
        # state size. 2 x 1 x 1
    return netD
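In the original dcgan.py, `ndf` (base filter count) and `nc` (input channels) are module-level variables not shown above. A rough usage sketch under that assumption, with illustrative values, run in the same module as get_netD:

ndf, nc = 64, 3  # assumed module-level settings; values are illustrative
netD = get_netD()
netD.initialize()
fake = mx.nd.random.uniform(shape=(1, nc, 64, 64))
print(netD(fake).shape)  # (1, 2, 1, 1): a two-way real/fake score per image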
Example #4
Source File: darknet53.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels, bn_use_global_stats, alpha, **kwargs):
    super(DarkUnit, self).__init__(**kwargs)
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2

    with self.name_scope():
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha))
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha))
Example #5
Source File: mobilenetv3.py From gluon-cv with Apache License 2.0
def __init__(self, act_func, **kwargs):
    super(Activation, self).__init__(**kwargs)
    if act_func == "relu":
        self.act = nn.Activation('relu')
    elif act_func == "relu6":
        self.act = ReLU6()
    elif act_func == "hard_sigmoid":
        self.act = HardSigmoid()
    elif act_func == "swish":
        self.act = nn.Swish()
    elif act_func == "hard_swish":
        self.act = HardSwish()
    elif act_func == "leaky":
        self.act = nn.LeakyReLU(alpha=0.375)
    else:
        raise NotImplementedError
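A brief construction sketch (hedged: `Activation` here is the gluon-cv wrapper class above, not nn.Activation, and its hybrid_forward is not shown):

act = Activation('leaky')  # wraps nn.LeakyReLU(alpha=0.375)
act = Activation('relu')   # wraps nn.Activation('relu')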
Example #6
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3)
Example #7
Source File: mobilefacedetnet.py From MobileFace with MIT License
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm, norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(nn.Conv2D(channel, kernel_size=kernel,
                       strides=stride, padding=padding, use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9,
                        **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell
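A quick sketch of how such a cell behaves (hedged: it assumes the source file's imports, i.e. `import mxnet as mx`, `from mxnet.gluon import nn`, and `BatchNorm` from mxnet.gluon.nn). With a 3x3 kernel, padding 1, and stride 1, spatial size is preserved:

cell = _conv2d(channel=32, kernel=3, padding=1, stride=1)
cell.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 64, 64))
print(cell(x).shape)  # (1, 32, 64, 64): same spatial size, 32 output channels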
Example #8
Source File: unet.py From crnn.gluon with Apache License 2.0
def __init__(self, channels, kernel_size):
    super().__init__()
    with self.name_scope():
        self.conv = nn.HybridSequential()
        with self.conv.name_scope():
            self.conv.add(
                nn.Conv2D(channels, kernel_size, padding=1, use_bias=False),
                nn.BatchNorm(),
                nn.LeakyReLU(0.1)
            )
Example #9
Source File: darknet.py From panoptic-fpn-gluon with Apache License 2.0
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm, norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(nn.Conv2D(channel, kernel_size=kernel,
                       strides=stride, padding=padding, use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9,
                        **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell
Example #10
Source File: train_cgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
    super(NLayerDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    kw = 4
    padw = 1
    with self.name_scope():
        self.model.add(
            nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
            nn.LeakyReLU(0.2),
        )
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult = min(2**n, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
        nf_mult = min(2**n_layers, 8)
        self.model.add(
            nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
            nn.InstanceNorm(),
            nn.LeakyReLU(0.2),
        )
        self.model.add(
            nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
        )
        if use_sigmoid:
            self.model.add(nn.Activation('sigmoid'))
Example #11
Source File: train_wgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
    super(DCGAN_D_nobn, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    with self.name_scope():
        main = nn.Sequential()
        # input is nc x isize x isize
        main.add(nn.Conv2D(in_channels=nc, channels=ndf, kernel_size=4,
                           strides=2, padding=1, use_bias=False,
                           prefix='initial.conv.{0}-{1}'.format(nc, ndf)))
        main.add(nn.LeakyReLU(0.2, prefix='initial.relu.{0}'.format(ndf)))
        csize, cndf = isize / 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cndf, channels=cndf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cndf)))
            main.add(nn.LeakyReLU(0.2,
                                  prefix='extra-layers-{0}.{1}.relu'.format(t, cndf)))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add(nn.Conv2D(in_channels=in_feat, channels=out_feat, kernel_size=4,
                               strides=2, padding=1, use_bias=False,
                               prefix='pyramid.{0}-{1}.conv'.format(in_feat, out_feat)))
            main.add(nn.LeakyReLU(0.2, prefix='pyramid.{0}.relu'.format(out_feat)))
            cndf = cndf * 2
            csize = csize / 2

        # state size. K x 4 x 4
        main.add(nn.Conv2D(in_channels=cndf, channels=1, kernel_size=4,
                           strides=1, padding=0, use_bias=False,
                           prefix='final.{0}-{1}.conv'.format(cndf, 1)))
    self.main = main
Example #12
Source File: train_wgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
    super(DCGAN_G, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2

    with self.name_scope():
        main = nn.Sequential()
        # input is Z, going into a convolution
        main.add(nn.Conv2DTranspose(in_channels=nz, channels=cngf, kernel_size=4,
                                    strides=1, padding=0, use_bias=False,
                                    prefix='initial.{0}-{1}.convt'.format(nz, cngf)))
        main.add(nn.BatchNorm(in_channels=cngf,
                              prefix='initial.{0}.batchnorm'.format(cngf)))
        main.add(nn.LeakyReLU(0, prefix='initial.{0}.relu'.format(cngf)))

        csize, cndf = 4, cngf
        while csize < isize // 2:
            main.add(nn.Conv2DTranspose(in_channels=cngf, channels=cngf // 2,
                                        kernel_size=4, strides=2, padding=1,
                                        use_bias=False,
                                        prefix='pyramid.{0}-{1}.convt'.format(cngf, cngf // 2)))
            main.add(nn.BatchNorm(in_channels=cngf // 2,
                                  prefix='pyramid.{0}.batchnorm'.format(cngf // 2)))
            main.add(nn.LeakyReLU(0, prefix='pyramid.{0}.relu'.format(cngf // 2)))
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cngf, channels=cngf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cngf)))
            main.add(nn.BatchNorm(in_channels=cngf,
                                  prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cngf)))
            main.add(nn.LeakyReLU(0, prefix='extra-layers-{0}.{1}.relu'.format(t, cngf)))

        main.add(nn.Conv2DTranspose(in_channels=cngf, channels=nc, kernel_size=4,
                                    strides=2, padding=1, use_bias=False,
                                    activation='tanh',
                                    prefix='final.{0}-{1}.convt'.format(cngf, nc)))
    self.main = main
Example #13
Source File: train_wgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
    super(DCGAN_D, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    with self.name_scope():
        main = nn.Sequential()
        # input is nc x isize x isize
        main.add(nn.Conv2D(in_channels=nc, channels=ndf, kernel_size=4,
                           strides=2, padding=1, use_bias=False,
                           prefix='initial.conv.{0}-{1}'.format(nc, ndf)))
        main.add(nn.LeakyReLU(0.2, prefix='initial.relu.{0}'.format(ndf)))
        csize, cndf = isize / 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cndf, channels=cndf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cndf)))
            main.add(nn.BatchNorm(in_channels=cndf,
                                  prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cndf)))
            main.add(nn.LeakyReLU(0.2,
                                  prefix='extra-layers-{0}.{1}.relu'.format(t, cndf)))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add(nn.Conv2D(in_channels=in_feat, channels=out_feat, kernel_size=4,
                               strides=2, padding=1, use_bias=False,
                               prefix='pyramid.{0}-{1}.conv'.format(in_feat, out_feat)))
            main.add(nn.BatchNorm(in_channels=out_feat,
                                  prefix='pyramid.{0}.batchnorm'.format(out_feat)))
            main.add(nn.LeakyReLU(0.2, prefix='pyramid.{0}.relu'.format(out_feat)))
            cndf = cndf * 2
            csize = csize / 2

        # state size. K x 4 x 4
        main.add(nn.Conv2D(in_channels=cndf, channels=1, kernel_size=4,
                           strides=1, padding=0, use_bias=False,
                           prefix='final.{0}-{1}.conv'.format(cndf, 1)))
    self.main = main
Example #14
Source File: train_srgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self):
    super(SRDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    self.res_block = nn.HybridSequential()
    df_dim = 64
    with self.name_scope():
        self.model.add(
            nn.Conv2D(df_dim, 4, 2, 1),
            nn.LeakyReLU(0.2)
        )
        for i in [2, 4, 8, 16, 32]:
            self.model.add(ConvBlock(df_dim * i))
        self.model.add(ConvBlock(df_dim * 16, 1, 1, padding=0))
        self.model.add(
            nn.Conv2D(df_dim * 8, 1, 1, use_bias=False),
            nn.BatchNorm()
        )

        self.res_block.add(
            ConvBlock(df_dim * 2, 1, 1),
            ConvBlock(df_dim * 2, 3, 1),
            nn.Conv2D(df_dim * 8, 3, 1, use_bias=False),
            nn.BatchNorm()
        )
    self.lrelu = nn.LeakyReLU(0.2)
    self.flatten = nn.Flatten()
    self.dense = nn.Dense(1)
Example #15
Source File: train_srgan.py From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, filter_num, kernel_size=4, stride=2, padding=1):
    super(ConvBlock, self).__init__()
    self.model = nn.HybridSequential()
    with self.name_scope():
        self.model.add(
            nn.Conv2D(filter_num, kernel_size, stride, padding, use_bias=False),
            nn.BatchNorm(),
            nn.LeakyReLU(0.2),
        )
Example #16
Source File: yolo3.py From MobileFace with MIT License
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm, norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(nn.Conv2D(channel, kernel_size=kernel,
                       strides=stride, padding=padding, use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9,
                        **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell
Example #17
Source File: reldn.py From dgl with Apache License 2.0
def __init__(self, n_classes):
    super(EdgeSpatial, self).__init__()
    self.mlp = nn.Sequential()
    self.mlp.add(nn.Dense(64))
    self.mlp.add(nn.LeakyReLU(0.1))
    self.mlp.add(nn.Dense(64))
    self.mlp.add(nn.LeakyReLU(0.1))
    self.mlp.add(nn.Dense(n_classes))
Example #18
Source File: train_wgan.py From gluon-cv with Apache License 2.0
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
    super(DCGAN_D_nobn, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    with self.name_scope():
        main = nn.Sequential()
        # input is nc x isize x isize
        main.add(nn.Conv2D(in_channels=nc, channels=ndf, kernel_size=4,
                           strides=2, padding=1, use_bias=False,
                           prefix='initial.conv.{0}-{1}'.format(nc, ndf)))
        main.add(nn.LeakyReLU(0.2, prefix='initial.relu.{0}'.format(ndf)))
        csize, cndf = isize / 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cndf, channels=cndf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cndf)))
            main.add(nn.LeakyReLU(0.2,
                                  prefix='extra-layers-{0}.{1}.relu'.format(t, cndf)))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add(nn.Conv2D(in_channels=in_feat, channels=out_feat, kernel_size=4,
                               strides=2, padding=1, use_bias=False,
                               prefix='pyramid.{0}-{1}.conv'.format(in_feat, out_feat)))
            main.add(nn.LeakyReLU(0.2, prefix='pyramid.{0}.relu'.format(out_feat)))
            cndf = cndf * 2
            csize = csize / 2

        # state size. K x 4 x 4
        main.add(nn.Conv2D(in_channels=cndf, channels=1, kernel_size=4,
                           strides=1, padding=0, use_bias=False,
                           prefix='final.{0}-{1}.conv'.format(cndf, 1)))
    self.main = main
Example #19
Source File: train_srgan.py From gluon-cv with Apache License 2.0
def __init__(self, filter_num, kernel_size=4, stride=2, padding=1):
    super(ConvBlock, self).__init__()
    self.model = nn.HybridSequential()
    with self.name_scope():
        self.model.add(
            nn.Conv2D(filter_num, kernel_size, stride, padding, use_bias=False),
            nn.BatchNorm(),
            nn.LeakyReLU(0.2),
        )
Example #20
Source File: train_srgan.py From gluon-cv with Apache License 2.0
def __init__(self):
    super(SRDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    self.res_block = nn.HybridSequential()
    df_dim = 64
    with self.name_scope():
        self.model.add(
            nn.Conv2D(df_dim, 4, 2, 1),
            nn.LeakyReLU(0.2)
        )
        for i in [2, 4, 8, 16, 32]:
            self.model.add(ConvBlock(df_dim * i))
        self.model.add(ConvBlock(df_dim * 16, 1, 1, padding=0))
        self.model.add(
            nn.Conv2D(df_dim * 8, 1, 1, use_bias=False),
            nn.BatchNorm()
        )

        self.res_block.add(
            ConvBlock(df_dim * 2, 1, 1),
            ConvBlock(df_dim * 2, 3, 1),
            nn.Conv2D(df_dim * 8, 3, 1, use_bias=False),
            nn.BatchNorm()
        )
    self.lrelu = nn.LeakyReLU(0.2)
    self.flatten = nn.Flatten()
    self.dense = nn.Dense(1)
Example #21
Source File: train_wgan.py From gluon-cv with Apache License 2.0
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
    super(DCGAN_D, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    with self.name_scope():
        main = nn.Sequential()
        # input is nc x isize x isize
        main.add(nn.Conv2D(in_channels=nc, channels=ndf, kernel_size=4,
                           strides=2, padding=1, use_bias=False,
                           prefix='initial.conv.{0}-{1}'.format(nc, ndf)))
        main.add(nn.LeakyReLU(0.2, prefix='initial.relu.{0}'.format(ndf)))
        csize, cndf = isize / 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cndf, channels=cndf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cndf)))
            main.add(nn.BatchNorm(in_channels=cndf,
                                  prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cndf)))
            main.add(nn.LeakyReLU(0.2,
                                  prefix='extra-layers-{0}.{1}.relu'.format(t, cndf)))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add(nn.Conv2D(in_channels=in_feat, channels=out_feat, kernel_size=4,
                               strides=2, padding=1, use_bias=False,
                               prefix='pyramid.{0}-{1}.conv'.format(in_feat, out_feat)))
            main.add(nn.BatchNorm(in_channels=out_feat,
                                  prefix='pyramid.{0}.batchnorm'.format(out_feat)))
            main.add(nn.LeakyReLU(0.2, prefix='pyramid.{0}.relu'.format(out_feat)))
            cndf = cndf * 2
            csize = csize / 2

        # state size. K x 4 x 4
        main.add(nn.Conv2D(in_channels=cndf, channels=1, kernel_size=4,
                           strides=1, padding=0, use_bias=False,
                           prefix='final.{0}-{1}.conv'.format(cndf, 1)))
    self.main = main
Example #22
Source File: train_wgan.py From gluon-cv with Apache License 2.0
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
    super(DCGAN_G, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2

    with self.name_scope():
        main = nn.Sequential()
        # input is Z, going into a convolution
        main.add(nn.Conv2DTranspose(in_channels=nz, channels=cngf, kernel_size=4,
                                    strides=1, padding=0, use_bias=False,
                                    prefix='initial.{0}-{1}.convt'.format(nz, cngf)))
        main.add(nn.BatchNorm(in_channels=cngf,
                              prefix='initial.{0}.batchnorm'.format(cngf)))
        main.add(nn.LeakyReLU(0, prefix='initial.{0}.relu'.format(cngf)))

        csize, cndf = 4, cngf
        while csize < isize // 2:
            main.add(nn.Conv2DTranspose(in_channels=cngf, channels=cngf // 2,
                                        kernel_size=4, strides=2, padding=1,
                                        use_bias=False,
                                        prefix='pyramid.{0}-{1}.convt'.format(cngf, cngf // 2)))
            main.add(nn.BatchNorm(in_channels=cngf // 2,
                                  prefix='pyramid.{0}.batchnorm'.format(cngf // 2)))
            main.add(nn.LeakyReLU(0, prefix='pyramid.{0}.relu'.format(cngf // 2)))
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cngf, channels=cngf, kernel_size=3,
                               strides=1, padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cngf)))
            main.add(nn.BatchNorm(in_channels=cngf,
                                  prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cngf)))
            main.add(nn.LeakyReLU(0, prefix='extra-layers-{0}.{1}.relu'.format(t, cngf)))

        main.add(nn.Conv2DTranspose(in_channels=cngf, channels=nc, kernel_size=4,
                                    strides=2, padding=1, use_bias=False,
                                    activation='tanh',
                                    prefix='final.{0}-{1}.convt'.format(cngf, nc)))
    self.main = main
Example #23
Source File: darknet.py From imgclsmob with MIT License
def dark_convYxY(in_channels, out_channels, bn_use_global_stats, alpha, pointwise):
    """
    DarkNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm
        for BatchNorm layers.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    """
    if pointwise:
        return conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha))
    else:
        return conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha))
Example #24
Source File: train_cgan.py From gluon-cv with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
    super(NLayerDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    kw = 4
    padw = 1
    with self.name_scope():
        self.model.add(
            nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
            nn.LeakyReLU(0.2),
        )
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult = min(2**n, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
        nf_mult = min(2**n_layers, 8)
        self.model.add(
            nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
            nn.InstanceNorm(),
            nn.LeakyReLU(0.2),
        )
        self.model.add(
            nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
        )
        if use_sigmoid:
            self.model.add(nn.Activation('sigmoid'))
Example #25
Source File: darknet.py From gluon-cv with Apache License 2.0
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm, norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(nn.Conv2D(channel, kernel_size=kernel,
                       strides=stride, padding=padding, use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9,
                        **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell
Example #26
Source File: ibppose_coco.py From imgclsmob with MIT License
def __init__(self, passes, backbone_out_channels, outs_channels, depth, growth_rate,
             use_bn, in_channels=3, in_size=(256, 256), **kwargs):
    super(IbpPose, self).__init__(**kwargs)
    self.in_size = in_size
    activation = (lambda: nn.LeakyReLU(alpha=0.01))

    with self.name_scope():
        self.backbone = IbpBackbone(
            in_channels=in_channels,
            out_channels=backbone_out_channels,
            activation=activation)

        self.decoder = nn.HybridSequential(prefix="")
        for i in range(passes):
            merge = (i != passes - 1)
            self.decoder.add(IbpPass(
                channels=backbone_out_channels,
                mid_channels=outs_channels,
                depth=depth,
                growth_rate=growth_rate,
                merge=merge,
                use_bn=use_bn,
                activation=activation))
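Note that `activation` here is a zero-argument factory rather than a layer instance, so each sub-block constructs its own independent LeakyReLU instead of sharing one. A minimal illustration of the pattern (names are illustrative, not from the repo):

make_act = lambda: nn.LeakyReLU(alpha=0.01)
act_a, act_b = make_act(), make_act()  # two separate blocks, no shared name scope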
Example #27
Source File: reldn.py From dgl with Apache License 2.0
def __init__(self, n_classes, vis_feat_dim=7*7*3):
    super(EdgeVisual, self).__init__()
    self.dim_in = vis_feat_dim
    self.mlp_joint = nn.Sequential()
    self.mlp_joint.add(nn.Dense(vis_feat_dim // 2))
    self.mlp_joint.add(nn.LeakyReLU(0.1))
    self.mlp_joint.add(nn.Dense(vis_feat_dim // 3))
    self.mlp_joint.add(nn.LeakyReLU(0.1))
    self.mlp_joint.add(nn.Dense(n_classes))
    self.mlp_sub = nn.Dense(n_classes)
    self.mlp_ob = nn.Dense(n_classes)
Example #28
Source File: gatconv.py From dgl with Apache License 2.0
def __init__(self, in_feats, out_feats, num_heads, feat_drop=0., attn_drop=0.,
             negative_slope=0.2, residual=False, activation=None):
    super(GATConv, self).__init__()
    self._num_heads = num_heads
    self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
    self._in_feats = in_feats
    self._out_feats = out_feats
    with self.name_scope():
        if isinstance(in_feats, tuple):
            self.fc_src = nn.Dense(out_feats * num_heads, use_bias=False,
                                   weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                   in_units=self._in_src_feats)
            self.fc_dst = nn.Dense(out_feats * num_heads, use_bias=False,
                                   weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                   in_units=self._in_dst_feats)
        else:
            self.fc = nn.Dense(out_feats * num_heads, use_bias=False,
                               weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                               in_units=in_feats)
        self.attn_l = self.params.get('attn_l', shape=(1, num_heads, out_feats),
                                      init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
        self.attn_r = self.params.get('attn_r', shape=(1, num_heads, out_feats),
                                      init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if residual:
            if in_feats != out_feats:
                self.res_fc = nn.Dense(out_feats * num_heads, use_bias=False,
                                       weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                       in_units=in_feats)
            else:
                self.res_fc = Identity()
        else:
            self.res_fc = None
        self.activation = activation
Example #29
Source File: model.py From YOLO with MIT License
def __init__(self, layer_num, class_num, class_name, s=7, b=2, **kwargs):
    """
    :param layer_num:
    :param class_num:
    :param s:
    :param b:
    :param verbose:
    :param kwargs:
    """
    super(Yolo, self).__init__(**kwargs)
    self._s = s
    self._b = b
    self._class_num = class_num
    self._layer_num = layer_num
    self._class_name = class_name
    assert len(self._class_name) == self._class_num
    with self.name_scope():
        self.out = nn.HybridSequential()

        self.out.add(nn.Conv2D(32, kernel_size=3, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.05))
        self.out.add(nn.MaxPool2D(2))

        self.out.add(nn.Conv2D(32, kernel_size=3, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.1))
        self.out.add(nn.MaxPool2D(2))

        self.out.add(nn.Conv2D(16, kernel_size=1, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.05))
        self.out.add(nn.MaxPool2D(2))

        self.out.add(nn.Conv2D(16, kernel_size=3, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.05))
        self.out.add(nn.MaxPool2D(2))

        self.out.add(nn.Conv2D(16, kernel_size=1, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.05))

        self.out.add(nn.Conv2D(16, kernel_size=3, strides=1, padding=1))
        self.out.add(nn.BatchNorm())
        self.out.add(nn.LeakyReLU(0.05))

        self.out.add(nn.Flatten())
        self.out.add(nn.Dense(128))
        self.out.add(nn.LeakyReLU(0.05))
        self.out.add(nn.Dense(self._s * self._s * (self._b * 5 + class_num)))
Example #30
Source File: darknet53.py From imgclsmob with MIT License
def __init__(self, channels, init_block_channels, alpha=0.1, bn_use_global_stats=False,
             in_channels=3, in_size=(224, 224), classes=1000, **kwargs):
    super(DarkNet53, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes

    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha)))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    if j == 0:
                        stage.add(conv3x3_block(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=2,
                            bn_use_global_stats=bn_use_global_stats,
                            activation=nn.LeakyReLU(alpha=alpha)))
                    else:
                        stage.add(DarkUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            alpha=alpha))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))

        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))