Python mxnet.gluon.nn.InstanceNorm() Examples

The following are 13 code examples of mxnet.gluon.nn.InstanceNorm(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module mxnet.gluon.nn.
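As a quick orientation before the examples, here is a minimal sketch of how InstanceNorm is typically placed right after a convolution in a Gluon block. The channel counts and input shape below are illustrative only and are not taken from any of the listed projects.

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
with net.name_scope():
    net.add(
        nn.Conv2D(channels=16, kernel_size=3, padding=1),
        nn.InstanceNorm(),        # normalizes each sample per channel, independent of batch statistics
        nn.Activation('relu')
    )
net.initialize()

x = mx.nd.random.uniform(shape=(2, 3, 32, 32))  # batch of 2 RGB 32x32 images
y = net(x)
print(y.shape)  # (2, 16, 32, 32)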
Example #1
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_instancenorm():
    layer = nn.InstanceNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10)) 
Example #2
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def __init__(self, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.output_nc = output_nc
        self.ngf = ngf
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(ngf, kernel_size=7, padding=0),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )

            n_downsampling = 2
            for i in range(n_downsampling):
                mult = 2**i
                self.model.add(
                    nn.Conv2D(ngf * mult * 2, kernel_size=3, strides=2, padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )

            mult = 2**n_downsampling
            for i in range(n_blocks):
                self.model.add(
                    ResnetBlock(ngf * mult, padding_type=padding_type, use_dropout=use_dropout)
                )

            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                self.model.add(
                    nn.Conv2DTranspose(int(ngf * mult / 2), kernel_size=3, strides=2, padding=1, output_padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(output_nc, kernel_size=7, padding=0),
                nn.Activation('tanh')
            ) 
Example #3
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def build_conv_block(self, dim, padding_type, use_dropout):
        conv_block = nn.HybridSequential()
        p = 0
        with self.name_scope():
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' % padding_type)

            conv_block.add(
                nn.Conv2D(dim, kernel_size=3, padding=p),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
            if use_dropout:
                conv_block.add(nn.Dropout(0.5))

            p = 0
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' % padding_type)
            conv_block.add(
                nn.Conv2D(dim, kernel_size=3, padding=p),
                nn.InstanceNorm()
            )

        return conv_block 
Example #4
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        self.model = nn.HybridSequential()
        kw = 4
        padw = 1
        with self.name_scope():
            self.model.add(
                nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
                nn.LeakyReLU(0.2),
            )

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult = min(2**n, 8)
                self.model.add(
                    nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                    nn.InstanceNorm(),
                    nn.LeakyReLU(0.2),
                )

            nf_mult = min(2**n_layers, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
            self.model.add(
                nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
            )
            if use_sigmoid:
                self.model.add(nn.Activation('sigmoid')) 
Example #5
Source File: common.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 bn_use_global_stats=False,
                 first_fraction=0.5,
                 inst_first=True,
                 **kwargs):
        super(IBN, self).__init__(**kwargs)
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]

        if self.inst_first:
            self.inst_norm = nn.InstanceNorm(
                in_channels=h1_channels,
                scale=True)
            self.batch_norm = nn.BatchNorm(
                in_channels=h2_channels,
                use_global_stats=bn_use_global_stats)

        else:
            self.batch_norm = nn.BatchNorm(
                in_channels=h1_channels,
                use_global_stats=bn_use_global_stats)
            self.inst_norm = nn.InstanceNorm(
                in_channels=h2_channels,
                scale=True) 
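The __init__ above only registers the two normalization branches; a forward pass along the following lines would divide the channels according to split_sections and route each part to its own norm. This is an illustrative sketch, not the project's exact code (the project may use its own split helper).

def hybrid_forward(self, F, x):
    # Sketch: slice the channel axis into the two sections registered in __init__.
    x1 = F.slice_axis(x, axis=1, begin=0, end=self.split_sections[0])
    x2 = F.slice_axis(x, axis=1, begin=self.split_sections[0], end=None)
    if self.inst_first:
        x1 = self.inst_norm(x1)
        x2 = self.batch_norm(x2)
    else:
        x1 = self.batch_norm(x1)
        x2 = self.inst_norm(x2)
    return F.concat(x1, x2, dim=1)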
Example #6
Source File: ibnbresnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 activate=True,
                 **kwargs):
        super(IBNbConvBlock, self).__init__(**kwargs)
        self.activate = activate

        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            self.inst_norm = nn.InstanceNorm(
                in_channels=out_channels,
                scale=True)
            if self.activate:
                self.activ = nn.Activation("relu") 
Example #7
Source File: ibnbresnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_inst_norm,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNbResUnit, self).__init__(**kwargs)
        self.use_inst_norm = use_inst_norm
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                conv1_stride=False)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            if self.use_inst_norm:
                self.inst_norm = nn.InstanceNorm(
                    in_channels=out_channels,
                    scale=True)
            self.activ = nn.Activation("relu") 
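For context, the characteristic detail of this IBN-b unit is that InstanceNorm is applied after the residual addition. A forward pass might look like the following sketch (illustrative, not the project's exact code):

def hybrid_forward(self, F, x):
    # Sketch: standard residual unit, with optional InstanceNorm after the addition.
    identity = self.identity_conv(x) if self.resize_identity else x
    x = self.body(x) + identity
    if self.use_inst_norm:
        x = self.inst_norm(x)
    return self.activ(x)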
Example #8
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.output_nc = output_nc
        self.ngf = ngf
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(ngf, kernel_size=7, padding=0),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )

            n_downsampling = 2
            for i in range(n_downsampling):
                mult = 2**i
                self.model.add(
                    nn.Conv2D(ngf * mult * 2, kernel_size=3, strides=2, padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )

            mult = 2**n_downsampling
            for i in range(n_blocks):
                self.model.add(
                    ResnetBlock(ngf * mult, padding_type=padding_type, use_dropout=use_dropout)
                )

            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                self.model.add(
                    nn.Conv2DTranspose(int(ngf * mult / 2), kernel_size=3, strides=2, padding=1, output_padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(output_nc, kernel_size=7, padding=0),
                nn.Activation('tanh')
            ) 
Example #9
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def build_conv_block(self, dim, padding_type, use_dropout):
        conv_block = nn.HybridSequential()
        p = 0
        with self.name_scope():
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' % padding_type)

            conv_block.add(
                nn.Conv2D(dim, kernel_size=3, padding=p),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
            if use_dropout:
                conv_block.add(nn.Dropout(0.5))

            p = 0
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' % padding_type)
            conv_block.add(
                nn.Conv2D(dim, kernel_size=3, padding=p),
                nn.InstanceNorm()
            )

        return conv_block 
Example #10
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        self.model = nn.HybridSequential()
        kw = 4
        padw = 1
        with self.name_scope():
            self.model.add(
                nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
                nn.LeakyReLU(0.2),
            )

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult = min(2**n, 8)
                self.model.add(
                    nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                    nn.InstanceNorm(),
                    nn.LeakyReLU(0.2),
                )

            nf_mult = min(2**n_layers, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
            self.model.add(
                nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
            )
            if use_sigmoid:
                self.model.add(nn.Activation('sigmoid')) 
Example #11
Source File: test_gluon.py    From SNIPER-mxnet with Apache License 2.0
def test_instancenorm():
    layer = nn.InstanceNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10)) 
Example #12
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2D(inner_nc, kernel_size=4, strides=2, padding=1)
        downrelu = nn.LeakyReLU(0.2)
        downnorm = nn.InstanceNorm()
        uprelu = nn.Activation('relu')
        upnorm = nn.InstanceNorm()
        self.model = nn.HybridSequential()
        with self.model.name_scope():
            if outermost:
                self.model.add(
                    downconv
                )
                if submodule is not None:
                    self.model.add(
                        submodule
                    )
                self.model.add(
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    nn.Activation('tanh')
                )
            elif innermost:
                self.model.add(
                    downrelu,
                    downconv,
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    upnorm
                )
            else:
                self.model.add(
                    downrelu,
                    downconv,
                    downnorm,
                )
                if submodule is not None:
                    self.model.add(
                        submodule
                    )
                self.model.add(
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    upnorm,
                )
                if use_dropout:
                    self.model.add(nn.Dropout(0.5)) 
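The U-Net skip connection itself is not shown in the __init__ above; a forward pass might concatenate the block's input with its output along the channel axis, roughly as in this sketch (illustrative only; the project's actual code and concatenation order may differ):

def hybrid_forward(self, F, x):
    if self.outermost:
        return self.model(x)
    # Sketch: skip connection, concatenating input and output feature maps.
    return F.concat(self.model(x), x, dim=1)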
Example #13
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2D(inner_nc, kernel_size=4, strides=2, padding=1)
        downrelu = nn.LeakyReLU(0.2)
        downnorm = nn.InstanceNorm()
        uprelu = nn.Activation('relu')
        upnorm = nn.InstanceNorm()
        self.model = nn.HybridSequential()
        with self.model.name_scope():
            if outermost:
                self.model.add(
                    downconv
                )
                if submodule is not None:
                    self.model.add(
                        submodule
                    )
                self.model.add(
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    nn.Activation('tanh')
                )
            elif innermost:
                self.model.add(
                    downrelu,
                    downconv,
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    upnorm
                )
            else:
                self.model.add(
                    downrelu,
                    downconv,
                    downnorm,
                )
                if submodule is not None:
                    self.model.add(
                        submodule
                    )
                self.model.add(
                    uprelu,
                    nn.Conv2DTranspose(outer_nc, kernel_size=4, strides=2, padding=1),
                    upnorm,
                )
                if use_dropout:
                    self.model.add(nn.Dropout(0.5))