Python mxnet.gluon.nn.HybridSequential() Examples

The following are 30 code examples of mxnet.gluon.nn.HybridSequential(), drawn from open-source projects. You can locate the original project and source file via the references above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.nn, or try the search function.
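All of these examples share the same basic pattern: construct the container, add child blocks (on MXNet 1.x, typically inside a name_scope()), initialize the parameters, and optionally call hybridize() to compile the block into a cached symbolic graph. As a quick orientation before the examples, here is a minimal self-contained sketch of that pattern; the layer sizes and input shape are arbitrary and not taken from any example below.

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
with net.name_scope():
    net.add(nn.Dense(64, activation='relu'),  # hidden layer
            nn.Dense(10))                     # output layer

net.initialize()   # allocate and initialize all parameters
net.hybridize()    # compile into a static graph on the first forward pass
out = net(mx.nd.ones((2, 20)))   # first call triggers shape inference
print(out.shape)   # (2, 10)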
Example #1
Source File: fdensenet.py    From insightface with MIT License
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Example #2
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_inline():
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))

    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    assert len_1 == len_2 + 2 
Example #3
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3) 
Example #4
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Example #5
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad() 
Example #6
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Example #7
Source File: aspp_temp.py    From Deep-Feature-Flow-Segmentation with MIT License
def __init__(self, prefix, entry_block3_stride, use_global_stats, norm_layer):
        super(EntryFlow, self).__init__(prefix)

        with self.name_scope():
            self.conv1 = nn.HybridSequential(prefix='conv1_')
            with self.conv1.name_scope():
                self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=2, padding=1, use_bias=False, prefix='1_'))
                self.conv1.add(norm_layer(in_channels=32, use_global_stats=use_global_stats, prefix='1_BN_'))
                self.conv1.add(nn.Activation("relu"))
            self.conv2 = nn.HybridSequential(prefix='conv1_')
            with self.conv2.name_scope():
                self.conv2.add(nn.Conv2D(64, kernel_size=3, padding=1, use_bias=False, prefix='2_'))
                self.conv2.add(norm_layer(in_channels=64, use_global_stats=use_global_stats, prefix='2_BN_'))
                self.conv2.add(nn.Activation("relu"))

            self.conv3 = XceptionBlock(filters_list=[128, 128, 128], kernel_size=3, strides=2,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=64, prefix='block1_')
            self.conv4 = XceptionBlock(filters_list=[256, 256, 256], kernel_size=3, strides=2, return_skip=True,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=128, prefix='block2_')
            self.conv5 = XceptionBlock(filters_list=[728, 728, 728], kernel_size=3, strides=entry_block3_stride,
                                       use_shortcut_conv=True, dilation=1, depth_activation=False, in_filters=256,
                                       norm_layer=norm_layer, use_global_stats=use_global_stats, prefix='block3_') 
Example #8
Source File: fdensenet.py    From insightface with MIT License
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out 
Example #9
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Example #10
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad() 
Example #11
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1) 
Example #12
Source File: resnet.py    From gluon-cv with Apache License 2.0
def __init__(self, depth, ctx, pretrained=True, num_classes=0):
        super(ResNet, self).__init__()
        self.pretrained = pretrained

        with self.name_scope():
            network = ResNet.__factory[depth](pretrained=pretrained, ctx=ctx).features[0:-1]
            network[-1][0].body[0]._kwargs['stride'] = (1, 1)
            network[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
            self.base = nn.HybridSequential()
            for n in network:
                self.base.add(n)

            self.avgpool = nn.GlobalAvgPool2D()
            self.flatten = nn.Flatten()
            self.bn = nn.BatchNorm(center=False, scale=True)
            self.bn.initialize(init=init.Zero(), ctx=ctx)

            self.classifier = nn.Dense(num_classes, use_bias=False)
            self.classifier.initialize(init=init.Normal(0.001), ctx=ctx) 
Example #13
Source File: train_srgan.py    From gluon-cv with Apache License 2.0
def __init__(self):
        super(SRGenerator, self).__init__()
        self.conv1 = nn.Conv2D(64, kernel_size=3, strides=1, padding=1, activation='relu')
        self.res_block = nn.HybridSequential()
        with self.name_scope():
            for i in range(16):
                self.res_block.add(
                    ResnetBlock()
                )

            self.res_block.add(
                nn.Conv2D(64, kernel_size=3, strides=1, padding=1, use_bias=False),
                nn.BatchNorm()
            )
        self.subpix_block1 = SubpixelBlock()
        self.subpix_block2 = SubpixelBlock()
        self.conv4 = nn.Conv2D(3, kernel_size=1, strides=1, activation='tanh') 
Example #14
Source File: residual_attentionnet.py    From gluon-cv with Apache License 2.0
def __init__(self, channels, size1=14, scale=(1, 2, 1),
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(AttentionModule_stage3, self).__init__(**kwargs)
        p, t, r = scale
        with self.name_scope():
            self.first_residual_blocks = nn.HybridSequential()
            _add_block(self.first_residual_blocks, ResidualBlock, p, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.trunk_branches = nn.HybridSequential()
            _add_block(self.trunk_branches, ResidualBlock, t, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

            self.softmax1_blocks = nn.HybridSequential()
            _add_block(self.softmax1_blocks, ResidualBlock, 2 * r, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.interpolation1 = UpsamplingBilinear2d(size=size1)

            self.softmax2_blocks = nn.HybridSequential()
            _add_sigmoid_layer(self.softmax2_blocks, channels, norm_layer, norm_kwargs)

            self.last_blocks = ResidualBlock(channels) 
Example #15
Source File: residual_attentionnet.py    From gluon-cv with Apache License 2.0
def __init__(self, channels, scale=(1, 2, 1), norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(AttentionModule_stage4, self).__init__(**kwargs)
        p, t, r = scale
        with self.name_scope():
            self.first_residual_blocks = nn.HybridSequential()
            _add_block(self.first_residual_blocks, ResidualBlock, p, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.trunk_branches = nn.HybridSequential()
            _add_block(self.trunk_branches, ResidualBlock, t, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.softmax1_blocks = nn.HybridSequential()
            _add_block(self.softmax1_blocks, ResidualBlock, 2 * r, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.softmax2_blocks = nn.HybridSequential()
            _add_sigmoid_layer(self.softmax2_blocks, channels, norm_layer, norm_kwargs)

            self.last_blocks = ResidualBlock(channels) 
Example #16
Source File: dla.py    From gluon-cv with Apache License 2.0
def _make_level(self, block, inplanes, planes, blocks, norm_layer, norm_kwargs, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.HybridSequential()
            downsample.add(*[
                nn.MaxPool2D(stride, strides=stride),
                nn.Conv2D(channels=planes, in_channels=inplanes,
                          kernel_size=1, strides=1, use_bias=False),
                norm_layer(in_channels=planes, **norm_kwargs)])

        layers = []
        layers.append(block(inplanes, planes, stride,
                            norm_layer=norm_layer, norm_kwargs=norm_kwargs, downsample=downsample))
        for _ in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=norm_layer, norm_kwargs=norm_kwargs))

        curr_level = nn.HybridSequential()
        curr_level.add(*layers)
        return curr_level 
Example #17
Source File: fastscnn.py    From gluon-cv with Apache License 2.0
def __init__(self, dw_channels, out_channels, stride=1,
                 norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_DSConv, self).__init__()
        with self.name_scope():
            self.conv = nn.HybridSequential()
            self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=dw_channels,
                                    kernel_size=3, strides=stride,
                                    padding=1, groups=dw_channels, use_bias=False))
            self.conv.add(norm_layer(in_channels=dw_channels,
                                     **({} if norm_kwargs is None else norm_kwargs)))
            self.conv.add(nn.Activation('relu'))
            self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=out_channels,
                                    kernel_size=1, use_bias=False))
            self.conv.add(norm_layer(in_channels=out_channels,
                                     **({} if norm_kwargs is None else norm_kwargs)))
            self.conv.add(nn.Activation('relu')) 
Example #18
Source File: deconv_dla.py    From gluon-cv with Apache License 2.0
def __init__(self, startp, channels, scales, in_channels=None,
                 use_dcnv2=False, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(DLAUp, self).__init__(**kwargs)
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        with self.name_scope():
            self.idas = nn.HybridSequential('ida')
            for i in range(len(channels) - 1):
                j = -i - 2
                self.idas.add(IDAUp(channels[j], in_channels[j:],
                                    scales[j:] // scales[j], use_dcnv2=use_dcnv2,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs))
                scales[j + 1:] = scales[j]
                in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] 
Example #19
Source File: squeezenet.py    From gluon-cv with Apache License 2.0
def _make_fire(squeeze_channels, expand1x1_channels, expand3x3_channels):
    out = nn.HybridSequential(prefix='')
    out.add(_make_fire_conv(squeeze_channels, 1))

    paths = HybridConcurrent(axis=1, prefix='')
    paths.add(_make_fire_conv(expand1x1_channels, 1))
    paths.add(_make_fire_conv(expand3x3_channels, 3, 1))
    out.add(paths)

    return out 
Example #20
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def __init__(self, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.output_nc = output_nc
        self.ngf = ngf
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(ngf, kernel_size=7, padding=0),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )

            n_downsampling = 2
            for i in range(n_downsampling):
                mult = 2**i
                self.model.add(
                    nn.Conv2D(ngf * mult * 2, kernel_size=3, strides=2, padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )

            mult = 2**n_downsampling
            for i in range(n_blocks):
                self.model.add(
                    ResnetBlock(ngf * mult, padding_type=padding_type, use_dropout=use_dropout)
                )

            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                self.model.add(
                    nn.Conv2DTranspose(int(ngf * mult / 2), kernel_size=3, strides=2, padding=1, output_padding=1),
                    nn.InstanceNorm(),
                    nn.Activation('relu')
                )
            self.model.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(output_nc, kernel_size=7, padding=0),
                nn.Activation('tanh')
            ) 
Example #21
Source File: train_srgan.py    From gluon-cv with Apache License 2.0
def __init__(self, filter_num, kernel_size=4, stride=2, padding=1):
        super(ConvBlock, self).__init__()
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(
                nn.Conv2D(filter_num, kernel_size, stride, padding, use_bias=False),
                nn.BatchNorm(),
                nn.LeakyReLU(0.2),
            ) 
Example #22
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_req():
    data = mx.nd.random.uniform(shape=(1,3,224,224))
    label = mx.nd.random.uniform(shape=(1))
    label[:] = 1
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    net = nn.HybridSequential()
    net1 = nn.HybridSequential()
    net1.add(nn.Dense(4))
    net2 = nn.HybridSequential()
    net2.add(nn.Dense(3))
    net2.add(nn.Dense(2))
    net.add(net1)
    net.add(net2)
    net.initialize()

    net.hybridize()

    for v in net.collect_params().values():
        v.grad_req = 'add'

    net.collect_params().zero_grad()
    with mx.autograd.record():
        pred = net(data)
        l = loss(pred, label)
        l.backward()
        grad = net[0][0].weight.grad().mean().asnumpy()
        # run twice to check req = add
        pred = net(data)
        l = loss(pred, label)
        l.backward()

    grad_double = net[0][0].weight.grad().mean().asnumpy()
    assert_almost_equal(grad * 2, grad_double) 
Example #23
Source File: train_srgan.py    From gluon-cv with Apache License 2.0
def __init__(self):
        super(ResnetBlock, self).__init__()
        self.conv_block = nn.HybridSequential()
        with self.name_scope():
            self.conv_block.add(
                nn.Conv2D(64, kernel_size=3, strides=1, padding=1, use_bias=False),
                nn.BatchNorm(),
                nn.Activation('relu'),
                nn.Conv2D(64, kernel_size=3, strides=1, padding=1, use_bias=False),
                nn.BatchNorm()
            ) 
Example #24
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_apply():
    global called_blocks
    called_blocks = []

    def record_name(block):
        global called_blocks
        called_blocks.append(block.name)

    block = nn.HybridSequential(prefix='test_')
    with block.name_scope():
        block.add(nn.Dense(10))
        block.add(nn.Dropout(0.5))
    block.apply(record_name)

    assert called_blocks == ['test_dense0', 'test_dropout0', 'test'] 
Example #25
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_hybrid_stale_cache():
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))

    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))

    net.add(mx.gluon.nn.Flatten())
    assert net(mx.nd.ones((2,3,5))).shape == (2, 30)

    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
        net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))

    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.nd.ones((2,3,5))).shape == (2, 10) 
Example #26
Source File: fastscnn.py    From gluon-cv with Apache License 2.0
def _make_layer(self, block, inplanes, planes, blocks, t=6, stride=1,
                    norm_layer=nn.BatchNorm, norm_kwargs=None):
        layers = nn.HybridSequential()
        with layers.name_scope():
            layers.add(block(inplanes, planes, t, stride, norm_layer, norm_kwargs))
            for i in range(1, blocks):
                layers.add(block(planes, planes, t, 1, norm_layer, norm_kwargs))
        return layers 
Example #27
Source File: fastscnn.py    From gluon-cv with Apache License 2.0
def __init__(self, in_channels, out_channels, t=6, stride=2,
                 norm_layer=nn.BatchNorm, norm_kwargs=None):
        super(LinearBottleneck, self).__init__()
        self.use_shortcut = stride == 1 and in_channels == out_channels
        with self.name_scope():
            self.block = nn.HybridSequential()
            self.block.add(_ConvBNReLU(in_channels, in_channels * t, 1,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.block.add(_DWConv(in_channels * t, in_channels * t, stride,
                                   norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.block.add(nn.Conv2D(in_channels=in_channels * t, channels=out_channels,
                                     kernel_size=1, use_bias=False))
            self.block.add(norm_layer(in_channels=out_channels,
                                      **({} if norm_kwargs is None else norm_kwargs))) 
Example #28
Source File: fastscnn.py    From gluon-cv with Apache License 2.0
def __init__(self, dw_channels, out_channels, stride=1,
                 norm_layer=nn.BatchNorm, norm_kwargs=None):
        super(_DWConv, self).__init__()
        with self.name_scope():
            self.conv = nn.HybridSequential()
            self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=out_channels, kernel_size=3,
                                    strides=stride, padding=1, groups=dw_channels, use_bias=False))
            self.conv.add(norm_layer(in_channels=out_channels,
                                     **({} if norm_kwargs is None else norm_kwargs)))
            self.conv.add(nn.Activation('relu')) 
Example #29
Source File: fastscnn.py    From gluon-cv with Apache License 2.0
def _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs):
    block = nn.HybridSequential()
    with block.name_scope():
        block.add(nn.Conv2D(in_channels=in_channels, channels=out_channels,
                            kernel_size=1, use_bias=False))
        block.add(norm_layer(in_channels=out_channels,
                             **({} if norm_kwargs is None else norm_kwargs)))
        block.add(nn.Activation('relu'))
    return block 
Example #30
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        self.model = nn.HybridSequential()
        kw = 4
        padw = 1
        with self.name_scope():
            self.model.add(
                nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
                nn.LeakyReLU(0.2),
            )

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult = min(2**n, 8)
                self.model.add(
                    nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                    nn.InstanceNorm(),
                    nn.LeakyReLU(0.2),
                )

            nf_mult = min(2**n_layers, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
            self.model.add(
                nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
            )
            if use_sigmoid:
                self.model.add(nn.Activation('sigmoid'))