Python mxnet.gluon.nn.AvgPool2D() Examples

The following are 30 code examples of mxnet.gluon.nn.AvgPool2D(). Each example is taken from an open-source project; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.nn, or try the search function.
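Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how an AvgPool2D block is typically constructed and applied; the shapes are illustrative only.

import mxnet as mx
from mxnet.gluon import nn

# 3x3 average pooling with stride 1 and padding 1, as used in many of the
# cells below; with these settings the spatial size is preserved.
pool = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

x = mx.nd.random.uniform(shape=(1, 16, 32, 32))  # (batch, channels, height, width)
y = pool(x)  # pooling layers have no parameters, so no initialization is needed
print(y.shape)  # (1, 16, 32, 32)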
Example #1
Source File: googlenet.py    From gluon-cv with Apache License 2.0
def _make_branch(use_pool, norm_layer, norm_kwargs, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=1, padding=1))
    setting_names = ['in_channels', 'channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                if setting_names[i] == 'in_channels':
                    in_channels = value
                elif setting_names[i] == 'channels':
                    channels = value
                else:
                    kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(in_channels, channels, norm_layer, norm_kwargs, **kwargs))
    return out 
Example #2
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Example #3
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad() 
Example #4
Source File: fnasnet.py    From insightface with MIT License
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Example #5
Source File: fnasnet.py    From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1) 
Example #6
Source File: fdensenet.py    From insightface with MIT License
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Example #7
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1) 
Example #8
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Example #9
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Example #10
Source File: msdnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super(MSDClassifier, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=2))
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=2))
            self.features.add(nn.AvgPool2D(
                pool_size=2,
                strides=2))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels)) 
Example #11
Source File: densenet.py    From gluon-cv with Apache License 2.0
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
                                        strides=2, padding=3, use_bias=False))
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(
                    num_layers, bn_size, growth_rate, dropout, i+1, norm_layer, norm_kwargs))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2, norm_layer, norm_kwargs))
                    num_features = num_features // 2
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.AvgPool2D(pool_size=7))
            self.features.add(nn.Flatten())

            self.output = nn.Dense(classes) 
Example #12
Source File: inception.py    From gluon-cv with Apache License 2.0
def __init__(self, classes=1000, norm_layer=BatchNorm,
                 norm_kwargs=None, partial_bn=False, **kwargs):
        super(Inception3, self).__init__(**kwargs)
        # self.use_aux_logits = use_aux_logits
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(_make_basic_conv(channels=32, kernel_size=3, strides=2,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            if partial_bn:
                if norm_kwargs is not None:
                    norm_kwargs['use_global_stats'] = True
                else:
                    norm_kwargs = {}
                    norm_kwargs['use_global_stats'] = True

            self.features.add(_make_basic_conv(channels=32, kernel_size=3,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(_make_basic_conv(channels=64, kernel_size=3, padding=1,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=80, kernel_size=1,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(_make_basic_conv(channels=192, kernel_size=3,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_A(32, 'A1_', norm_layer, norm_kwargs))
            self.features.add(_make_A(64, 'A2_', norm_layer, norm_kwargs))
            self.features.add(_make_A(64, 'A3_', norm_layer, norm_kwargs))
            self.features.add(_make_B('B_', norm_layer, norm_kwargs))
            self.features.add(_make_C(128, 'C1_', norm_layer, norm_kwargs))
            self.features.add(_make_C(160, 'C2_', norm_layer, norm_kwargs))
            self.features.add(_make_C(160, 'C3_', norm_layer, norm_kwargs))
            self.features.add(_make_C(192, 'C4_', norm_layer, norm_kwargs))
            self.features.add(_make_D('D_', norm_layer, norm_kwargs))
            self.features.add(_make_E('E1_', norm_layer, norm_kwargs))
            self.features.add(_make_E('E2_', norm_layer, norm_kwargs))
            self.features.add(nn.AvgPool2D(pool_size=8))
            self.features.add(nn.Dropout(0.5))

            self.output = nn.Dense(classes) 
Example #13
Source File: densenet.py    From gluon-cv with Apache License 2.0
def _make_transition(num_output_features, norm_layer, norm_kwargs):
    out = nn.HybridSequential(prefix='')
    out.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    out.add(nn.Activation('relu'))
    out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
    out.add(nn.AvgPool2D(pool_size=2, strides=2))
    return out

# Net 
Example #14
Source File: googlenet.py    From gluon-cv with Apache License 2.0
def _make_aux(in_channels, classes, norm_layer, norm_kwargs):
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(in_channels=in_channels, channels=128, kernel_size=1,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs))

    out.add(nn.Flatten())
    out.add(nn.Dense(units=1024, in_units=2048))
    out.add(nn.Activation('relu'))
    out.add(nn.Dropout(0.7))
    out.add(nn.Dense(units=classes, in_units=1024))
    return out 
Example #15
Source File: inception.py    From gluon-cv with Apache License 2.0
def _make_branch(use_pool, norm_layer, norm_kwargs, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=2))
    setting_names = ['channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(norm_layer, norm_kwargs, **kwargs))
    return out 
Example #16
Source File: senet.py    From gluon-cv with Apache License 2.0
def __init__(self, channels, cardinality, bottleneck_width, stride,
                 downsample=False, downsample_kernel_size=3, avg_down=False,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        D = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * D

        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.Conv2D(group_width // 2, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(group_width, kernel_size=3, strides=stride, padding=1,
                                groups=cardinality, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels * 4, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(gamma_initializer='zeros',
                                 **({} if norm_kwargs is None else norm_kwargs)))

        self.se = nn.HybridSequential(prefix='')
        self.se.add(nn.Conv2D(channels // 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('relu'))
        self.se.add(nn.Conv2D(channels * 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('sigmoid'))

        if downsample:
            self.downsample = nn.HybridSequential(prefix='')
            if avg_down:
                self.downsample.add(nn.AvgPool2D(pool_size=stride, strides=stride,
                                                 ceil_mode=True, count_include_pad=False))
                self.downsample.add(nn.Conv2D(channels=channels * 4, kernel_size=1,
                                              strides=1, use_bias=False))
            else:
                downsample_padding = 1 if downsample_kernel_size == 3 else 0
                self.downsample.add(nn.Conv2D(channels * 4, kernel_size=downsample_kernel_size,
                                              strides=stride,
                                              padding=downsample_padding, use_bias=False))
            self.downsample.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        else:
            self.downsample = None 
Example #17
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self):
        super(CellStem1, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation('relu'))
        self.conv_1x1.add(nn.Conv2D(channels=84, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.relu = nn.Activation('relu')
        self.path_1 = nn.HybridSequential()
        self.path_1.add(nn.AvgPool2D(pool_size=1, strides=2))
        self.path_1.add(nn.Conv2D(channels=42, kernel_size=1, strides=1, use_bias=False))

        #self.path_2 = nn.ModuleList()
        #self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        #self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2_avgpool = nn.AvgPool2D(pool_size=1, strides=2)
        self.path_2_conv = nn.Conv2D(channels=42, kernel_size=1, strides=1, use_bias=False)

        self.final_path_bn = nn.BatchNorm(epsilon=0.001, momentum=0.1)

        self.comb_iter_0_left = BranchSeparables(84, 84, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(84, 84, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(84, 84, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(84, 84, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(84, 84, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Example #18
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_slice_pooling2d_slice_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     slice,
                     pooling_layer1,
                     pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0][0], end=self.slice[0][1])
            y = self.pool0(x_slice)
            y_slice = y.slice(begin=self.slice[1][0], end=self.slice[1][1])
            out = self.pool1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[(8, 0, 100, 50), (16, -1, -1, -1)], [(0, 64, 0, 50), (2, -1, -1, -1)]]
    for i in range(len(pooling_layers)):
        for j in range(len(pooling_layers)):
            if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                slice[1] = [(0, 64, 0, 0), (2, -1, 1, 1)]
            net = Net(slice, pooling_layers[i], pooling_layers[j])
            check_layer_forward_withinput(net, x) 
Example #19
Source File: fnasnet.py    From insightocr with MIT License
def __init__(self, strides=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pool = nn.AvgPool2D(pool_size=(3,3), strides=strides, padding=padding) 
Example #20
Source File: inception.py    From gluon-cv with Apache License 2.0
def make_aux(classes, norm_layer, norm_kwargs):
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs))
    out.add(_make_basic_conv(channels=768, kernel_size=5,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out

# Net 
Example #21
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_reshape_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     shape,
                     pooling_layer,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.pool0 = pooling_layer

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            out = self.pool0(x_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 32, 32))
    shape = (4, 64, 64, -1)
    for i in range(len(pooling_layers)):
        net = Net(shape, pooling_layers[i])
        check_layer_forward_withinput(net, x) 
Example #22
Source File: nasnet.py    From gluon-cv with Apache License 2.0
def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        # There's no 'count_include_pad' parameter, which makes it different
        self.pool = nn.AvgPool2D(3, strides=stride, padding=padding, count_include_pad=False) 
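The count_include_pad flag used in the NASNet-style cells changes how padded positions enter the average: with the default (True) the zero padding is counted in the denominator, while with False only real input pixels are averaged. A small hand-made check, not taken from any of the listed projects:

import mxnet as mx
from mxnet.gluon import nn

x = mx.nd.ones(shape=(1, 1, 2, 2))

# Default: padded zeros are counted, so border outputs are pulled toward zero.
pool_default = nn.AvgPool2D(pool_size=3, strides=1, padding=1)
# count_include_pad=False: only the real input pixels are averaged.
pool_exclude = nn.AvgPool2D(pool_size=3, strides=1, padding=1, count_include_pad=False)

print(pool_default(x))  # every output is 4/9 ~= 0.444
print(pool_exclude(x))  # every output is 1.0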
Example #23
Source File: nasnet.py    From gluon-cv with Apache License 2.0
def __init__(self, stem_filters, norm_layer, norm_kwargs, num_filters=42):
        super(CellStem0, self).__init__()

        self.conv_1x1 = nn.HybridSequential(prefix='')
        self.conv_1x1.add(nn.Activation('relu'))
        self.conv_1x1.add(nn.Conv2D(num_filters, 1, strides=1, use_bias=False))
        self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                     **({} if norm_kwargs is None else norm_kwargs)))

        self.comb_iter_0_left = BranchSeparables(num_filters, num_filters, 5, 2, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_0_right = BranchSeparablesStem(stem_filters, num_filters, 7, 2, 3,
                                                      norm_layer, norm_kwargs)

        self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(stem_filters, num_filters, 7, 2, 3,
                                                      norm_layer, norm_kwargs)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparablesStem(stem_filters, num_filters, 5, 2, 2,
                                                      norm_layer, norm_kwargs)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_4_left = BranchSeparables(num_filters, num_filters, 3, 1, 1,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1) 
Example #24
Source File: nasnet.py    From gluon-cv with Apache License 2.0
def __init__(self, out_channels_left, out_channels_right, norm_layer, norm_kwargs):
        super(FirstCell, self).__init__()

        self.conv_1x1 = nn.HybridSequential(prefix='')
        self.conv_1x1.add(nn.Activation('relu'))
        self.conv_1x1.add(nn.Conv2D(out_channels_right, 1, strides=1, use_bias=False))
        self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                     **({} if norm_kwargs is None else norm_kwargs)))

        self.path_1 = nn.HybridSequential(prefix='')
        self.path_1.add(nn.AvgPool2D(1, strides=2, count_include_pad=False))
        self.path_1.add(nn.Conv2D(out_channels_left, 1, strides=1, use_bias=False))

        self.path_2 = nn.HybridSequential(prefix='')
        # No nn.ZeroPad2D in gluon
        self.path_2.add(nn.AvgPool2D(1, strides=2, count_include_pad=False))
        self.path_2.add(nn.Conv2D(out_channels_left, 1, strides=1, use_bias=False))

        self.final_path_bn = norm_layer(momentum=0.1, epsilon=0.001,
                                        **({} if norm_kwargs is None else norm_kwargs))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1,
                                                 norm_layer, norm_kwargs) 
Example #25
Source File: nasnet.py    From gluon-cv with Apache License 2.0
def __init__(self, out_channels_left, out_channels_right, norm_layer, norm_kwargs):
        super(NormalCell, self).__init__()

        self.conv_prev_1x1 = nn.HybridSequential(prefix='')
        self.conv_prev_1x1.add(nn.Activation('relu'))
        self.conv_prev_1x1.add(nn.Conv2D(out_channels_left, 1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                          **({} if norm_kwargs is None else norm_kwargs)))

        self.conv_1x1 = nn.HybridSequential(prefix='')
        self.conv_1x1.add(nn.Activation('relu'))
        self.conv_1x1.add(nn.Conv2D(out_channels_right, 1, strides=1, use_bias=False))
        self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                     **({} if norm_kwargs is None else norm_kwargs)))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1,
                                                 norm_layer, norm_kwargs) 
Example #26
Source File: nasnet.py    From gluon-cv with Apache License 2.0
def __init__(self, out_channels_left, out_channels_right, norm_layer, norm_kwargs):
        super(ReductionCell1, self).__init__()

        self.conv_prev_1x1 = nn.HybridSequential(prefix='')
        self.conv_prev_1x1.add(nn.Activation('relu'))
        self.conv_prev_1x1.add(nn.Conv2D(out_channels_left, 1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                          **({} if norm_kwargs is None else norm_kwargs)))

        self.conv_1x1 = nn.HybridSequential(prefix='')
        self.conv_1x1.add(nn.Activation('relu'))
        self.conv_1x1.add(nn.Conv2D(out_channels_right, 1, strides=1, use_bias=False))
        self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                     **({} if norm_kwargs is None else norm_kwargs)))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2,
                                                  norm_layer, norm_kwargs)

        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1,
                                                 norm_layer, norm_kwargs)
        self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1) 
Example #27
Source File: sknet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SKNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SKNetUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels)) 
Example #28
Source File: condensenet.py    From imgclsmob with MIT License
def __init__(self,
                 **kwargs):
        super(TransitionBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = nn.AvgPool2D(
                pool_size=2,
                strides=2,
                padding=0) 
Example #29
Source File: ror_cifar.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARRoR, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            self.features.add(RoRResBody(
                in_channels=in_channels,
                out_channels_lists=channels,
                bn_use_global_stats=bn_use_global_stats,
                dropout_rate=dropout_rate))
            in_channels = channels[-1][-1]
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels)) 
Example #30
Source File: wrn.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 width_factor,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(WRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(WRNInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(WRNUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            width_factor=width_factor))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
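Several of the classification networks above (SKNet, CIFARRoR, WRN) end with a fixed-size AvgPool2D (pool_size=7 or 8) before the classifier, which assumes the final feature map has exactly that spatial size; GlobalAvgPool2D is the input-size-agnostic alternative. A small comparison sketch, not taken from any of the listed projects:

import mxnet as mx
from mxnet.gluon import nn

feat = mx.nd.random.uniform(shape=(2, 512, 7, 7))  # e.g. the last feature map for a 224x224 input

fixed_pool = nn.AvgPool2D(pool_size=7, strides=1)  # requires a 7x7 feature map
global_pool = nn.GlobalAvgPool2D()                 # averages whatever spatial size it receives

print(fixed_pool(feat).shape)   # (2, 512, 1, 1)
print(global_pool(feat).shape)  # (2, 512, 1, 1)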