Python mxnet.gluon.nn.Flatten() Examples

The following are 30 code examples of mxnet.gluon.nn.Flatten(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.nn, or try the search function.
Example #1
Source File: cifarresnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, block, layers, channels, classes=10,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    """Build a CIFAR ResNet-V2: stem conv, residual stages, pooled classifier head."""
    super(CIFARResNetV2, self).__init__(**kwargs)
    # One channel entry for the stem plus one per stage.
    assert len(layers) == len(channels) - 1
    bn_kwargs = {} if norm_kwargs is None else norm_kwargs
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        # Input normalization without affine parameters (ResNet-V2 style).
        self.features.add(norm_layer(scale=False, center=False, **bn_kwargs))
        self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            # First stage keeps resolution; later stages downsample by 2.
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(
                block, num_layer, channels[i + 1], stride, i + 1,
                in_channels=in_channels,
                norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]
        self.features.add(norm_layer(**bn_kwargs))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes, in_units=in_channels)
Example #2
Source File: bamresnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, channels, bn_use_global_stats, reduction_ratio=16,
             num_layers=1, **kwargs):
    """Channel attention gate (BAM): global pool -> bottleneck FC stack -> per-channel scores."""
    super(ChannelGate, self).__init__(**kwargs)
    # Bottleneck width of the squeeze-excite style MLP.
    mid_channels = channels // reduction_ratio
    with self.name_scope():
        self.pool = nn.GlobalAvgPool2D()
        self.flatten = nn.Flatten()
        self.init_fc = DenseBlock(
            in_channels=channels,
            out_channels=mid_channels,
            bn_use_global_stats=bn_use_global_stats)
        self.main_fcs = nn.HybridSequential(prefix="")
        # Optional extra hidden layers (num_layers counts init_fc as the first).
        for _ in range(num_layers - 1):
            self.main_fcs.add(DenseBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats))
        self.final_fc = nn.Dense(
            units=channels,
            in_units=mid_channels)
Example #3
Source File: msdnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, classes, **kwargs):
    """MSDNet exit classifier: two stride-2 convs, 2x2 average pool, then a linear head."""
    super(MSDClassifier, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2))
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2))
        self.features.add(nn.AvgPool2D(
            pool_size=2,
            strides=2))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #4
Source File: fdensenet.py From insightface with MIT License | 6 votes |
def __init__(self, num_init_features, growth_rate, block_config, bn_size=4,
             dropout=0, classes=1000, **kwargs):
    """DenseNet backbone (embedding variant): stem, dense blocks with transitions, final BN+ReLU."""
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        # 3x3 stride-1 stem (face-recognition variant of the usual 7x7/2 stem).
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks; each transition halves the channel count.
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size,
                                                growth_rate, dropout, i + 1))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        # NOTE: classification head intentionally disabled in this variant.
        #self.features.add(nn.AvgPool2D(pool_size=7))
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(classes)
Example #5
Source File: mobilenet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, multiplier=1.0, classes=1000, norm_layer=BatchNorm,
             norm_kwargs=None, **kwargs):
    """MobileNet v1: width-scaled depthwise-separable stack, global pool, linear classifier."""
    super(MobileNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            _add_conv(self.features, channels=int(32 * multiplier),
                      kernel=3, pad=1, stride=2,
                      norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            # Per-layer depthwise / pointwise channel widths, scaled by the multiplier.
            dw_channels = [int(x * multiplier)
                           for x in [32, 64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024]]
            channels = [int(x * multiplier)
                        for x in [64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024] * 2]
            strides = [1, 2] * 3 + [1] * 5 + [2, 1]
            for dwc, c, s in zip(dw_channels, channels, strides):
                _add_conv_dw(self.features, dw_channels=dwc, channels=c,
                             stride=s, norm_layer=norm_layer,
                             norm_kwargs=norm_kwargs)
            self.features.add(nn.GlobalAvgPool2D())
            self.features.add(nn.Flatten())
        self.output = nn.Dense(classes)
Example #6
Source File: alexnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, classes=1000, **kwargs):
    """AlexNet: five conv layers with max pooling, then two FC+dropout layers and a classifier."""
    super(AlexNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            self.features.add(nn.Conv2D(64, kernel_size=11, strides=4,
                                        padding=2, activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(192, kernel_size=5, padding=2,
                                        activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(384, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Flatten())
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))
        self.output = nn.Dense(classes)
Example #7
Source File: resnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, depth, ctx, pretrained=True, num_classes=0):
    """Re-ID ResNet: pretrained backbone with last-stage stride forced to 1, BN-neck classifier."""
    super(ResNet, self).__init__()
    self.pretrained = pretrained
    with self.name_scope():
        # Drop the pretrained model's final (pooling/classifier) stage.
        network = ResNet.__factory[depth](pretrained=pretrained,
                                          ctx=ctx).features[0:-1]
        # Keep spatial resolution in the last stage (common re-id trick):
        # override stride of both the main body conv and the downsample conv.
        network[-1][0].body[0]._kwargs['stride'] = (1, 1)
        network[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
        self.base = nn.HybridSequential()
        for layer in network:
            self.base.add(layer)
        self.avgpool = nn.GlobalAvgPool2D()
        self.flatten = nn.Flatten()
        # BN-neck: zero-init gamma, no beta.
        self.bn = nn.BatchNorm(center=False, scale=True)
        self.bn.initialize(init=init.Zero(), ctx=ctx)
        self.classifier = nn.Dense(num_classes, use_bias=False)
        self.classifier.initialize(init=init.Normal(0.001), ctx=ctx)
Example #8
Source File: cifarwideresnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, block, layers, channels, drop_rate, classes=10,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    """CIFAR Wide-ResNet: stem conv, widened residual stages with dropout, pooled classifier."""
    super(CIFARWideResNet, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    bn_kwargs = {} if norm_kwargs is None else norm_kwargs
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(norm_layer(scale=False, center=False, **bn_kwargs))
        self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
        self.features.add(norm_layer(**bn_kwargs))
        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(
                block, num_layer, channels[i + 1], drop_rate, stride, i + 1,
                in_channels=in_channels,
                norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]
        self.features.add(norm_layer(**bn_kwargs))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes)
Example #9
Source File: fmobilenetv2.py From insightface with MIT License | 5 votes |
def __init__(self, num_classes=1000, width_mult=1.0, **kwargs):
    """MobileNetV2 backbone: stem conv, inverted-residual stages b1..b7, final 1x1 conv."""
    super(MobilenetV2, self).__init__(**kwargs)
    self.w = width_mult
    # Per-stage channel counts scaled by the width multiplier.
    self.cn = [int(x * self.w) for x in [32, 16, 24, 32, 64, 96, 160, 320]]

    def InvertedResidualSequence(t, cn_id, n, s):
        # n inverted-residual units; only the first changes shape/stride.
        seq = nn.HybridSequential()
        seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], s,
                                 same_shape=False))
        for _ in range(n - 1):
            seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], 1))
        return seq

    self.b0 = ConvBlock(self.cn[0], 3, 1)
    self.b1 = InvertedResidualSequence(1, 1, 1, 1)
    self.b2 = InvertedResidualSequence(6, 2, 2, 2)
    self.b3 = InvertedResidualSequence(6, 3, 3, 2)
    self.b4 = InvertedResidualSequence(6, 4, 4, 1)
    self.b5 = InvertedResidualSequence(6, 5, 3, 2)
    self.b6 = InvertedResidualSequence(6, 6, 3, 2)
    self.b7 = InvertedResidualSequence(6, 7, 1, 1)
    # Last conv width also scales up (never down) with the multiplier.
    self.last_channels = int(1280 * self.w) if self.w > 1.0 else 1280
    with self.name_scope():
        self.features = nn.HybridSequential()
        with self.features.name_scope():
            self.features.add(self.b0, self.b1, self.b2, self.b3,
                              self.b4, self.b5, self.b6, self.b7)
            self.features.add(Conv1x1(self.last_channels))
        # NOTE: classification head intentionally disabled in this variant.
        #self.features.add(nn.GlobalAvgPool2D())
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(num_classes)
Example #10
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def test_flatten():
    """nn.Flatten collapses all trailing axes; a 1-D input gains a trailing axis of size 1."""
    flatten = nn.Flatten()
    # 4-D input: trailing three axes merge into one.
    x = mx.nd.zeros((3, 4, 5, 6))
    assert flatten(x).shape == (3, 4 * 5 * 6)
    # 2-D input is already flat.
    x = mx.nd.zeros((3, 6))
    assert flatten(x).shape == (3, 6)
    # 1-D input is promoted to (N, 1).
    x = mx.nd.zeros((3,))
    assert flatten(x).shape == (3, 1)
Example #11
Source File: fmobilenetv2.py From insightocr with MIT License | 5 votes |
def __init__(self, num_classes=1000, width_mult=1.0, **kwargs):
    """MobileNetV2 backbone: stem conv, inverted-residual stages b1..b7, final 1x1 conv."""
    super(MobilenetV2, self).__init__(**kwargs)
    self.w = width_mult
    # Per-stage channel counts scaled by the width multiplier.
    self.cn = [int(x * self.w) for x in [32, 16, 24, 32, 64, 96, 160, 320]]

    def InvertedResidualSequence(t, cn_id, n, s):
        # n inverted-residual units; only the first changes shape/stride.
        seq = nn.HybridSequential()
        seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], s,
                                 same_shape=False))
        for _ in range(n - 1):
            seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], 1))
        return seq

    self.b0 = ConvBlock(self.cn[0], 3, 1)
    self.b1 = InvertedResidualSequence(1, 1, 1, 1)
    self.b2 = InvertedResidualSequence(6, 2, 2, 2)
    self.b3 = InvertedResidualSequence(6, 3, 3, 2)
    self.b4 = InvertedResidualSequence(6, 4, 4, 1)
    self.b5 = InvertedResidualSequence(6, 5, 3, 2)
    self.b6 = InvertedResidualSequence(6, 6, 3, 2)
    self.b7 = InvertedResidualSequence(6, 7, 1, 1)
    # Last conv width also scales up (never down) with the multiplier.
    self.last_channels = int(1280 * self.w) if self.w > 1.0 else 1280
    with self.name_scope():
        self.features = nn.HybridSequential()
        with self.features.name_scope():
            self.features.add(self.b0, self.b1, self.b2, self.b3,
                              self.b4, self.b5, self.b6, self.b7)
            self.features.add(Conv1x1(self.last_channels))
        # NOTE: classification head intentionally disabled in this variant.
        #self.features.add(nn.GlobalAvgPool2D())
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(num_classes)
Example #12
Source File: fmnasnet.py From insightface with MIT License | 5 votes |
def __init__(self, m=1.0, **kwargs):
    """MNasNet backbone: stem, expanded-conv stages per the setting table, final 1x1 conv."""
    super(MNasNet, self).__init__(**kwargs)
    self.first_oup = int(32 * m)
    self.second_oup = int(16 * m)
    #self.second_oup = int(32*m)
    self.interverted_residual_setting = [
        # t, c, n, s, k
        [3, int(24 * m), 3, 2, 3, "stage2_"],    # -> 56x56
        [3, int(40 * m), 3, 2, 5, "stage3_"],    # -> 28x28
        [6, int(80 * m), 3, 2, 5, "stage4_1_"],  # -> 14x14
        [6, int(96 * m), 2, 1, 3, "stage4_2_"],  # -> 14x14
        [6, int(192 * m), 4, 2, 5, "stage5_1_"], # -> 7x7
        [6, int(320 * m), 1, 1, 3, "stage5_2_"], # -> 7x7
    ]
    self.last_channels = int(1024 * m)
    with self.name_scope():
        self.features = nn.HybridSequential()
        self.features.add(ConvBlock(self.first_oup, 3, 1,
                                    prefix="stage1_conv0_"))
        self.features.add(SepCONV(self.first_oup, self.second_oup, 3,
                                  prefix="stage1_sepconv0_"))
        inp = self.second_oup
        for t, c, n, s, k, prefix in self.interverted_residual_setting:
            oup = c
            self.features.add(ExpandedConvSequence(t, k, inp, oup, n, s,
                                                   prefix=prefix))
            inp = oup
        self.features.add(Conv1x1(self.last_channels, prefix="stage5_3_"))
        # NOTE: classification head intentionally disabled in this variant.
        #self.features.add(nn.GlobalAvgPool2D())
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(num_classes)
Example #13
Source File: ror_cifar.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, init_block_channels, bn_use_global_stats=False,
             dropout_rate=0.0, in_channels=3, in_size=(32, 32), classes=10,
             **kwargs):
    """CIFAR RoR-3 network: 3x3 stem, RoR residual body, 8x8 average pool, linear head."""
    super(CIFARRoR, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        self.features.add(RoRResBody(
            in_channels=in_channels,
            out_channels_lists=channels,
            bn_use_global_stats=bn_use_global_stats,
            dropout_rate=dropout_rate))
        in_channels = channels[-1][-1]
        self.features.add(nn.AvgPool2D(
            pool_size=8,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #14
Source File: densenet.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, num_init_features, growth_rate, block_config, bn_size=4,
             dropout=0, classes=1000, norm_layer=BatchNorm, norm_kwargs=None,
             **kwargs):
    """DenseNet: 7x7 stem, dense blocks joined by halving transitions, pooled classifier."""
    super(DenseNet, self).__init__(**kwargs)
    bn_kwargs = {} if norm_kwargs is None else norm_kwargs
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
                                    strides=2, padding=3, use_bias=False))
        self.features.add(norm_layer(**bn_kwargs))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks; every block grows channels by num_layers * growth_rate.
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(
                num_layers, bn_size, growth_rate, dropout, i + 1,
                norm_layer, norm_kwargs))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # Transition halves both channels and resolution.
                self.features.add(_make_transition(num_features // 2,
                                                   norm_layer, norm_kwargs))
                num_features = num_features // 2
        self.features.add(norm_layer(**bn_kwargs))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.AvgPool2D(pool_size=7))
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes)
Example #15
Source File: squeezenet.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, version, classes=1000, **kwargs):
    """SqueezeNet: fire-module stack (layout depends on version '1.0' or '1.1')
    with a fully convolutional classifier head.

    Raises AssertionError for any other version string.
    """
    super(SqueezeNet, self).__init__(**kwargs)
    assert version in ['1.0', '1.1'], ("Unsupported SqueezeNet version {version}:"
                                       "1.0 or 1.1 expected".format(version=version))
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        if version == '1.0':
            self.features.add(nn.Conv2D(96, kernel_size=7, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(64, 256, 256))
        else:
            # v1.1: smaller stem and earlier pooling.
            self.features.add(nn.Conv2D(64, kernel_size=3, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(_make_fire(64, 256, 256))
        self.features.add(nn.Dropout(0.5))
        # Fully convolutional head: 1x1 conv -> relu -> avg pool -> flatten.
        self.output = nn.HybridSequential(prefix='')
        self.output.add(nn.Conv2D(classes, kernel_size=1))
        self.output.add(nn.Activation('relu'))
        self.output.add(nn.AvgPool2D(13))
        self.output.add(nn.Flatten())
Example #16
Source File: xception.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, bn_use_global_stats=False, in_channels=3,
             in_size=(299, 299), classes=1000, **kwargs):
    """Xception: init block, staged Xception units, final block, then flatten + Dense head."""
    super(Xception, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(XceptionInitBlock(
            in_channels=in_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = 64
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # First unit of each stage downsamples and uses fewer reps.
                    stage.add(XceptionUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=(2 if (j == 0) else 1),
                        reps=(2 if (j == 0) else 3),
                        start_with_relu=((i != 0) or (j != 0)),
                        grow_first=((i != len(channels) - 1) or
                                    (j != len(channels_per_stage) - 1)),
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(XceptionFinalBlock(
            bn_use_global_stats=bn_use_global_stats))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=2048))
Example #17
Source File: train_srgan.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self):
    """SRGAN discriminator: conv trunk plus a residual branch, LeakyReLU head to one logit."""
    super(SRDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    self.res_block = nn.HybridSequential()
    df_dim = 64
    with self.name_scope():
        self.model.add(
            nn.Conv2D(df_dim, 4, 2, 1),
            nn.LeakyReLU(0.2)
        )
        # Progressively widen the trunk: 128, 256, 512, 1024, 2048 channels.
        for mult in [2, 4, 8, 16, 32]:
            self.model.add(ConvBlock(df_dim * mult))
        self.model.add(ConvBlock(df_dim * 16, 1, 1, padding=0))
        self.model.add(
            nn.Conv2D(df_dim * 8, 1, 1, use_bias=False),
            nn.BatchNorm()
        )
        self.res_block.add(
            ConvBlock(df_dim * 2, 1, 1),
            ConvBlock(df_dim * 2, 3, 1),
            nn.Conv2D(df_dim * 8, 3, 1, use_bias=False),
            nn.BatchNorm()
        )
        self.lrelu = nn.LeakyReLU(0.2)
        self.flatten = nn.Flatten()
        self.dense = nn.Dense(1)
Example #18
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def test_hybrid_stale_cache():
    """Mutating a hybridized net (adding/replacing children) must invalidate its cached graph."""
    # Case 1: append a child after the first forward pass.
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                  bias_initializer='ones', flatten=False))
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2, 3, 5)))
    net.add(mx.gluon.nn.Flatten())
    assert net(mx.nd.ones((2, 3, 5))).shape == (2, 30)

    # Case 2: replace a named child after the first forward pass.
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
        net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2, 3, 5)))
    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.nd.ones((2, 3, 5))).shape == (2, 10)
Example #19
Source File: resnet.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, block, layers, channels, classes=1000, thumbnail=False,
             last_gamma=False, use_se=False, norm_layer=BatchNorm,
             norm_kwargs=None, **kwargs):
    """ImageNet ResNet-V2: pre-norm stem, residual stages, final BN+ReLU, pooled classifier."""
    super(ResNetV2, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    bn_kwargs = {} if norm_kwargs is None else norm_kwargs
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(norm_layer(scale=False, center=False, **bn_kwargs))
        if thumbnail:
            # Small-image stem (e.g. CIFAR-sized inputs): single 3x3 conv.
            self.features.add(_conv3x3(channels[0], 1, 0))
        else:
            self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(norm_layer(**bn_kwargs))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(3, 2, 1))
        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(
                block, num_layer, channels[i + 1], stride, i + 1,
                in_channels=in_channels, last_gamma=last_gamma, use_se=use_se,
                norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]
        self.features.add(norm_layer(**bn_kwargs))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes, in_units=in_channels)
Example #20
Source File: googlenet.py From gluon-cv with Apache License 2.0 | 5 votes |
def _make_aux(in_channels, classes, norm_layer, norm_kwargs):
    """Build the GoogLeNet auxiliary classifier: avg pool, 1x1 conv, FC(1024), dropout, FC(classes)."""
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(in_channels=in_channels, channels=128,
                             kernel_size=1, norm_layer=norm_layer,
                             norm_kwargs=norm_kwargs))
    out.add(nn.Flatten())
    # 2048 = 128 channels x 4x4 spatial after the pooling above — TODO confirm against caller.
    out.add(nn.Dense(units=1024, in_units=2048))
    out.add(nn.Activation('relu'))
    out.add(nn.Dropout(0.7))
    out.add(nn.Dense(units=classes, in_units=1024))
    return out
Example #21
Source File: inception.py From gluon-cv with Apache License 2.0 | 5 votes |
def make_aux(classes, norm_layer, norm_kwargs):
    """Build the Inception-V3 auxiliary classifier: avg pool, 1x1 and 5x5 convs, linear head."""
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs))
    out.add(_make_basic_conv(channels=768, kernel_size=5,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out


# Net
Example #22
Source File: gluon_batchsize_test.py From InsightFace_TF with MIT License | 5 votes |
def inference():
    """Build a plain VGG-like conv net (three 3-conv groups of 64/128/256 channels,
    each group ending in a stride-2 conv) with a 10-way Dense head.

    Returns the un-initialized gluon Sequential network.
    """
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(nn.Conv2D(channels=64, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        # net.add(mx.sym.LeakyReLU(data=net, act_type='prelu', name='prelu1'))
        net.add(nn.Conv2D(channels=64, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=64, kernel_size=3, padding=1, strides=2))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=128, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=128, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=128, kernel_size=3, padding=1, strides=2))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=256, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=256, kernel_size=3, padding=1))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Conv2D(channels=256, kernel_size=3, padding=1, strides=2))
        net.add(nn.BatchNorm(axis=1, center=True, scale=True))
        net.add(nn.Flatten())
        net.add(nn.Dense(10))
    return net
Example #23
Source File: shufflenet.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, init_block_channels, groups, in_channels=3,
             in_size=(224, 224), classes=1000, **kwargs):
    """ShuffleNet: init block, staged shuffle units, 7x7 average pool, linear head."""
    super(ShuffleNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # First unit of a stage downsamples; the very first unit
                    # skips grouped convolution.
                    downsample = (j == 0)
                    ignore_group = (i == 0) and (j == 0)
                    stage.add(ShuffleUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        groups=groups,
                        downsample=downsample,
                        ignore_group=ignore_group))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #24
Source File: vgg.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, use_bias=True, use_bn=False,
             bn_use_global_stats=False, in_channels=3, in_size=(224, 224),
             classes=1000, **kwargs):
    """VGG: stages of 3x3 conv blocks each ending in 2x2 max pool, then the FC output block."""
    super(VGG, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for out_channels in channels_per_stage:
                    stage.add(conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        use_bias=use_bias,
                        use_bn=use_bn,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
                stage.add(nn.MaxPool2D(
                    pool_size=2,
                    strides=2,
                    padding=0))
            self.features.add(stage)
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        # Flattened feature size: channels x 7 x 7 spatial positions.
        in_channels = in_channels * 7 * 7
        self.output.add(VGGOutputBlock(
            in_channels=in_channels,
            classes=classes))
Example #25
Source File: wrn.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, init_block_channels, width_factor, in_channels=3,
             in_size=(224, 224), classes=1000, **kwargs):
    """Wide ResNet (WRN): init block, widened residual stages, 7x7 average pool, linear head."""
    super(WRN, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(WRNInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # Downsample at the start of every stage except the first.
                    strides = 2 if (j == 0) and (i != 0) else 1
                    stage.add(WRNUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        width_factor=width_factor))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #26
Source File: sknet.py From imgclsmob with MIT License | 5 votes |
def __init__(self, channels, init_block_channels, bn_use_global_stats=False,
             in_channels=3, in_size=(224, 224), classes=1000, **kwargs):
    """SKNet: ResNet-style init block, selective-kernel unit stages, pooled linear head."""
    super(SKNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # Downsample at the start of every stage except the first.
                    strides = 2 if (j == 0) and (i != 0) else 1
                    stage.add(SKNetUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #27
Source File: preresnet.py From imgclsmob with MIT License | 4 votes |
def __init__(self, channels, init_block_channels, bottleneck, conv1_stride,
             bn_use_global_stats=False, in_channels=3, in_size=(224, 224),
             classes=1000, **kwargs):
    """Pre-activation ResNet: init block, pre-res unit stages, final activation, pooled head."""
    super(PreResNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # Downsample at the start of every stage except the first.
                    strides = 2 if (j == 0) and (i != 0) else 1
                    stage.add(PreResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        bottleneck=bottleneck,
                        conv1_stride=conv1_stride))
                    in_channels = out_channels
            self.features.add(stage)
        # Pre-activation nets need one trailing BN+ReLU before pooling.
        self.features.add(PreResActivation(
            in_channels=in_channels,
            bn_use_global_stats=bn_use_global_stats))
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #28
Source File: diaresnet.py From imgclsmob with MIT License | 4 votes |
def __init__(self, channels, init_block_channels, bottleneck, conv1_stride,
             bn_use_global_stats=False, in_channels=3, in_size=(224, 224),
             classes=1000, **kwargs):
    """DIA-ResNet: residual stages where all units of a stage share one DIA attention module."""
    super(DIAResNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = DualPathSequential(
                return_two=False,
                prefix="stage{}_".format(i + 1))
            # One attention module per stage, shared across its units.
            attention = DIAAttention(
                in_x_features=channels_per_stage[0],
                in_h_features=channels_per_stage[0])
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    strides = 2 if (j == 0) and (i != 0) else 1
                    stage.add(DIAResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        bottleneck=bottleneck,
                        conv1_stride=conv1_stride,
                        attention=attention))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #29
Source File: airnext.py From imgclsmob with MIT License | 4 votes |
def __init__(self, channels, init_block_channels, cardinality,
             bottleneck_width, ratio, bn_use_global_stats=False, in_channels=3,
             in_size=(224, 224), classes=1000, **kwargs):
    """AirNeXt: init block, grouped attention units (tracking spatial size per unit), pooled head."""
    super(AirNeXt, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(AirInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        # Init block reduces the spatial size by a factor of 4.
        in_size = tuple([x // 4 for x in in_size])
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    strides = 2 if (j == 0) and (i != 0) else 1
                    stage.add(AirNeXtUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        cardinality=cardinality,
                        bottleneck_width=bottleneck_width,
                        bn_use_global_stats=bn_use_global_stats,
                        ratio=ratio,
                        in_size=in_size))
                    in_channels = out_channels
                    in_size = tuple([x // strides for x in in_size])
            self.features.add(stage)
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example #30
Source File: resattnet.py From imgclsmob with MIT License | 4 votes |
def __init__(self, channels, init_block_channels, attentions, att_scales,
             bn_use_global_stats=False, in_channels=3, in_size=(224, 224),
             classes=1000, **kwargs):
    """Residual Attention Network: per-unit choice between attention (hourglass) and plain
    residual blocks, final pre-activation, pooled linear head."""
    super(ResAttNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(ResAttInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        # Init block reduces the spatial size by a factor of 4.
        in_size = tuple([x // 4 for x in in_size])
        for i, channels_per_stage in enumerate(channels):
            # Hourglass depth shrinks as stages get spatially smaller.
            hourglass_depth = len(channels) - 1 - i
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    strides = 2 if (j == 0) and (i != 0) else 1
                    if attentions[i][j]:
                        stage.add(AttBlock(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            hourglass_depth=hourglass_depth,
                            att_scales=att_scales,
                            in_size=in_size,
                            bn_use_global_stats=bn_use_global_stats))
                    else:
                        stage.add(ResBlock(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
                    in_size = tuple([x // strides for x in in_size])
            self.features.add(stage)
        self.features.add(PreActivation(
            in_channels=in_channels,
            bn_use_global_stats=bn_use_global_stats))
        self.features.add(nn.AvgPool2D(
            pool_size=7,
            strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))