Python mxnet.gluon.nn.MaxPool2D() Examples
The following are 30 code examples of mxnet.gluon.nn.MaxPool2D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.gluon.nn, or try the search function.
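Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below; the input shape and values are illustrative assumptions) of how an nn.MaxPool2D layer behaves on its own:

import mxnet as mx
from mxnet.gluon import nn

# 3x3 max pooling with stride 2 and padding 1 -- the configuration most of the
# examples below use to halve the spatial resolution of a feature map.
pool = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

x = mx.nd.random.uniform(shape=(1, 64, 56, 56))  # NCHW input
y = pool(x)  # pooling layers have no parameters, so no initialize() is needed
print(y.shape)  # (1, 64, 28, 28): out = floor((in + 2*padding - pool_size) / strides) + 1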
Example #1
Source File: dpn.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, kernel_size, padding, **kwargs):
    super(DPNInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = nn.Conv2D(
            channels=out_channels,
            kernel_size=kernel_size,
            strides=2,
            padding=padding,
            use_bias=False,
            in_channels=in_channels)
        self.bn = dpn_batch_norm(channels=out_channels)
        self.activ = nn.Activation("relu")
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #2
Source File: shufflenet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, **kwargs):
    super(ShuffleInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2)
        self.bn = nn.BatchNorm(in_channels=out_channels)
        self.activ = nn.Activation("relu")
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #3
Source File: fnasnet.py From insightocr with MIT License | 6 votes |
def __init__(self):
    super(CellStem0, self).__init__()
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
    self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

    self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

    self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

    self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

    self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
Example #4
Source File: fnasnet.py From insightocr with MIT License | 6 votes |
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #5
Source File: darts.py From imgclsmob with MIT License | 6 votes |
def darts_maxpool3x3(channels, strides):
    """
    DARTS specific 3x3 Max pooling layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels. Unused parameter.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    """
    assert (channels > 0)
    return nn.MaxPool2D(
        pool_size=3,
        strides=strides,
        padding=1)
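As a usage sketch (not part of darts.py; the shapes are assumed and mxnet is imported as mx), the helper just returns a configured pooling block, and the channels argument is only sanity-checked:

pool = darts_maxpool3x3(channels=64, strides=2)  # channels is asserted positive but otherwise unused
x = mx.nd.random.uniform(shape=(1, 64, 32, 32))
print(pool(x).shape)  # (1, 64, 16, 16)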
Example #6
Source File: fractalnet_cifar.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, num_columns, loc_drop_prob, dropout_prob, **kwargs):
    super(FractalUnit, self).__init__(**kwargs)
    with self.name_scope():
        self.block = FractalBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            num_columns=num_columns,
            loc_drop_prob=loc_drop_prob,
            dropout_prob=dropout_prob)
        self.pool = nn.MaxPool2D(
            pool_size=2,
            strides=2)
Example #7
Source File: utils_final.py From InsightFace_TF with MIT License | 6 votes |
def resnet18(num_classes):
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(
            nn.BatchNorm(),
            nn.Conv2D(64, kernel_size=3, strides=1),
            nn.MaxPool2D(pool_size=3, strides=2),
            Residual(64),
            Residual(64),
            Residual(128, same_shape=False),
            Residual(128),
            Residual(256, same_shape=False),
            Residual(256),
            nn.GlobalAvgPool2D(),
            nn.Dense(num_classes)
        )
    return net
Example #8
Source File: alexnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, classes=1000, **kwargs):
    super(AlexNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            self.features.add(nn.Conv2D(64, kernel_size=11, strides=4, padding=2, activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(192, kernel_size=5, padding=2, activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Flatten())
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))

        self.output = nn.Dense(classes)
Example #9
Source File: squeezenext.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, **kwargs):
    super(SqnxtInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            padding=1,
            use_bias=True)
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            ceil_mode=True)
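Example #9 is the first listing above to pass ceil_mode=True. As an aside not drawn from squeezenext.py (shapes are illustrative), the flag controls whether the output size is rounded up when the pooling window does not tile the input exactly:

import mxnet as mx
from mxnet.gluon import nn

x = mx.nd.random.uniform(shape=(1, 4, 8, 8))

floor_pool = nn.MaxPool2D(pool_size=3, strides=2)                 # ceil_mode=False is the default
ceil_pool = nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True)  # round the output size up

print(floor_pool(x).shape)  # (1, 4, 3, 3): floor((8 - 3) / 2) + 1 = 3
print(ceil_pool(x).shape)   # (1, 4, 4, 4): ceil((8 - 3) / 2) + 1 = 4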
Example #10
Source File: pyramidnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, bn_use_global_stats, **kwargs):
    super(PyrInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = nn.Conv2D(
            channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=False,
            in_channels=in_channels)
        self.bn = nn.BatchNorm(
            in_channels=out_channels,
            use_global_stats=bn_use_global_stats)
        self.activ = nn.Activation("relu")
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #11
Source File: diracnetv2.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, **kwargs):
    super(DiracInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = nn.Conv2D(
            channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=True,
            in_channels=in_channels)
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #12
Source File: se_resnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, block, layers, channels, classes=1000, thumbnail=False,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(SE_ResNetV1, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        if thumbnail:
            self.features.add(_conv3x3(channels[0], 1, 0))
        else:
            self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(3, 2, 1))

        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(block, num_layer, channels[i+1], stride, i+1,
                                               in_channels=channels[i],
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
        self.features.add(nn.GlobalAvgPool2D())

        self.output = nn.Dense(classes, in_units=channels[-1])
Example #13
Source File: fnasnet.py From insightface with MIT License | 6 votes |
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #14
Source File: resnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, bn_use_global_stats=False, bn_cudnn_off=False, **kwargs):
    super(ResInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #15
Source File: resattnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, length, bn_use_global_stats, **kwargs):
    super(DownAttBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
        self.res_blocks = ResBlockSequence(
            in_channels=in_channels,
            out_channels=out_channels,
            length=length,
            bn_use_global_stats=bn_use_global_stats)
Example #16
Source File: preresnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, bn_use_global_stats, **kwargs):
    super(PreResInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = nn.Conv2D(
            channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=False,
            in_channels=in_channels)
        self.bn = nn.BatchNorm(
            in_channels=out_channels,
            use_global_stats=bn_use_global_stats)
        self.activ = nn.Activation("relu")
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
Example #17
Source File: residual_attentionnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, channels, size1=14, scale=(1, 2, 1),
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(AttentionModule_stage3, self).__init__(**kwargs)
    p, t, r = scale
    with self.name_scope():
        self.first_residual_blocks = nn.HybridSequential()
        _add_block(self.first_residual_blocks, ResidualBlock, p, channels,
                   norm_layer=norm_layer, norm_kwargs=norm_kwargs)

        self.trunk_branches = nn.HybridSequential()
        _add_block(self.trunk_branches, ResidualBlock, t, channels,
                   norm_layer=norm_layer, norm_kwargs=norm_kwargs)

        self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

        self.softmax1_blocks = nn.HybridSequential()
        _add_block(self.softmax1_blocks, ResidualBlock, 2 * r, channels,
                   norm_layer=norm_layer, norm_kwargs=norm_kwargs)

        self.interpolation1 = UpsamplingBilinear2d(size=size1)

        self.softmax2_blocks = nn.HybridSequential()
        _add_sigmoid_layer(self.softmax2_blocks, channels, norm_layer, norm_kwargs)

        self.last_blocks = ResidualBlock(channels)
Example #18
Source File: fishnet.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels_list, bn_use_global_stats, **kwargs):
    super(DownUnit, self).__init__(**kwargs)
    with self.name_scope():
        self.blocks = nn.HybridSequential(prefix="")
        for i, out_channels in enumerate(out_channels_list):
            self.blocks.add(FishBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = out_channels
        self.pool = nn.MaxPool2D(
            pool_size=2,
            strides=2)
Example #19
Source File: fdensenet.py From insightface with MIT License | 6 votes |
def __init__(self, num_init_features, growth_rate, block_config,
             bn_size=4, dropout=0, classes=1000, **kwargs):
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        #self.features.add(nn.AvgPool2D(pool_size=7))
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(classes)
Example #20
Source File: dla.py From gluon-cv with Apache License 2.0 | 6 votes |
def _make_level(self, block, inplanes, planes, blocks, norm_layer, norm_kwargs, stride=1):
    downsample = None
    if stride != 1 or inplanes != planes:
        downsample = nn.HybridSequential()
        downsample.add(*[
            nn.MaxPool2D(stride, strides=stride),
            nn.Conv2D(channels=planes, in_channels=inplanes,
                      kernel_size=1, strides=1, use_bias=False),
            norm_layer(in_channels=planes, **norm_kwargs)])

    layers = []
    layers.append(block(inplanes, planes, stride,
                        norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                        downsample=downsample))
    for _ in range(1, blocks):
        layers.append(block(inplanes, planes,
                            norm_layer=norm_layer, norm_kwargs=norm_kwargs))

    curr_level = nn.HybridSequential()
    curr_level.add(*layers)
    return curr_level
Example #21
Source File: resnet.py From gluon-cv with Apache License 2.0 | 6 votes |
def __init__(self, block, layers, channels, classes=1000, thumbnail=False,
             last_gamma=False, use_se=False, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(ResNetV1, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        if thumbnail:
            self.features.add(_conv3x3(channels[0], 1, 0))
        else:
            self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(3, 2, 1))

        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(block, num_layer, channels[i+1], stride, i+1,
                                               in_channels=channels[i], last_gamma=last_gamma,
                                               use_se=use_se, norm_layer=norm_layer,
                                               norm_kwargs=norm_kwargs))
        self.features.add(nn.GlobalAvgPool2D())

        self.output = nn.Dense(classes, in_units=channels[-1])
Example #22
Source File: fnasnet.py From insightface with MIT License | 6 votes |
def __init__(self):
    super(CellStem0, self).__init__()
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
    self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

    self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

    self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

    self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

    self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
Example #23
Source File: polynet.py From imgclsmob with MIT License | 5 votes |
def __init__(self, **kwargs):
    super(MaxPoolBranch, self).__init__(**kwargs)
    with self.name_scope():
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=0)
Example #24
Source File: nasnet.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, out_channels_left, out_channels_right, norm_layer, norm_kwargs):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential(prefix='')
    self.conv_prev_1x1.add(nn.Activation('relu'))
    self.conv_prev_1x1.add(nn.Conv2D(out_channels_left, 1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                      **({} if norm_kwargs is None else norm_kwargs)))

    self.conv_1x1 = nn.HybridSequential(prefix='')
    self.conv_1x1.add(nn.Activation('relu'))
    self.conv_1x1.add(nn.Conv2D(out_channels_right, 1, strides=1, use_bias=False))
    self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                 **({} if norm_kwargs is None else norm_kwargs)))

    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right,
                                             5, 2, 2, norm_layer, norm_kwargs)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right,
                                              7, 2, 3, norm_layer, norm_kwargs)

    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right,
                                              7, 2, 3, norm_layer, norm_kwargs)

    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1, count_include_pad=False)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right,
                                              5, 2, 2, norm_layer, norm_kwargs)

    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right,
                                             3, 1, 1, norm_layer, norm_kwargs)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #25
Source File: shufflenetv2b.py From imgclsmob with MIT License | 5 votes |
def __init__(self, in_channels, out_channels, **kwargs):
    super(ShuffleInitBlock, self).__init__(**kwargs)
    with self.name_scope():
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2)
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1,
            ceil_mode=False)
Example #26
Source File: pnasnet.py From imgclsmob with MIT License | 5 votes |
def __init__(self, strides=2, extra_padding=False, **kwargs):
    super(PnasMaxPoolBlock, self).__init__(**kwargs)
    self.extra_padding = extra_padding

    with self.name_scope():
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=strides,
            padding=1)
Example #27
Source File: vgg.py From d-SNE with Apache License 2.0 | 5 votes |
def _make_features(self, layers, filters, batch_norm):
    featurizer = nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros'))
            if batch_norm:
                featurizer.add(nn.BatchNorm())
            featurizer.add(nn.Activation('relu'))
        featurizer.add(nn.MaxPool2D(strides=2))
    return featurizer
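Note that the VGG featurizer above constructs its pooling layer as nn.MaxPool2D(strides=2) with no explicit pool_size, so the layer falls back to the default 2x2 window. A small sketch of that behavior (the input shape is an assumption, not taken from vgg.py):

import mxnet as mx
from mxnet.gluon import nn

pool = nn.MaxPool2D(strides=2)  # pool_size defaults to (2, 2), padding to 0
x = mx.nd.random.uniform(shape=(1, 64, 224, 224))
print(pool(x).shape)  # (1, 64, 112, 112): non-overlapping 2x2 windows halve height and width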
Example #28
Source File: vovnet.py From imgclsmob with MIT License | 5 votes |
def __init__(self, in_channels, out_channels, branch_channels, num_branches,
             resize, use_residual, bn_use_global_stats=False, **kwargs):
    super(VoVUnit, self).__init__(**kwargs)
    self.resize = resize
    self.use_residual = use_residual

    with self.name_scope():
        if self.resize:
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                ceil_mode=True)
        self.branches = SequentialConcurrent(prefix="")
        with self.branches.name_scope():
            branch_in_channels = in_channels
            for i in range(num_branches):
                self.branches.add(conv3x3_block(
                    in_channels=branch_in_channels,
                    out_channels=branch_channels,
                    bn_use_global_stats=bn_use_global_stats))
                branch_in_channels = branch_channels
        self.concat_conv = conv1x1_block(
            in_channels=(in_channels + num_branches * branch_channels),
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats)
Example #29
Source File: nasnet.py From gluon-cv with Apache License 2.0 | 5 votes |
def __init__(self, stem_filters, norm_layer, norm_kwargs, num_filters=42):
    super(CellStem0, self).__init__()
    self.conv_1x1 = nn.HybridSequential(prefix='')
    self.conv_1x1.add(nn.Activation('relu'))
    self.conv_1x1.add(nn.Conv2D(num_filters, 1, strides=1, use_bias=False))
    self.conv_1x1.add(norm_layer(momentum=0.1, epsilon=0.001,
                                 **({} if norm_kwargs is None else norm_kwargs)))

    self.comb_iter_0_left = BranchSeparables(num_filters, num_filters,
                                             5, 2, 2, norm_layer, norm_kwargs)
    self.comb_iter_0_right = BranchSeparablesStem(stem_filters, num_filters,
                                                  7, 2, 3, norm_layer, norm_kwargs)

    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparablesStem(stem_filters, num_filters,
                                                  7, 2, 3, norm_layer, norm_kwargs)

    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1, count_include_pad=False)
    self.comb_iter_2_right = BranchSeparablesStem(stem_filters, num_filters,
                                                  5, 2, 2, norm_layer, norm_kwargs)

    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1, count_include_pad=False)

    self.comb_iter_4_left = BranchSeparables(num_filters, num_filters,
                                             3, 1, 1, norm_layer, norm_kwargs)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #30
Source File: dla.py From imgclsmob with MIT License | 5 votes |
def __init__(self, in_channels, out_channels, strides, bn_use_global_stats,
             body_class=ResBlock, return_down=False, **kwargs):
    super(DLAResBlock, self).__init__(**kwargs)
    self.return_down = return_down
    self.downsample = (strides > 1)
    self.project = (in_channels != out_channels)

    with self.name_scope():
        self.body = body_class(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            bn_use_global_stats=bn_use_global_stats)
        self.activ = nn.Activation("relu")
        if self.downsample:
            self.downsample_pool = nn.MaxPool2D(
                pool_size=strides,
                strides=strides)
        if self.project:
            self.project_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)