Python mxnet.gluon.nn.Activation() Examples
The following are 30 code examples of mxnet.gluon.nn.Activation(), collected from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module mxnet.gluon.nn.
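As a quick orientation before the examples: nn.Activation wraps a named activation function ('relu', 'sigmoid', 'tanh', 'softrelu', 'softsign') as a parameter-free layer that can be added to a (Hybrid)Sequential container. The tiny network below is a minimal illustrative sketch and does not appear in any of the cited projects.

import mxnet as mx
from mxnet.gluon import nn

# A small hybrid network using nn.Activation as a standalone layer.
net = nn.HybridSequential()
net.add(nn.Dense(16),
        nn.Activation('relu'),  # same operator as Dense(..., activation='relu')
        nn.Dense(4))
net.initialize()

out = net(mx.nd.random.uniform(shape=(2, 8)))  # in_units of Dense(16) is inferred
print(out.shape)  # (2, 4)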
Example #1
Source File: net.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=InstanceNorm):
    super(Bottleneck, self).__init__()
    self.expansion = 4
    self.downsample = downsample
    if self.downsample is not None:
        self.residual_layer = nn.Conv2D(in_channels=inplanes,
                                        channels=planes * self.expansion,
                                        kernel_size=1, strides=(stride, stride))
    self.conv_block = nn.Sequential()
    with self.conv_block.name_scope():
        self.conv_block.add(norm_layer(in_channels=inplanes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=inplanes, channels=planes, kernel_size=1))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(ConvLayer(planes, planes, kernel_size=3, stride=stride))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=planes, channels=planes * self.expansion,
                                      kernel_size=1))
Example #2
Source File: fnasnet.py From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(NormalCell, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
    self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
    self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
    self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
Example #3
Source File: fnasnet.py From insightocr with MIT License
def __init__(self):
    super(CellStem0, self).__init__()
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
    self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
    self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
    self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
Example #4
Source File: deeplabv3b_plus.py From gluon-cv with Apache License 2.0
def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm, norm_kwargs=None,
             height=240, width=240, **kwargs):
    super(_DeepLabHead, self).__init__()
    self._up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        self.aspp = _ASPP(in_channels=4096, atrous_rates=[12, 24, 36],
                          norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                          height=height // 4, width=width // 4, **kwargs)
        self.c1_block = nn.HybridSequential(prefix='bot_fine_')
        self.c1_block.add(nn.Conv2D(in_channels=c1_channels, channels=48,
                                    kernel_size=1, use_bias=False))
        self.block = nn.HybridSequential(prefix='final_')
        self.block.add(nn.Conv2D(in_channels=304, channels=256,
                                 kernel_size=3, padding=1, use_bias=False))
        self.block.add(norm_layer(in_channels=256,
                                  **({} if norm_kwargs is None else norm_kwargs)))
        self.block.add(nn.Activation('relu'))
        self.block.add(nn.Conv2D(in_channels=256, channels=256,
                                 kernel_size=3, padding=1, use_bias=False))
        self.block.add(norm_layer(in_channels=256,
                                  **({} if norm_kwargs is None else norm_kwargs)))
        self.block.add(nn.Activation('relu'))
        self.block.add(nn.Conv2D(in_channels=256, channels=nclass,
                                 kernel_size=1, use_bias=False))
Example #5
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_slice_activation_slice_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0][0], end=self.slice[0][1])
            y = self.act0(x_slice)
            y_slice = y.slice(begin=self.slice[1][0], end=self.slice[1][1])
            out = self.act1(y_slice)
            return out

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for idx0, act0 in enumerate(acts):
        for idx1, act1 in enumerate(acts):
            if idx1 == idx0:
                continue
            x = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
            slice = [[(0, 16, 32, 32), (4, 32, 64, 64)],
                     [(2, 0, 16, 16), (4, 16, 32, 32)]]
            net = Net(act0, act1, slice)
            check_layer_forward_withinput(net, x)
Example #6
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_reshape_activation_reshape_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape[0])
            y = self.act0(x_reshape)
            y_reshape = y.reshape(self.reshape[1])
            out = self.act1(y_reshape)
            return out

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for idx0, act0 in enumerate(acts):
        for idx1, act1 in enumerate(acts):
            if idx1 == idx0:
                continue
            x = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
            shape = [(4, 32, 32, -1), (4, 32, 16, -1)]
            net = Net(act0, act1, shape)
            check_layer_forward_withinput(net, x)
Example #7
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_slice_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0], end=self.slice[1])
            out = self.act(x_slice)
            return out

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act in acts:
        x = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
        slice = [(0, 16, 32, 32), (4, 32, 64, 64)]
        net = Net(act, slice)
        check_layer_forward_withinput(net, x)
Example #8
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_reshape_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            out = self.act(x_reshape)
            return out

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act in acts:
        x = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
        shape = (4, 32, 32, -1)
        net = Net(act, shape)
        check_layer_forward_withinput(net, x)
Example #9
Source File: test_gluon.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()

    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None
Example #10
Source File: net.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward(self, X):
    h = F.Activation(self.conv1_1(X), act_type='relu')
    h = F.Activation(self.conv1_2(h), act_type='relu')
    relu1_2 = h
    h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
    h = F.Activation(self.conv2_1(h), act_type='relu')
    h = F.Activation(self.conv2_2(h), act_type='relu')
    relu2_2 = h
    h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
    h = F.Activation(self.conv3_1(h), act_type='relu')
    h = F.Activation(self.conv3_2(h), act_type='relu')
    h = F.Activation(self.conv3_3(h), act_type='relu')
    relu3_3 = h
    h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
    h = F.Activation(self.conv4_1(h), act_type='relu')
    h = F.Activation(self.conv4_2(h), act_type='relu')
    h = F.Activation(self.conv4_3(h), act_type='relu')
    relu4_3 = h
    return [relu1_2, relu2_2, relu3_3, relu4_3]
Example #11
Source File: net.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def __init__(self, inplanes, planes, stride=2, norm_layer=InstanceNorm):
    super(UpBottleneck, self).__init__()
    self.expansion = 4
    self.residual_layer = UpsampleConvLayer(inplanes, planes * self.expansion,
                                            kernel_size=1, stride=1, upsample=stride)
    self.conv_block = nn.Sequential()
    with self.conv_block.name_scope():
        self.conv_block.add(norm_layer(in_channels=inplanes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=inplanes, channels=planes, kernel_size=1))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(UpsampleConvLayer(planes, planes, kernel_size=3,
                                              stride=1, upsample=stride))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=planes, channels=planes * self.expansion,
                                      kernel_size=1))
Example #12
Source File: resnext.py From gluon-cv with Apache License 2.0
def hybrid_forward(self, F, x):
    residual = x
    x = self.body(x)
    if self.se:
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        w = self.se(w)
        x = F.broadcast_mul(x, w)
    if self.downsample:
        residual = self.downsample(residual)
    x = F.Activation(x + residual, act_type='relu')
    return x

# Nets
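Note that Example #12 (like Example #10 above) uses the operator form F.Activation inside hybrid_forward rather than the nn.Activation layer; within a HybridBlock, F resolves to mx.nd or mx.sym, so the operator can be applied to an intermediate sum without registering a child block. A small sketch of the equivalence, written here for illustration rather than taken from the source:

import mxnet as mx
from mxnet.gluon import nn

x = mx.nd.array([-1.0, 0.0, 2.0])
layer_out = nn.Activation('relu')(x)           # layer form (parameter-free, no init needed)
op_out = mx.nd.Activation(x, act_type='relu')  # operator form, as used in hybrid_forward
assert (layer_out == op_out).asnumpy().all()   # both apply the same underlying operator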
Example #13
Source File: fnasnet.py From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell0, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = MaxPoolPad()
    self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = AvgPoolPad()
    self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = MaxPoolPad()
Example #14
Source File: aspp_temp.py From Deep-Feature-Flow-Segmentation with MIT License
def __init__(self, prefix, entry_block3_stride, use_global_stats, norm_layer):
    super(EntryFlow, self).__init__(prefix)
    with self.name_scope():
        self.conv1 = nn.HybridSequential(prefix='conv1_')
        with self.conv1.name_scope():
            self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=2, padding=1,
                                     use_bias=False, prefix='1_'))
            self.conv1.add(norm_layer(in_channels=32, use_global_stats=use_global_stats,
                                      prefix='1_BN_'))
            self.conv1.add(nn.Activation("relu"))
        self.conv2 = nn.HybridSequential(prefix='conv1_')
        with self.conv2.name_scope():
            self.conv2.add(nn.Conv2D(64, kernel_size=3, padding=1, use_bias=False, prefix='2_'))
            self.conv2.add(norm_layer(in_channels=64, use_global_stats=use_global_stats,
                                      prefix='2_BN_'))
            self.conv2.add(nn.Activation("relu"))
        self.conv3 = XceptionBlock(filters_list=[128, 128, 128], kernel_size=3, strides=2,
                                   use_global_stats=use_global_stats, norm_layer=norm_layer,
                                   dilation=1, depth_activation=False, in_filters=64,
                                   prefix='block1_')
        self.conv4 = XceptionBlock(filters_list=[256, 256, 256], kernel_size=3, strides=2,
                                   return_skip=True, use_global_stats=use_global_stats,
                                   norm_layer=norm_layer, dilation=1, depth_activation=False,
                                   in_filters=128, prefix='block2_')
        self.conv5 = XceptionBlock(filters_list=[728, 728, 728], kernel_size=3,
                                   strides=entry_block3_stride, use_shortcut_conv=True,
                                   dilation=1, depth_activation=False, in_filters=256,
                                   norm_layer=norm_layer, use_global_stats=use_global_stats,
                                   prefix='block3_')
Example #15
Source File: fnasnet.py From insightocr with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #16
Source File: dla.py From gluon-cv with Apache License 2.0
def __init__(self, inplanes, planes, stride=1, dilation=1,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(Bottleneck, self).__init__(**kwargs)
    if norm_kwargs is None:
        norm_kwargs = {}
    expansion = Bottleneck.expansion
    bottle_planes = planes // expansion
    with self.name_scope():
        self.conv1 = nn.Conv2D(in_channels=inplanes, channels=bottle_planes,
                               kernel_size=1, use_bias=False)
        self.bn1 = norm_layer(in_channels=bottle_planes, **norm_kwargs)
        self.conv2 = nn.Conv2D(in_channels=bottle_planes, channels=bottle_planes,
                               kernel_size=3, strides=stride, padding=dilation,
                               use_bias=False, dilation=dilation)
        self.bn2 = norm_layer(in_channels=bottle_planes, **norm_kwargs)
        self.conv3 = nn.Conv2D(in_channels=bottle_planes, channels=planes,
                               kernel_size=1, use_bias=False)
        self.bn3 = norm_layer(**norm_kwargs)
        self.relu = nn.Activation('relu')
        self.stride = stride
Example #17
Source File: dla.py From gluon-cv with Apache License 2.0
def __init__(self, inplanes, planes, stride=1, dilation=1,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(BottleneckX, self).__init__(**kwargs)
    if norm_kwargs is None:
        norm_kwargs = {}
    cardinality = BottleneckX.cardinality
    bottle_planes = planes * cardinality // 32
    with self.name_scope():
        self.conv1 = nn.Conv2D(in_channels=inplanes, channels=bottle_planes,
                               kernel_size=1, use_bias=False)
        self.bn1 = norm_layer(in_channels=bottle_planes, **norm_kwargs)
        self.conv2 = nn.Conv2D(in_channels=bottle_planes, channels=bottle_planes,
                               kernel_size=3, strides=stride, padding=dilation,
                               use_bias=False, dilation=dilation, groups=cardinality)
        self.bn2 = norm_layer(in_channels=bottle_planes, **norm_kwargs)
        self.conv3 = nn.Conv2D(in_channels=bottle_planes, channels=planes,
                               kernel_size=1, use_bias=False)
        self.bn3 = norm_layer(**norm_kwargs)
        self.relu = nn.Activation('relu')
        self.stride = stride
Example #18
Source File: fnasnet.py From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell0, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = MaxPoolPad()
    self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = AvgPoolPad()
    self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = MaxPoolPad()
Example #19
Source File: fnasnet.py From insightface with MIT License
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = nn.HybridSequential()
    self.conv_prev_1x1.add(nn.Activation(activation='relu'))
    self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
    self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = nn.MaxPool2D(3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = nn.AvgPool2D(3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(3, strides=2, padding=1)
Example #20
Source File: fdensenet.py From insightface with MIT License
def __init__(self, num_init_features, growth_rate, block_config,
             bn_size=4, dropout=0, classes=1000, **kwargs):
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i + 1))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        #self.features.add(nn.AvgPool2D(pool_size=7))
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(classes)
Example #21
Source File: dla.py From gluon-cv with Apache License 2.0
def __init__(self, inplanes, planes, stride=1, dilation=1,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(BasicBlock, self).__init__(**kwargs)
    if norm_kwargs is None:
        norm_kwargs = {}
    with self.name_scope():
        self.conv1 = nn.Conv2D(in_channels=inplanes, channels=planes,
                               kernel_size=3, strides=stride, padding=dilation,
                               use_bias=False, dilation=dilation)
        self.bn1 = norm_layer(in_channels=planes, **norm_kwargs)
        self.relu = nn.Activation('relu')
        self.conv2 = nn.Conv2D(in_channels=planes, channels=planes,
                               kernel_size=3, strides=1, padding=dilation,
                               use_bias=False, dilation=dilation)
        self.bn2 = norm_layer(in_channels=planes, **norm_kwargs)
        self.stride = stride
Example #22
Source File: fdensenet.py From insightface with MIT License
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)
    return out
Example #23
Source File: fcn.py From gluon-cv with Apache License 2.0
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(_FCNHead, self).__init__()
    with self.name_scope():
        self.block = nn.HybridSequential()
        inter_channels = in_channels // 4
        with self.block.name_scope():
            self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels,
                                     kernel_size=3, padding=1, use_bias=False))
            self.block.add(norm_layer(in_channels=inter_channels,
                                      **({} if norm_kwargs is None else norm_kwargs)))
            self.block.add(nn.Activation('relu'))
            self.block.add(nn.Dropout(0.1))
            self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels,
                                     kernel_size=1))
    # pylint: disable=arguments-differ
Example #24
Source File: fastscnn.py From gluon-cv with Apache License 2.0
def __init__(self, dw_channels, out_channels, stride=1,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(_DSConv, self).__init__()
    with self.name_scope():
        self.conv = nn.HybridSequential()
        self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=dw_channels,
                                kernel_size=3, strides=stride, padding=1,
                                groups=dw_channels, use_bias=False))
        self.conv.add(norm_layer(in_channels=dw_channels,
                                 **({} if norm_kwargs is None else norm_kwargs)))
        self.conv.add(nn.Activation('relu'))
        self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=out_channels,
                                kernel_size=1, use_bias=False))
        self.conv.add(norm_layer(in_channels=out_channels,
                                 **({} if norm_kwargs is None else norm_kwargs)))
        self.conv.add(nn.Activation('relu'))
Example #25
Source File: utils.py From dgl with Apache License 2.0
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
            return nn.Activation(act)
        else:
            raise NotImplementedError
    else:
        return act
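Example #25 is a small dispatch helper rather than a network definition, so a brief usage sketch may help; the calls below are hypothetical and assume get_activation has been imported from the module above.

from mxnet import nd

act = get_activation('relu')         # -> nn.Activation('relu')
print(act(nd.array([-2.0, 3.0])))    # [0. 3.]
leaky = get_activation('leaky')      # -> nn.LeakyReLU(0.1)
identity = get_activation(None)      # -> lambda x: x (pass-through)
print(identity(nd.array([-2.0])))    # [-2.]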
Example #26
Source File: train_cgan.py From gluon-cv with Apache License 2.0
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
    super(NLayerDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    kw = 4
    padw = 1
    with self.name_scope():
        self.model.add(
            nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
            nn.LeakyReLU(0.2),
        )
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult = min(2**n, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
        nf_mult = min(2**n_layers, 8)
        self.model.add(
            nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
            nn.InstanceNorm(),
            nn.LeakyReLU(0.2),
        )
        self.model.add(
            nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
        )
        if use_sigmoid:
            self.model.add(nn.Activation('sigmoid'))
Example #27
Source File: squeezenet.py From gluon-cv with Apache License 2.0
def __init__(self, version, classes=1000, **kwargs):
    super(SqueezeNet, self).__init__(**kwargs)
    assert version in ['1.0', '1.1'], ("Unsupported SqueezeNet version {version}:"
                                       "1.0 or 1.1 expected".format(version=version))
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        if version == '1.0':
            self.features.add(nn.Conv2D(96, kernel_size=7, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(64, 256, 256))
        else:
            self.features.add(nn.Conv2D(64, kernel_size=3, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(_make_fire(64, 256, 256))
        self.features.add(nn.Dropout(0.5))

        self.output = nn.HybridSequential(prefix='')
        self.output.add(nn.Conv2D(classes, kernel_size=1))
        self.output.add(nn.Activation('relu'))
        self.output.add(nn.AvgPool2D(13))
        self.output.add(nn.Flatten())
Example #28
Source File: residual_attentionnet.py From gluon-cv with Apache License 2.0
def __init__(self, scale, m, classes=1000, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(ResidualAttentionModel, self).__init__(**kwargs)
    assert len(scale) == 3 and len(m) == 3
    m1, m2, m3 = m
    with self.name_scope():
        self.conv1 = nn.HybridSequential()
        with self.conv1.name_scope():
            self.conv1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, use_bias=False))
            self.conv1.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.conv1.add(nn.Activation('relu'))
        self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.residual_block1 = ResidualBlock(256, in_channels=64)
        self.attention_module1 = nn.HybridSequential()
        _add_block(self.attention_module1, AttentionModule_stage1, m1, 256,
                   scale=scale, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block2 = ResidualBlock(512, in_channels=256, stride=2)
        self.attention_module2 = nn.HybridSequential()
        _add_block(self.attention_module2, AttentionModule_stage2, m2, 512,
                   scale=scale, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block3 = ResidualBlock(1024, in_channels=512, stride=2)
        self.attention_module3 = nn.HybridSequential()
        _add_block(self.attention_module3, AttentionModule_stage3, m3, 1024,
                   scale=scale, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block4 = ResidualBlock(2048, in_channels=1024, stride=2)
        self.residual_block5 = ResidualBlock(2048)
        self.residual_block6 = ResidualBlock(2048)
        self.mpool2 = nn.HybridSequential()
        with self.mpool2.name_scope():
            self.mpool2.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.mpool2.add(nn.Activation('relu'))
            self.mpool2.add(nn.AvgPool2D(pool_size=7, strides=1))
        self.fc = nn.Conv2D(classes, kernel_size=1)
Example #29
Source File: residual_attentionnet.py From gluon-cv with Apache License 2.0
def __init__(self, scale, m, classes=10, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(cifar_ResidualAttentionModel, self).__init__(**kwargs)
    assert len(scale) == 3 and len(m) == 3
    m1, m2, m3 = m
    with self.name_scope():
        self.conv1 = nn.HybridSequential()
        with self.conv1.name_scope():
            self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=1, padding=1, use_bias=False))
            self.conv1.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.conv1.add(nn.Activation('relu'))
        # 32 x 32
        # self.mpool1 = nn.MaxPool2D(pool_size=2, strides=2, padding=0)
        self.residual_block1 = ResidualBlock(128, in_channels=32)
        self.attention_module1 = nn.HybridSequential()
        _add_block(self.attention_module1, AttentionModule_stage2, m1, 128,
                   size1=32, size2=16, scale=scale,
                   norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block2 = ResidualBlock(256, in_channels=128, stride=2)
        self.attention_module2 = nn.HybridSequential()
        _add_block(self.attention_module2, AttentionModule_stage3, m2, 256,
                   size1=16, scale=scale, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block3 = ResidualBlock(512, in_channels=256, stride=2)
        self.attention_module3 = nn.HybridSequential()
        _add_block(self.attention_module3, AttentionModule_stage4, m3, 512,
                   scale=scale, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.residual_block4 = ResidualBlock(1024, in_channels=512)
        self.residual_block5 = ResidualBlock(1024)
        self.residual_block6 = ResidualBlock(1024)
        self.mpool2 = nn.HybridSequential()
        with self.mpool2.name_scope():
            self.mpool2.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.mpool2.add(nn.Activation('relu'))
            self.mpool2.add(nn.AvgPool2D(pool_size=8, strides=1))
        self.fc = nn.Conv2D(classes, kernel_size=1)
Example #30
Source File: p3d.py From gluon-cv with Apache License 2.0
def hybrid_forward(self, F, x):
    """Hybrid forward of a ResBlock in P3D."""
    identity = x

    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu(out)

    if self.block_id < self.depth_3d:
        if self.design == 'A':
            out = self.P3DA(out)
        elif self.design == 'B':
            out = self.P3DB(out)
        elif self.design == 'C':
            out = self.P3DC(out)
        else:
            print('We do not support %s building block for P3D networks. \
Please try A, B or C.' % self.design)
    else:
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

    out = self.conv3(out)
    out = self.bn3(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = F.Activation(out + identity, act_type='relu')
    return out