Python mxnet.gluon.nn.Dropout() Examples

The following are 30 code examples of mxnet.gluon.nn.Dropout(), collected from open-source projects. The source file and originating project are noted above each example. You may also want to check out the other available functions and classes of the module mxnet.gluon.nn.
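As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any project below; the layer sizes and rate are illustrative) showing where nn.Dropout typically sits in a network and that it only takes effect in training mode:

import mxnet as mx
from mxnet.gluon import nn

# nn.Dropout takes the *drop* probability: each unit is zeroed with
# probability `rate` during training and passed through unchanged otherwise.
net = nn.Sequential()
net.add(nn.Dense(64, activation='relu'),
        nn.Dropout(0.5),
        nn.Dense(10))
net.initialize()

x = mx.nd.random.uniform(shape=(4, 20))
y_test = net(x)                       # inference: dropout is a no-op
with mx.autograd.record(train_mode=True):
    y_train = net(x)                  # training: dropout is active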
Example #1
Source File: splat.py    From gluon-cv with Apache License 2.0
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, radix=2, in_channels=None, r=2,
                 norm_layer=BatchNorm, norm_kwargs=None, drop_ratio=0,
                 *args, **kwargs):
        super(SplitAttentionConv, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        inter_channels = max(in_channels*radix//2//r, 32)
        self.radix = radix
        self.cardinality = groups
        self.conv = Conv2D(channels*radix, kernel_size, strides, padding, dilation,
                           groups=groups*radix, *args, in_channels=in_channels, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn = norm_layer(in_channels=channels*radix, **norm_kwargs)
        self.relu = Activation('relu')
        self.fc1 = Conv2D(inter_channels, 1, in_channels=channels, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(in_channels=inter_channels, **norm_kwargs)
        self.relu1 = Activation('relu')
        if drop_ratio > 0:
            self.drop = nn.Dropout(drop_ratio)
        else:
            self.drop = None
        self.fc2 = Conv2D(channels*radix, 1, in_channels=inter_channels, groups=self.cardinality)
        self.channels = channels 
Example #2
Source File: model.py    From dgl with Apache License 2.0
def __init__(self,
                 rating_vals,
                 in_units,
                 num_basis_functions=2,
                 dropout_rate=0.0):
        super(BiDecoder, self).__init__()
        self.rating_vals = rating_vals
        self._num_basis_functions = num_basis_functions
        self.dropout = nn.Dropout(dropout_rate)
        self.Ps = []
        with self.name_scope():
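            # one learnable (in_units, in_units) basis matrix per basis function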
            for i in range(num_basis_functions):
                self.Ps.append(self.params.get(
                    'Ps_%d' % i, shape=(in_units, in_units),
                    #init=mx.initializer.Orthogonal(scale=1.1, rand_type='normal'),
                    init=mx.initializer.Xavier(magnitude=math.sqrt(2.0)),
                    allow_deferred_init=True))
            self.rate_out = nn.Dense(units=len(rating_vals), flatten=False, use_bias=False) 
Example #3
Source File: fcn8sd.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(FCNFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True) 
Example #4
Source File: sound_classifier.py    From AudioEmotion with MIT License
def _build_custom_neural_network(num_inputs, num_labels):
        from mxnet.gluon import nn

        net = nn.Sequential(prefix='custom_')
        with net.name_scope():
            net.add(nn.Dense(512, in_units=num_inputs, activation='relu', prefix='dense0_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(512, activation='relu', prefix='dense1_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(256, activation='relu', prefix='dense2_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(128, activation='relu', prefix='dense3_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(64, activation='relu', prefix='dense4_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(num_labels, prefix='dense5_'))
        return net 
Example #5
Source File: fdensenet.py    From insightface with MIT License
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
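    # dropout holds the drop rate; a rate of 0 is falsy, so no Dropout layer is added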
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out 
Example #6
Source File: fcn.py    From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_FCNHead, self).__init__()
        with self.name_scope():
            self.block = nn.HybridSequential()
            inter_channels = in_channels // 4
            with self.block.name_scope():
                self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels,
                                         kernel_size=3, padding=1, use_bias=False))
                self.block.add(norm_layer(in_channels=inter_channels,
                                          **({} if norm_kwargs is None else norm_kwargs)))
                self.block.add(nn.Activation('relu'))
                self.block.add(nn.Dropout(0.1))
                self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels,
                                         kernel_size=1))

Example #7
Source File: test_gluon.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()

    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None 
Example #8
Source File: test_exc_handling.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_exc_gluon():
    def gluon(exec_wait=True):
        model = nn.Sequential()
        model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
        model.add(nn.Dropout(1))
        model.add(nn.Dense(64, activation='tanh', in_units=256),
                  nn.Dense(32, in_units=64))
        x = mx.sym.var('data')
        y = model(x)
        model.collect_params().initialize(ctx=[default_context()])
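        # the negative scale passed to random.normal below is invalid; MXNet's
        # asynchronous execution defers the error until the result is read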
        z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
        if exec_wait:
            z.wait_to_read()

    gluon(exec_wait=False)
    assert_raises(MXNetError, gluon, True) 
Example #9
Source File: vgg.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm)
            self.features.add(Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(Dropout(rate=0.5))
            self.features.add(Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(Dropout(rate=0.5))
            self.output = Dense(classes,
                                   weight_initializer='normal',
                                   bias_initializer='zeros') 
Example #10
Source File: densesageconv.py    From dgl with Apache License 2.0
def __init__(self,
                 in_feats,
                 out_feats,
                 feat_drop=0.,
                 bias=True,
                 norm=None,
                 activation=None):
        super(DenseSAGEConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._norm = norm
        with self.name_scope():
            self.feat_drop = nn.Dropout(feat_drop)
            self.activation = activation
            self.fc = nn.Dense(out_feats, in_units=in_feats, use_bias=bias,
                               weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0))) 
Example #11
Source File: pspnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(PSPFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True) 
Example #12
Source File: sparsenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(SparseBlock, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        mid_channels = out_channels * bn_size

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #13
Source File: ibndensenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 conv1_ibn,
                 **kwargs):
        super(IBNDenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size

        with self.name_scope():
            self.conv1 = ibn_pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #14
Source File: fcn.py    From gluon-cv with Apache License 2.0
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_FCNHead, self).__init__()
        with self.name_scope():
            self.block = nn.HybridSequential()
            inter_channels = in_channels // 4
            with self.block.name_scope():
                self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels,
                                         kernel_size=3, padding=1, use_bias=False))
                self.block.add(norm_layer(in_channels=inter_channels,
                                          **({} if norm_kwargs is None else norm_kwargs)))
                self.block.add(nn.Activation('relu'))
                self.block.add(nn.Dropout(0.1))
                self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels,
                                         kernel_size=1))

Example #15
Source File: hybrid_layers.py    From STGCN with GNU General Public License v3.0
def __init__(self, order_of_cheb, Kt, channels, num_of_vertices, keep_prob,
                 T, cheb_polys, activation='GLU', **kwargs):
        super(St_conv_block, self).__init__(**kwargs)
        c_si, c_t, c_oo = channels
        self.order_of_cheb = order_of_cheb
        self.Kt = Kt
        self.keep_prob = keep_prob
        self.seq = nn.HybridSequential()
        self.seq.add(
            Temporal_conv_layer(Kt, c_si, c_t, activation),
            Spatio_conv_layer(order_of_cheb, c_t, c_t,
                              num_of_vertices, T - (Kt - 1), cheb_polys),
            Temporal_conv_layer(Kt, c_t, c_oo),
            nn.LayerNorm(axis=1),
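            # nn.Dropout takes the drop probability, hence the 1 - keep_prob conversion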
            nn.Dropout(1 - keep_prob)
        ) 
Example #16
Source File: deeplabv3.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True) 
Example #17
Source File: ror_cifar.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(RoRBlock, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)

        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #18
Source File: vgg.py    From gluon-cv with Apache License 2.0
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm)
            self.features.add(nn.Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(nn.Dropout(rate=0.5))
            self.features.add(nn.Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(nn.Dropout(rate=0.5))
            self.output = nn.Dense(classes,
                                   weight_initializer='normal',
                                   bias_initializer='zeros') 
Example #19
Source File: deeplabv3_plus.py    From gluon-cv with Apache License 2.0
def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm, norm_kwargs=None,
                 height=128, width=128, **kwargs):
        super(_DeepLabHead, self).__init__()
        self._up_kwargs = {'height': height, 'width': width}
        with self.name_scope():
            self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                              height=height//2, width=width//2, **kwargs)
            self.c1_block = nn.HybridSequential()
            self.c1_block.add(nn.Conv2D(in_channels=c1_channels, channels=48,
                                     kernel_size=3, padding=1, use_bias=False))
            self.c1_block.add(norm_layer(in_channels=48, **({} if norm_kwargs is None else norm_kwargs)))
            self.c1_block.add(nn.Activation('relu'))

            self.block = nn.HybridSequential()
            self.block.add(nn.Conv2D(in_channels=304, channels=256,
                                     kernel_size=3, padding=1, use_bias=False))
            self.block.add(norm_layer(in_channels=256, **({} if norm_kwargs is None else norm_kwargs)))
            self.block.add(nn.Activation('relu'))
            self.block.add(nn.Dropout(0.5))
            self.block.add(nn.Conv2D(in_channels=256, channels=256,
                                     kernel_size=3, padding=1, use_bias=False))
            self.block.add(norm_layer(in_channels=256, **({} if norm_kwargs is None else norm_kwargs)))
            self.block.add(nn.Activation('relu'))
            self.block.add(nn.Dropout(0.1))
            self.block.add(nn.Conv2D(in_channels=256, channels=nclass, kernel_size=1)) 
Example #20
Source File: densenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(DenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #21
Source File: channelnet.py    From imgclsmob with MIT License
def __init__(self,
                 groups,
                 dropout_rate,
                 **kwargs):
        super(ChannelwiseConv2d, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate > 0.0)

        with self.name_scope():
            self.conv = nn.Conv3D(
                channels=groups,
                kernel_size=(4 * groups, 1, 1),
                strides=(groups, 1, 1),
                padding=(2 * groups - 1, 0, 0),
                use_bias=False,
                in_channels=1)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #22
Source File: diaresnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_x_features,
                 in_h_features,
                 num_layers,
                 dropout_rate=0.1,
                 **kwargs):
        super(DIALSTMCell, self).__init__(**kwargs)
        self.num_layers = num_layers
        out_features = 4 * in_h_features

        with self.name_scope():
            self.x_amps = nn.HybridSequential(prefix="")
            self.h_amps = nn.HybridSequential(prefix="")
            for i in range(num_layers):
                amp_class = FirstLSTMAmp if i == 0 else nn.Dense
                self.x_amps.add(amp_class(
                    in_units=in_x_features,
                    units=out_features))
                self.h_amps.add(amp_class(
                    in_units=in_h_features,
                    units=out_features))
                in_x_features = in_h_features
            self.dropout = nn.Dropout(rate=dropout_rate) 
Example #23
Source File: xdensenet_cifar.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 expand_ratio,
                 **kwargs):
        super(XDenseSimpleUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        inc_channels = out_channels - in_channels

        with self.name_scope():
            self.conv = pre_xconv3x3_block(
                in_channels=in_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats,
                expand_ratio=expand_ratio)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #24
Source File: model.py    From training_results_v0.6 with Apache License 2.0
def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            if tie_weights:
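                # weight tying: reuse the embedding matrix as the decoder
                # weights, which requires num_hidden == num_embed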
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                        params=self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

            self.num_hidden = num_hidden 
Example #25
Source File: vgg.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(VGGDense, self).__init__(**kwargs)
        with self.name_scope():
            self.fc = nn.Dense(
                units=out_channels,
                in_units=in_channels)
            self.activ = nn.Activation("relu")
            self.dropout = nn.Dropout(rate=0.5) 
Example #26
Source File: deeplabv3.py    From panoptic-fpn-gluon with Apache License 2.0
def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_DeepLabHead, self).__init__()
        with self.name_scope():
            self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer,
                              norm_kwargs=norm_kwargs, **kwargs)
            self.block = nn.HybridSequential()
            self.block.add(nn.Conv2D(in_channels=256, channels=256,
                                     kernel_size=3, padding=1, use_bias=False))
            self.block.add(norm_layer(in_channels=256, **({} if norm_kwargs is None else norm_kwargs)))
            self.block.add(nn.Activation('relu'))
            self.block.add(nn.Dropout(0.1))
            self.block.add(nn.Conv2D(in_channels=256, channels=nclass,
                                     kernel_size=1)) 
Example #27
Source File: deeplabv3.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(AtrousSpatialPyramidPooling, self).__init__(**kwargs)
        atrous_rates = [12, 24, 36]
        assert (in_channels % 8 == 0)
        mid_channels = in_channels // 8
        project_in_channels = 5 * mid_channels

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            for atrous_rate in atrous_rates:
                self.branches.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    padding=atrous_rate,
                    dilation=atrous_rate))
            self.branches.add(ASPPAvgBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                upscale_out_size=upscale_out_size))
            self.conv = conv1x1_block(
                in_channels=project_in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.5) 
Example #28
Source File: vgg.py    From d-SNE with Apache License 2.0
def __init__(self, layers, filters, classes=1000, embed_size=512, use_dropout=False, use_norm=False,
                 batch_norm=False, use_angular=False, **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        self.use_norm = use_norm
        self.use_angular = use_angular
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm)

            self.embeds = nn.HybridSequential(prefix='')
            if use_dropout:
                self.embeds.add(nn.Dropout(rate=0.5))
            self.embeds.add(nn.Dense(4096, activation='relu', weight_initializer='normal', bias_initializer='zeros'))
            if use_dropout:
                self.embeds.add(nn.Dropout(rate=0.5))
            self.embeds.add(nn.Dense(embed_size, activation='relu', weight_initializer='normal', bias_initializer='zeros'))
            if use_dropout:
                self.embeds.add(nn.Dropout(rate=0.5))

            if self.use_norm:
                self.embeds.add(L2Normalization(mode='instance'))

            if self.use_angular:
                self.output = AngularLinear(classes, in_uints=embed_size)
            else:
                self.output = nn.Dense(classes) 
Example #29
Source File: xdensenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 expand_ratio,
                 **kwargs):
        super(XDenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size

        with self.name_scope():
            self.conv1 = pre_xconv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                expand_ratio=expand_ratio)
            self.conv2 = pre_xconv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats,
                expand_ratio=expand_ratio)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate) 
Example #30
Source File: base_layers.py    From STGCN with GNU General Public License v3.0
def __init__(self, order_of_cheb, Kt, channels, keep_prob,
                 cheb_polys, activation='GLU', **kwargs):
        super(St_conv_block, self).__init__(**kwargs)
        c_si, c_t, c_oo = channels
        self.order_of_cheb = order_of_cheb
        self.Kt = Kt
        self.keep_prob = keep_prob
        self.seq = nn.Sequential()
        self.seq.add(
            Temporal_conv_layer(Kt, c_si, c_t, activation),
            Spatio_conv_layer(order_of_cheb, c_t, c_t, cheb_polys),
            Temporal_conv_layer(Kt, c_t, c_oo),
            nn.LayerNorm(axis=1),
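            # as above, convert the keep probability to the drop probability Dropout expects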
            nn.Dropout(1 - keep_prob)
        )