Python chainer.links.ConvolutionND() Examples

The following are code examples of chainer.links.ConvolutionND() collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the module chainer.links.
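The examples below all call the same constructor, whose leading positional arguments are ndim, in_channels, out_channels, ksize, stride, and pad. As a quick orientation, here is a minimal sketch (not taken from any of the projects below) of a 1-D convolution applied to a (batch, channels, length) array:

import numpy as np
import chainer.links as L

# 8 input channels -> 16 output channels, kernel size 3, padding 1,
# so the length of the input is preserved.
conv = L.ConvolutionND(ndim=1, in_channels=8, out_channels=16, ksize=3, pad=1)
x = np.zeros((4, 8, 100), dtype=np.float32)  # (batch, channels, length)
y = conv(x)
print(y.shape)  # (4, 16, 100)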
Example #1
Source File: losses.py    From EPG with MIT License
def __init__(self, traj_dim_in):
        chan_traj_c0_c1 = 16
        chan_traj_c1_d0 = 32
        units_traj_d0_d1 = 32
        units_traj_d1_d2 = 16

        # We use 1 spatial dimension (so we convolve along the temporal axis) and treat
        # each feature dimension as a channel. The temporal axis always has the same
        # length, since it is fixed by a buffer that keeps track of the latest data.
        traj_c0 = L.ConvolutionND(
            ndim=1, in_channels=traj_dim_in, out_channels=chan_traj_c0_c1, ksize=6, stride=5)
        traj_c1 = L.ConvolutionND(
            ndim=1, in_channels=chan_traj_c0_c1, out_channels=chan_traj_c1_d0, ksize=4, stride=2)
        traj_d0 = L.Linear(in_size=chan_traj_c1_d0, out_size=units_traj_d0_d1)
        loss_d0 = L.Linear(in_size=traj_dim_in + units_traj_d0_d1, out_size=units_traj_d1_d2)
        loss_d1 = L.Linear(in_size=units_traj_d1_d2, out_size=1)

        Loss.__init__(self,
                      # trajectory processing
                      traj_c0=traj_c0, traj_c1=traj_c1, traj_d0=traj_d0,
                      # loss processing
                      loss_d0=loss_d0, loss_d1=loss_d1) 
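To make the shapes concrete, the following rough sketch traces the two 1-D convolutions above; the feature dimension (traj_dim_in) and buffer length used here are made-up values, not ones taken from the EPG code:

import numpy as np
import chainer.links as L

traj_dim_in, T = 12, 128  # assumed feature dimension and buffer length
c0 = L.ConvolutionND(1, traj_dim_in, 16, ksize=6, stride=5)
c1 = L.ConvolutionND(1, 16, 32, ksize=4, stride=2)

x = np.zeros((8, traj_dim_in, T), dtype=np.float32)  # (batch, channels, time)
h = c0(x)  # length (128 - 6) // 5 + 1 = 25 -> shape (8, 16, 25)
h = c1(h)  # length (25 - 4) // 2 + 1 = 11  -> shape (8, 32, 11)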
Example #2
Source File: light_voxelnet.py    From voxelnet_chainer with MIT License
def __init__(self, out_ch=128):
        super(FeatureVoxelNet_v6, self).__init__(
            conv1 = L.ConvolutionND(1, 7, 32, 1, nobias=True),
            conv2 = L.ConvolutionND(1, 64, out_ch, 1),
            # conv3 = L.ConvolutionND(1, 128, out_ch, 1, nobias=True),
            bn1 = L.BatchNormalization(32))
            # bn2 = L.BatchNormalization(out_ch))
            # bn3 = L.BatchNormalization(out_ch))
Example #3
Source File: video_discriminator.py    From tgan with MIT License
def __init__(self, in_channels, top_width, mid_ch, sigma):
        super(VideoDiscriminatorNoBetaInitDefaultWithNoise, self).__init__()
        w = None
        with self.init_scope():
            self.c0 = L.ConvolutionND(3, in_channels, mid_ch, 4, 2, 1, initialW=w)
            self.c1 = L.ConvolutionND(3, mid_ch, mid_ch * 2, 4, 2, 1, initialW=w)
            self.c2 = L.ConvolutionND(3, mid_ch * 2, mid_ch * 4, 4, 2, 1, initialW=w)
            self.c3 = L.ConvolutionND(3, mid_ch * 4, mid_ch * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(mid_ch * 8, 1, top_width, 1, 0, initialW=w)
            self.bn0 = L.BatchNormalization(mid_ch, use_beta=False)
            self.bn1 = L.BatchNormalization(mid_ch * 2, use_beta=False)
            self.bn2 = L.BatchNormalization(mid_ch * 4, use_beta=False)
            self.bn3 = L.BatchNormalization(mid_ch * 8, use_beta=False)
        self.sigma = sigma 
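For reference, a 3-D ConvolutionND with ksize=4, stride=2, pad=1, as used in c0 through c3 above, halves each (even-sized) spatio-temporal axis of a (batch, channels, frames, height, width) video tensor. A small sketch with assumed input sizes and an assumed mid_ch:

import numpy as np
import chainer.links as L

mid_ch = 64  # assumed
c0 = L.ConvolutionND(3, 3, mid_ch, 4, 2, 1)
video = np.zeros((2, 3, 16, 64, 64), dtype=np.float32)  # (batch, ch, frames, H, W)
h = c0(video)
print(h.shape)  # (2, 64, 8, 32, 32)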
Example #4
Source File: video_discriminator.py    From tgan with MIT License
def __init__(self, in_channels, top_width, mid_ch):
        super(VideoDiscriminatorNoBetaInitDefault, self).__init__()
        w = None
        with self.init_scope():
            self.c0 = L.ConvolutionND(3, in_channels, mid_ch, 4, 2, 1, initialW=w)
            self.c1 = L.ConvolutionND(3, mid_ch, mid_ch * 2, 4, 2, 1, initialW=w)
            self.c2 = L.ConvolutionND(3, mid_ch * 2, mid_ch * 4, 4, 2, 1, initialW=w)
            self.c3 = L.ConvolutionND(3, mid_ch * 4, mid_ch * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(mid_ch * 8, 1, top_width, 1, 0, initialW=w)
            self.bn0 = L.BatchNormalization(mid_ch, use_beta=False)
            self.bn1 = L.BatchNormalization(mid_ch * 2, use_beta=False)
            self.bn2 = L.BatchNormalization(mid_ch * 4, use_beta=False)
            self.bn3 = L.BatchNormalization(mid_ch * 8, use_beta=False) 
Example #5
Source File: video_discriminator.py    From tgan with MIT License
def __init__(self, in_channels, top_width, mid_ch, wscale=0.01):
        super(VideoDiscriminatorNoBetaInitUniform, self).__init__()
        w = chainer.initializers.Uniform(wscale)
        with self.init_scope():
            self.c0 = L.ConvolutionND(3, in_channels, mid_ch, 4, 2, 1, initialW=w)
            self.c1 = L.ConvolutionND(3, mid_ch, mid_ch * 2, 4, 2, 1, initialW=w)
            self.c2 = L.ConvolutionND(3, mid_ch * 2, mid_ch * 4, 4, 2, 1, initialW=w)
            self.c3 = L.ConvolutionND(3, mid_ch * 4, mid_ch * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(mid_ch * 8, 1, top_width, 1, 0, initialW=w)
            self.bn0 = L.BatchNormalization(mid_ch, use_beta=False)
            self.bn1 = L.BatchNormalization(mid_ch * 2, use_beta=False)
            self.bn2 = L.BatchNormalization(mid_ch * 4, use_beta=False)
            self.bn3 = L.BatchNormalization(mid_ch * 8, use_beta=False) 
Example #6
Source File: video_discriminator.py    From tgan with MIT License
def __init__(self, in_channels, top_width, mid_ch):
        super(VideoDiscriminatorInitDefault, self).__init__()
        w = None
        with self.init_scope():
            self.c0 = L.ConvolutionND(3, in_channels, mid_ch, 4, 2, 1, initialW=w)
            self.c1 = L.ConvolutionND(3, mid_ch, mid_ch * 2, 4, 2, 1, initialW=w)
            self.c2 = L.ConvolutionND(3, mid_ch * 2, mid_ch * 4, 4, 2, 1, initialW=w)
            self.c3 = L.ConvolutionND(3, mid_ch * 4, mid_ch * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(mid_ch * 8, 1, top_width, 1, 0, initialW=w)
            self.bn0 = L.BatchNormalization(mid_ch)
            self.bn1 = L.BatchNormalization(mid_ch * 2)
            self.bn2 = L.BatchNormalization(mid_ch * 4)
            self.bn3 = L.BatchNormalization(mid_ch * 8) 
Example #7
Source File: video_discriminator.py    From tgan with MIT License
def __init__(self, in_channels, top_width, mid_ch, wscale=0.01):
        super(VideoDiscriminatorInitUniform, self).__init__()
        w = chainer.initializers.Uniform(wscale)
        with self.init_scope():
            self.c0 = L.ConvolutionND(3, in_channels, mid_ch, 4, 2, 1, initialW=w)
            self.c1 = L.ConvolutionND(3, mid_ch, mid_ch * 2, 4, 2, 1, initialW=w)
            self.c2 = L.ConvolutionND(3, mid_ch * 2, mid_ch * 4, 4, 2, 1, initialW=w)
            self.c3 = L.ConvolutionND(3, mid_ch * 4, mid_ch * 8, 4, 2, 1, initialW=w)
            self.c4 = L.Convolution2D(mid_ch * 8, 1, top_width, 1, 0, initialW=w)
            self.bn0 = L.BatchNormalization(mid_ch)
            self.bn1 = L.BatchNormalization(mid_ch * 2)
            self.bn2 = L.BatchNormalization(mid_ch * 4)
            self.bn3 = L.BatchNormalization(mid_ch * 8) 
Example #8
Source File: model.py    From brain_segmentation with MIT License
def __init__(self, in_channels=1, n_classes=4):
        init = chainer.initializers.HeNormal(scale=0.01)
        super().__init__()

        with self.init_scope():
            self.conv1a = L.ConvolutionND(
                3, in_channels, 32, 3, pad=1, initialW=init)
            self.bnorm1a = L.BatchNormalization(32)
            self.conv1b = L.ConvolutionND(
                3, 32, 32, 3, pad=1, initialW=init)
            self.bnorm1b = L.BatchNormalization(32)
            self.conv1c = L.ConvolutionND(
                3, 32, 64, 3, stride=2, pad=1, initialW=init)
            self.voxres2 = VoxResModule()
            self.voxres3 = VoxResModule()
            self.bnorm3 = L.BatchNormalization(64)
            self.conv4 = L.ConvolutionND(
                3, 64, 64, 3, stride=2, pad=1, initialW=init)
            self.voxres5 = VoxResModule()
            self.voxres6 = VoxResModule()
            self.bnorm6 = L.BatchNormalization(64)
            self.conv7 = L.ConvolutionND(
                3, 64, 64, 3, stride=2, pad=1, initialW=init)
            self.voxres8 = VoxResModule()
            self.voxres9 = VoxResModule()
            self.c1deconv = L.DeconvolutionND(
                3, 32, 32, 3, pad=1, initialW=init)
            self.c1conv = L.ConvolutionND(
                3, 32, n_classes, 3, pad=1, initialW=init)
            self.c2deconv = L.DeconvolutionND(
                3, 64, 64, 4, stride=2, pad=1, initialW=init)
            self.c2conv = L.ConvolutionND(
                3, 64, n_classes, 3, pad=1, initialW=init)
            self.c3deconv = L.DeconvolutionND(
                3, 64, 64, 6, stride=4, pad=1, initialW=init)
            self.c3conv = L.ConvolutionND(
                3, 64, n_classes, 3, pad=1, initialW=init)
            self.c4deconv = L.DeconvolutionND(
                3, 64, 64, 10, stride=8, pad=1, initialW=init)
            self.c4conv = L.ConvolutionND(
                3, 64, n_classes, 3, pad=1, initialW=init) 
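The DeconvolutionND kernel sizes in the c*deconv branches are chosen so that each branch upsamples its feature map by an exact factor (output size = stride * (input - 1) + ksize - 2 * pad, giving x1, x2, x4, x8 here). A sketch with an assumed 8-voxel-wide feature map to check the arithmetic:

import numpy as np
import chainer.links as L

x = np.zeros((1, 64, 8, 8, 8), dtype=np.float32)  # assumed feature-map size
up2 = L.DeconvolutionND(3, 64, 64, 4, stride=2, pad=1)   # 2 * (8 - 1) + 4 - 2 = 16
up4 = L.DeconvolutionND(3, 64, 64, 6, stride=4, pad=1)   # 4 * (8 - 1) + 6 - 2 = 32
up8 = L.DeconvolutionND(3, 64, 64, 10, stride=8, pad=1)  # 8 * (8 - 1) + 10 - 2 = 64
print(up2(x).shape, up4(x).shape, up8(x).shape)
# (1, 64, 16, 16, 16) (1, 64, 32, 32, 32) (1, 64, 64, 64, 64)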
Example #9
Source File: model.py    From brain_segmentation with MIT License
def __init__(self):
        initW = chainer.initializers.HeNormal(scale=0.01)
        super().__init__()

        with self.init_scope():
            self.bnorm1 = L.BatchNormalization(size=64)
            self.conv1 = L.ConvolutionND(3, 64, 64, 3, pad=1, initialW=initW)
            self.bnorm2 = L.BatchNormalization(size=64)
            self.conv2 = L.ConvolutionND(3, 64, 64, 3, pad=1, initialW=initW) 
Example #10
Source File: light_voxelnet.py    From voxelnet_chainer with MIT License
def __init__(self, out_ch=128):
        super(OrigFeatureVoxelNet, self).__init__(
            conv1 = L.ConvolutionND(1, 7, 16, 1, nobias=True),
            conv2 = L.ConvolutionND(1, 32, 64, 1, nobias=True),
            conv3 = L.ConvolutionND(1, 128, out_ch, 1),
            bn1 = BN(16), #L.BatchNormalization(16),
            bn2 = BN(64)) #L.BatchNormalization(64),
            # bn3 = BN(out_ch)) #L.BatchNormalization(out_ch)) 
Example #11
Source File: ConvolutionND.py    From chainer-compiler with MIT License
def __init__(self, ndim, nobias):
        super(ConvND, self).__init__()
        with self.init_scope():
            self.l1 = L.ConvolutionND(ndim, 7, 10, 3,
                                      stride=1, pad=1, nobias=nobias) 
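A hedged usage sketch for this wrapper: since ndim is a constructor argument, the same definition yields a 1-D, 2-D, or 3-D convolution, and with ksize=3, stride=1, pad=1 the spatial extents are preserved. The input sizes below are assumptions:

import numpy as np

# Assuming ConvND is the chainer.Chain defined above.
net2d = ConvND(ndim=2, nobias=False)
x2d = np.zeros((1, 7, 32, 32), dtype=np.float32)  # (batch, 7 channels, H, W)
print(net2d.l1(x2d).shape)                        # (1, 10, 32, 32)

net3d = ConvND(ndim=3, nobias=True)
x3d = np.zeros((1, 7, 8, 16, 16), dtype=np.float32)
print(net3d.l1(x3d).shape)                        # (1, 10, 8, 16, 16)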
Example #12
Source File: light_voxelnet.py    From voxelnet_chainer with MIT License
def __init__(self, out_ch=128):
        super(FeatureVoxelNet_v2, self).__init__(
            conv1 = L.ConvolutionND(1, 7, out_ch, 1, nobias=True))
            # conv2 = L.ConvolutionND(1, 32, 64, 1, nobias=True),
            # conv3 = L.ConvolutionND(1, 128, out_ch, 1, nobias=True)) 
Example #13
Source File: light_voxelnet.py    From voxelnet_chainer with MIT License
def __init__(self, in_ch=128, out_ch=64):
        super(MiddleLayers, self).__init__(
            conv1 = L.ConvolutionND(3, in_ch, 32, (3, 1, 1), (2, 1, 1), (0, 0, 0), nobias=True),
            conv2 = L.ConvolutionND(3, 32, 64, (1, 3, 3), (1, 1, 1), (0, 1, 1), nobias=True),
            conv3 = L.ConvolutionND(3, 64, 32, (3, 1, 1), 1, (0, 0, 0), nobias=True),
            conv4 = L.ConvolutionND(3, 32, 64, (1, 3, 3), 1, (0, 1, 1), nobias=True),
            conv5 = L.ConvolutionND(3, 64, out_ch, (2, 3, 3), (1, 1, 1), (0, 1, 1), nobias=True),
            bn1 = L.BatchNormalization(32),
            bn2 = L.BatchNormalization(64),
            bn3 = L.BatchNormalization(32),
            bn4 = L.BatchNormalization(64),
            bn5 = L.BatchNormalization(out_ch)) 
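Passing tuples for ksize, stride, and pad lets each axis be handled differently; conv1 above, for example, convolves and downsamples only along the first (depth) axis. A sketch with an assumed voxel-grid shape:

import numpy as np
import chainer.links as L

conv1 = L.ConvolutionND(3, 128, 32, (3, 1, 1), (2, 1, 1), (0, 0, 0), nobias=True)
x = np.zeros((1, 128, 10, 40, 32), dtype=np.float32)  # assumed (batch, ch, D, H, W)
print(conv1(x).shape)  # (1, 32, 4, 40, 32): only the depth axis is reduced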
Example #14
Source File: light_voxelnet.py    From voxelnet_chainer with MIT License
def __init__(self, out_ch=128):
        super(FeatureVoxelNet, self).__init__(
            conv1 = L.ConvolutionND(1, 7, 16, 1, nobias=True),
            conv2 = L.ConvolutionND(1, 32, 64, 1, nobias=True),
            conv3 = L.ConvolutionND(1, 128, out_ch, 1),
            bn1 = BN(16), #L.BatchNormalization(16),
            bn2 = BN(64)) #L.BatchNormalization(64),
            #bn3 = BN(out_ch)) #L.BatchNormalization(out_ch)) 
Example #15
Source File: subword.py    From vecto with Mozilla Public License 2.0
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
                 dropout, subword):  # dropout ratio, zero indicates no dropout
        super(CNN1D, self).__init__()
        with self.init_scope():
            self.subword = subword
            # n_units_char = 15
            self.embed = L.EmbedID(
                len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
                initialW=I.Uniform(1. / n_units_char))  # ngram token embeddings; plus 2 for OOV and the end symbol.

            self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1

            # n_filters = {i: min(200, i * 5) for i in range(1, 1 + 1)}
            # self.cnns = (L.Convolution2D(1, v, (k, n_units_char),) for k, v in n_filters.items())
            # self.out = L.Linear(sum([v for k, v in n_filters.items()]), n_units)
            if 'small' in self.subword:
                self.cnn1 = L.ConvolutionND(1, n_units_char, 50, (1,), )
                self.out = L.Linear(50, n_units)
            else:
                self.cnn1 = L.ConvolutionND(1, n_units_char, 50, (1,), )
                self.cnn2 = L.ConvolutionND(1, n_units_char, 100, (2,), )
                self.cnn3 = L.ConvolutionND(1, n_units_char, 150, (3,), )
                self.cnn4 = L.ConvolutionND(1, n_units_char, 200, (4,), )
                self.cnn5 = L.ConvolutionND(1, n_units_char, 200, (5,), )
                self.cnn6 = L.ConvolutionND(1, n_units_char, 200, (6,), )
                self.cnn7 = L.ConvolutionND(1, n_units_char, 200, (7,), )
                self.out = L.Linear(1100, n_units)

            self.dropout = dropout
            self.vocab = vocab
            self.vocab_ngram_tokens = vocab_ngram_tokens 
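The kernel widths 1 through 7 in the else branch produce 50 + 100 + 150 + 4 * 200 = 1100 feature maps in total, which matches the in_size of self.out; presumably each convolution's output is pooled over the ngram axis and the results concatenated, though the forward pass is not shown here. A quick check of that arithmetic:

n_filters = [50, 100, 150, 200, 200, 200, 200]  # cnn1 .. cnn7
print(sum(n_filters))                           # 1100, the in_size of self.out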
Example #16
Source File: module.py    From fpl with MIT License
def __init__(self, nb_in, nb_out, ksize=1, pad=0, no_bn=False):
        super(Conv_BN, self).__init__()
        self.no_bn = no_bn
        with self.init_scope():
            self.conv = L.ConvolutionND(1, nb_in, nb_out, ksize=ksize, pad=pad)
            if not no_bn:
                self.bn = L.BatchNormalization(nb_out) 