Python chainer.links.connection.linear.Linear() Examples

The following are 18 code examples of chainer.links.connection.linear.Linear().
Each example is taken from an open-source project; the source file, project, and
license are noted above each snippet. You may also want to check out all
available functions and classes of the module chainer.links.connection.linear.
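For orientation before the examples: Linear(in_size, out_size) is Chainer's fully
connected link. It holds a weight matrix of shape (out_size, in_size) and, unless
nobias=True is passed, a bias vector of shape (out_size,). Below is a minimal
sketch of constructing and calling the link directly; the sizes and zero-valued
input are illustrative assumptions, not taken from any example on this page.

import numpy as np

from chainer.links.connection.linear import Linear

# A fully connected layer mapping 3-dimensional inputs to 2-dimensional outputs.
layer = Linear(3, 2)

# Calling the link computes y = x W^T + b over a batch of inputs.
x = np.zeros((4, 3), dtype=np.float32)  # batch of 4 input vectors
y = layer(x)                            # chainer.Variable with shape (4, 2)
print(y.shape)  # (4, 2)

Passing None as in_size, as several examples below do, defers weight allocation
until the first forward pass, when the input dimension becomes known.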
Example #1
Source File: ln_lstm.py From knmt with GNU General Public License v3.0
def __init__(self, in_size, out_size, lateral_init=None, upward_init=None,
             bias_init=0, forget_bias_init=0):
    super(LNStatelessLSTM, self).__init__(
        upward=linear.Linear(in_size, 4 * out_size, initialW=0),
        lateral=linear.Linear(out_size, 4 * out_size, initialW=0,
                              nobias=True),
        upward_ln=LayerNormalization(),
        lateral_ln=LayerNormalization(),
        output_ln=LayerNormalization(),
    )
    self.state_size = out_size
    self.lateral_init = lateral_init
    self.upward_init = upward_init
    self.bias_init = bias_init
    self.forget_bias_init = forget_bias_init
    if in_size is not None:
        self._initialize_params()
Example #2
Source File: StatelessLSTM.py From chainer-compiler with MIT License
def __init__(self, in_size, out_size=None, lateral_init=None,
             upward_init=None, bias_init=None, forget_bias_init=None):
    if out_size is None:
        out_size, in_size = in_size, None

    super(LSTMBase, self).__init__()
    if bias_init is None:
        bias_init = 0
    if forget_bias_init is None:
        forget_bias_init = 1
    self.state_size = out_size
    self.lateral_init = lateral_init
    self.upward_init = upward_init
    self.bias_init = bias_init
    self.forget_bias_init = forget_bias_init

    with self.init_scope():
        self.upward = linear.Linear(in_size, 4 * out_size, initialW=0)
        self.lateral = linear.Linear(out_size, 4 * out_size, initialW=0,
                                     nobias=True)
        if in_size is not None:
            self._initialize_params()
Example #3
Source File: gru.py From chainer with MIT License
def __init__(self, in_size, out_size, init=None,
             inner_init=None, bias_init=None):
    super(GRUBase, self).__init__()
    with self.init_scope():
        self.W_r = linear.Linear(
            in_size, out_size, initialW=init, initial_bias=bias_init)
        self.U_r = linear.Linear(
            out_size, out_size, initialW=inner_init, initial_bias=bias_init)
        self.W_z = linear.Linear(
            in_size, out_size, initialW=init, initial_bias=bias_init)
        self.U_z = linear.Linear(
            out_size, out_size, initialW=inner_init, initial_bias=bias_init)
        self.W = linear.Linear(
            in_size, out_size, initialW=init, initial_bias=bias_init)
        self.U = linear.Linear(
            out_size, out_size, initialW=inner_init, initial_bias=bias_init)
Example #4
Source File: lstm.py From chainer with MIT License
def __init__(self, in_size, out_size=None, lateral_init=None,
             upward_init=None, bias_init=None, forget_bias_init=None):
    if out_size is None:
        out_size, in_size = in_size, None

    super(LSTMBase, self).__init__()
    if bias_init is None:
        bias_init = 0
    if forget_bias_init is None:
        forget_bias_init = 1
    self.state_size = out_size
    self.lateral_init = lateral_init
    self.upward_init = upward_init
    self.bias_init = bias_init
    self.forget_bias_init = forget_bias_init

    with self.init_scope():
        self.upward = linear.Linear(in_size, 4 * out_size, initialW=0)
        self.lateral = linear.Linear(out_size, 4 * out_size, initialW=0,
                                     nobias=True)
        if in_size is not None:
            self._initialize_params()
Example #5
Source File: zoneoutlstm.py From chainer with MIT License
def __init__(self, in_size, out_size, c_ratio=0.5, h_ratio=0.5, **kwargs):
    if kwargs:
        argument.check_unexpected_kwargs(
            kwargs, train='train argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)

    super(StatefulZoneoutLSTM, self).__init__()
    self.state_size = out_size
    self.c_ratio = c_ratio
    self.h_ratio = h_ratio
    self.reset_state()

    with self.init_scope():
        self.upward = linear.Linear(in_size, 4 * out_size)
        self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
Example #6
Source File: faster_gru.py From knmt with GNU General Public License v3.0
def __init__(self, n_units, n_inputs=None, init=None, bias_init=None):
    if n_inputs is None:
        n_inputs = n_units
    super(GRUBase, self).__init__(
        W_r_z_h=linear.Linear(n_inputs, n_units * 3, initialW=init,
                              initial_bias=bias_init),
        U_r_z=linear.Linear(n_units, n_units * 2, initialW=init,
                            initial_bias=bias_init),
        # W_r=linear.Linear(n_inputs, n_units),
        # U_r=linear.Linear(n_units, n_units),
        # W_z=linear.Linear(n_inputs, n_units),
        # U_z=linear.Linear(n_units, n_units),
        # W=linear.Linear(n_inputs, n_units),
        U=linear.Linear(n_units, n_units),
    )
    self.n_units = n_units
Example #7
Source File: highway.py From chainer with MIT License
def __init__(self, in_out_size, nobias=False, activate=relu.relu,
             init_Wh=None, init_Wt=None, init_bh=None, init_bt=-1):
    super(Highway, self).__init__()
    self.activate = activate

    with self.init_scope():
        self.plain = linear.Linear(
            in_out_size, in_out_size, nobias=nobias,
            initialW=init_Wh, initial_bias=init_bh)
        self.transform = linear.Linear(
            in_out_size, in_out_size, nobias=nobias,
            initialW=init_Wt, initial_bias=init_bt)
Example #8
Source File: resnet.py From chainer with MIT License
def __init__(self, pretrained_model, n_layers, downsample_fb=False):
    super(ResNetLayers, self).__init__()

    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        conv_kwargs = {'initialW': constant.Zero()}
    else:
        # employ default initializers used in the original paper
        conv_kwargs = {'initialW': normal.HeNormal(scale=1.0)}

    kwargs = conv_kwargs.copy()
    kwargs['downsample_fb'] = downsample_fb

    if n_layers == 50:
        block = [3, 4, 6, 3]
    elif n_layers == 101:
        block = [3, 4, 23, 3]
    elif n_layers == 152:
        block = [3, 8, 36, 3]
    else:
        raise ValueError('The n_layers argument should be either 50, 101,'
                         ' or 152, but {} was given.'.format(n_layers))

    with self.init_scope():
        self.conv1 = Convolution2D(3, 64, 7, 2, 3, **conv_kwargs)
        self.bn1 = BatchNormalization(64)
        self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
        self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
        self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
        self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
        self.fc6 = Linear(2048, 1000)

    if pretrained_model and pretrained_model.endswith('.caffemodel'):
        _retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
                  pretrained_model, self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
Example #9
Source File: peephole.py From chainer with MIT License
def __init__(self, in_size, out_size):
    super(StatefulPeepholeLSTM, self).__init__()
    self.state_size = out_size
    self.reset_state()

    with self.init_scope():
        self.upward = linear.Linear(in_size, 4 * out_size)
        self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
        self.peep_i = linear.Linear(out_size, out_size, nobias=True)
        self.peep_f = linear.Linear(out_size, out_size, nobias=True)
        self.peep_o = linear.Linear(out_size, out_size, nobias=True)
Example #10
Source File: tree_lstm.py From chainer with MIT License
def __init__(self, in_size, out_size):
    super(ChildSumTreeLSTM, self).__init__()
    with self.init_scope():
        self.W_x = linear.Linear(in_size, 4 * out_size)
        self.W_h_aio = linear.Linear(out_size, 3 * out_size, nobias=True)
        self.W_h_f = linear.Linear(out_size, out_size, nobias=True)
    self.in_size = in_size
    self.state_size = out_size
Example #11
Source File: mgu.py From chainer with MIT License
def __init__(self, n_inputs, n_units):
    super(MGUBase, self).__init__()
    with self.init_scope():
        self.W_f = linear.Linear(n_inputs + n_units, n_units)
        self.W_h = linear.Linear(n_inputs + n_units, n_units)
Example #12
Source File: chainer_chain.py From chainer-compiler with MIT License
def __init__(self, pretrained_model='auto', n_layers=16):
    super(VGGLayers, self).__init__()
    kwargs = {}

    if n_layers not in [16, 19]:
        raise ValueError(
            'The n_layers argument should be either 16 or 19, '
            'but {} was given.'.format(n_layers))

    with self.init_scope():
        self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
        self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
        self.fc7 = Linear(4096, 4096, **kwargs)
        self.fc8 = Linear(4096, 1000, **kwargs)
        if n_layers == 19:
            self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
Example #13
Source File: resnet_layer.py From nips17-adversarial-attack with MIT License
def __init__(self, pretrained_model, n_layers):
    super(ResNetLayers, self).__init__()

    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        kwargs = {'initialW': constant.Zero()}
    else:
        # employ default initializers used in the original paper
        kwargs = {'initialW': normal.HeNormal(scale=1.0)}

    if n_layers == 50:
        block = [3, 4, 6, 3]
    elif n_layers == 101:
        block = [3, 4, 23, 3]
    elif n_layers == 152:
        block = [3, 8, 36, 3]
    else:
        raise ValueError('The n_layers argument should be either 50, 101,'
                         ' or 152, but {} was given.'.format(n_layers))

    with self.init_scope():
        self.conv1 = Convolution2D(3, 64, 7, 2, 3, **kwargs)
        self.bn1 = BatchNormalization(64)
        self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
        self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
        self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
        self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
        self.fc6 = Linear(2048, 1000)

    if pretrained_model and pretrained_model.endswith('.caffemodel'):
        _retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
                  pretrained_model, self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
Example #14
Source File: faster_gru.py From knmt with GNU General Public License v3.0
def __init__(self, n_units, n_inputs=None):
    if n_inputs is None:
        n_inputs = n_units
    super(GRUBase2, self).__init__(
        W_r_z=linear.Linear(n_inputs + n_units, n_units * 2),
        W_h=linear.Linear(n_inputs + n_units, n_units),
    )
    self.n_units = n_units
Example #15
Source File: maxout.py From chainer with MIT License
def __init__(self, in_size, out_size, pool_size,
             initialW=None, initial_bias=0):
    super(Maxout, self).__init__()

    linear_out_size = out_size * pool_size

    if initialW is None or \
            numpy.isscalar(initialW) or \
            isinstance(initialW, initializer.Initializer):
        pass
    elif isinstance(initialW, chainer.get_array_types()):
        if initialW.ndim != 3:
            raise ValueError('initialW.ndim should be 3')
        initialW = initialW.reshape(linear_out_size, in_size)
    elif callable(initialW):
        initialW_orig = initialW

        def initialW(array):
            array.shape = (out_size, pool_size, in_size)
            initialW_orig(array)
            array.shape = (linear_out_size, in_size)

    if initial_bias is None or \
            numpy.isscalar(initial_bias) or \
            isinstance(initial_bias, initializer.Initializer):
        pass
    elif isinstance(initial_bias, chainer.get_array_types()):
        if initial_bias.ndim != 2:
            raise ValueError('initial_bias.ndim should be 2')
        initial_bias = initial_bias.reshape(linear_out_size)
    elif callable(initial_bias):
        initial_bias_orig = initial_bias

        def initial_bias(array):
            array.shape = (out_size, pool_size)
            initial_bias_orig(array)
            array.shape = linear_out_size,

    with self.init_scope():
        self.linear = linear.Linear(
            in_size, linear_out_size, nobias=initial_bias is None,
            initialW=initialW, initial_bias=initial_bias)

    self.out_size = out_size
    self.pool_size = pool_size
Example #16
Source File: vgg.py From chainer with MIT License
def __init__(self, pretrained_model='auto', n_layers=16):
    super(VGGLayers, self).__init__()

    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        kwargs = {'initialW': init, 'initial_bias': init}
    else:
        # employ default initializers used in the original paper
        kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }

    if n_layers not in [16, 19]:
        raise ValueError(
            'The n_layers argument should be either 16 or 19, '
            'but {} was given.'.format(n_layers))

    with self.init_scope():
        self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
        self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
        self.fc7 = Linear(4096, 4096, **kwargs)
        self.fc8 = Linear(4096, 1000, **kwargs)
        if n_layers == 19:
            self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)

    if pretrained_model == 'auto':
        if n_layers == 16:
            _retrieve(
                'VGG_ILSVRC_16_layers.npz',
                'https://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_16_layers.caffemodel',
                self)
        else:
            _retrieve(
                'VGG_ILSVRC_19_layers.npz',
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_19_layers.caffemodel',
                self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
Example #17
Source File: googlenet.py From chainer with MIT License
def __init__(self, pretrained_model='auto'):
    super(GoogLeNet, self).__init__()

    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        kwargs = {'initialW': constant.Zero()}
    else:
        # employ default initializers used in BVLC. For more detail, see
        # https://github.com/chainer/chainer/pull/2424#discussion_r109642209
        kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}

    with self.init_scope():
        self.conv1 = Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs)
        self.conv2_reduce = Convolution2D(64, 64, 1, **kwargs)
        self.conv2 = Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs)
        self.inc3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inc3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.inc4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inc4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inc4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inc4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inc4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.inc5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inc5b = Inception(832, 384, 192, 384, 48, 128, 128)
        self.loss3_fc = Linear(1024, 1000, **kwargs)
        self.loss1_conv = Convolution2D(512, 128, 1, **kwargs)
        self.loss1_fc1 = Linear(2048, 1024, **kwargs)
        self.loss1_fc2 = Linear(1024, 1000, **kwargs)
        self.loss2_conv = Convolution2D(528, 128, 1, **kwargs)
        self.loss2_fc1 = Linear(2048, 1024, **kwargs)
        self.loss2_fc2 = Linear(1024, 1000, **kwargs)

    if pretrained_model == 'auto':
        _retrieve(
            'bvlc_googlenet.npz',
            'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
Example #18
Source File: c3d_ft.py From tgan with MIT License
def __init__(self, pretrained_model='auto'):
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        conv_kwargs = {'initialW': init, 'initial_bias': init}
        fc_kwargs = conv_kwargs
    else:
        # employ default initializers used in the original paper
        conv_kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
        fc_kwargs = {
            'initialW': normal.Normal(0.005),
            'initial_bias': constant.One(),
        }

    super(C3DVersion1, self).__init__(
        conv1a=ConvolutionND(3, 3, 64, 3, 1, 1, **conv_kwargs),
        conv2a=ConvolutionND(3, 64, 128, 3, 1, 1, **conv_kwargs),
        conv3a=ConvolutionND(3, 128, 256, 3, 1, 1, **conv_kwargs),
        conv3b=ConvolutionND(3, 256, 256, 3, 1, 1, **conv_kwargs),
        conv4a=ConvolutionND(3, 256, 512, 3, 1, 1, **conv_kwargs),
        conv4b=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        conv5a=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        conv5b=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        fc6=Linear(512 * 4 * 4, 4096, **fc_kwargs),
        fc7=Linear(4096, 4096, **fc_kwargs),
        fc8=Linear(4096, 101, **fc_kwargs),
    )

    if pretrained_model == 'auto':
        _retrieve(
            'conv3d_deepnetA_ucf.npz',
            'http://vlg.cs.dartmouth.edu/c3d/'
            'c3d_ucf101_finetune_whole_iter_20000',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)

    self.functions = collections.OrderedDict([
        ('conv1a', [self.conv1a, relu]),
        ('pool1', [_max_pooling_2d]),
        ('conv2a', [self.conv2a, relu]),
        ('pool2', [_max_pooling_3d]),
        ('conv3a', [self.conv3a, relu]),
        ('conv3b', [self.conv3b, relu]),
        ('pool3', [_max_pooling_3d]),
        ('conv4a', [self.conv4a, relu]),
        ('conv4b', [self.conv4b, relu]),
        ('pool4', [_max_pooling_3d]),
        ('conv5a', [self.conv5a, relu]),
        ('conv5b', [self.conv5b, relu]),
        ('pool5', [_max_pooling_3d]),
        ('fc6', [self.fc6, relu, dropout]),
        ('fc7', [self.fc7, relu, dropout]),
        ('fc8', [self.fc8]),
        ('prob', [softmax]),
    ])