Python chainer.initializers.Uniform() Examples
The following are 14 code examples of chainer.initializers.Uniform().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module chainer.initializers, or try the search function.
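Before the examples, a minimal sketch of what Uniform does (nothing assumed beyond chainer and numpy): Uniform(scale) fills an array in place with values drawn uniformly from [-scale, scale]; the default scale is 0.05.

import numpy as np
import chainer.initializers as I

init = I.Uniform(scale=0.1)                 # draws from U(-0.1, 0.1)
w = np.empty((3, 4), dtype=np.float32)
init(w)                                     # fills w in place
assert np.all(np.abs(w) <= 0.1)

The same object can be passed as initialW or initial_bias to a link, which is how most of the examples below use it.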
Example #1
Source File: subword.py From vecto with Mozilla Public License 2.0
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio, zero indicates no dropout
    super(RNN, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))
        # ngram tokens embedding plus 2 for OOV and end symbol.
        if 'lstm' in subword:
            self.mid = L.LSTM(n_units_char, n_units_char * 2)
            self.out = L.Linear(n_units_char * 2, n_units_char)  # the feed-forward output layer
        if 'bilstm' in subword:
            self.mid_b = L.LSTM(n_units_char, n_units_char * 2)
            self.out_b = L.Linear(n_units_char * 2, n_units_char)
        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
        self.final_out = L.Linear(n_units * (self.n_ngram), n_units)
        self.dropout = dropout
        self.vocab = vocab
        self.vocab_ngram_tokens = vocab_ngram_tokens
        self.subword = subword
Example #2
Source File: subword.py From vecto with Mozilla Public License 2.0
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio, zero indicates no dropout
    super(SUMAVG, self).__init__()
    with self.init_scope():
        if subword.startswith('sum'):
            self.f_sumavg = F.sum
        if subword.startswith('avg'):
            self.f_sumavg = F.average
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))
        # ngram tokens embedding plus 2 for OOV and end symbol.
        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
        self.dropout = dropout
        self.vocab = vocab
        self.vocab_ngram_tokens = vocab_ngram_tokens
Example #3
Source File: net.py From models with MIT License
def __init__(self, n_codebooks, n_centroids, n_vocab, embed_dim, tau, embed_mat):
    """
    M: number of codebooks (subcodes)
    K: number of vectors in each codebook
    """
    super(EmbeddingCompressor, self).__init__()
    self.M = n_codebooks
    self.K = n_centroids
    self.n_vocab = n_vocab
    self.embed_dim = embed_dim
    self.tau = tau
    M = self.M
    K = self.K
    u_init = I.Uniform(scale=0.01)
    with self.init_scope():
        self.embed_mat = L.EmbedID(n_vocab, embed_dim, initialW=embed_mat)
        self.l1 = L.Linear(embed_dim, M * K // 2, initialW=u_init, initial_bias=u_init)
        self.l2 = L.Linear(M * K // 2, M * K, initialW=u_init, initial_bias=u_init)
        self.codebook = chainer.Parameter(initializer=u_init, shape=(M * K, embed_dim))
Example #4
Source File: rnn_cells.py From knmt with GNU General Public License v3.0
def create_initializer(init_type, scale=None, fillvalue=None):
    if init_type == 'identity':
        return initializers.Identity() if scale is None else initializers.Identity(scale=scale)
    if init_type == 'constant':
        return initializers.Constant(fillvalue)
    if init_type == 'zero':
        return initializers.Zero()
    if init_type == 'one':
        return initializers.One()
    if init_type == 'normal':
        return initializers.Normal() if scale is None else initializers.Normal(scale)
    if init_type == 'glorotNormal':
        return initializers.GlorotNormal() if scale is None else initializers.GlorotNormal(scale)
    if init_type == 'heNormal':
        return initializers.HeNormal() if scale is None else initializers.HeNormal(scale)
    if init_type == 'orthogonal':
        return initializers.Orthogonal() if scale is None else initializers.Orthogonal(scale)
    if init_type == 'uniform':
        return initializers.Uniform() if scale is None else initializers.Uniform(scale)
    if init_type == 'leCunUniform':
        return initializers.LeCunUniform() if scale is None else initializers.LeCunUniform(scale)
    if init_type == 'glorotUniform':
        return initializers.GlorotUniform() if scale is None else initializers.GlorotUniform(scale)
    if init_type == 'heUniform':
        return initializers.HeUniform() if scale is None else initializers.HeUniform(scale)
    raise ValueError("Unknown initializer type: {0}".format(init_type))
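A hypothetical call to the factory above (the argument values are illustrative, not from the knmt source):

# Uniform with an explicit scale -> initializers.Uniform(0.08)
init = create_initializer('uniform', scale=0.08)
# Without a scale, each branch falls back to the initializer's own
# default (0.05 for Uniform).
default_init = create_initializer('uniform')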
Example #5
Source File: bbox_head.py From chainer-compiler with MIT License
def __call__(self, array):
    scale = 1 / np.sqrt(array.shape[-1])
    initializers.Uniform(scale)(array)
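For context, this callable scales its bound by the fan-in of the last axis, similar in spirit to LeCun-style uniform initialization. A hypothetical standalone use of the same idea (the array shape is chosen purely for illustration):

import numpy as np
from chainer import initializers

array = np.empty((256, 1024), dtype=np.float32)
scale = 1 / np.sqrt(array.shape[-1])    # 1/32 for fan-in 1024
initializers.Uniform(scale)(array)      # values land in [-1/32, 1/32]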
Example #6
Source File: subword.py From vecto with Mozilla Public License 2.0
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio, zero indicates no dropout
    super(CNN1D, self).__init__()
    with self.init_scope():
        self.subword = subword
        # n_units_char = 15
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))
        # ngram tokens embedding plus 2 for OOV and end symbol.
        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
        # n_filters = {i: min(200, i * 5) for i in range(1, 1 + 1)}
        # self.cnns = (L.Convolution2D(1, v, (k, n_units_char),) for k, v in n_filters.items())
        # self.out = L.Linear(sum([v for k, v in n_filters.items()]), n_units)
        if 'small' in self.subword:
            self.cnn1 = L.ConvolutionND(1, n_units_char, 50, (1,))
            self.out = L.Linear(50, n_units)
        else:
            self.cnn1 = L.ConvolutionND(1, n_units_char, 50, (1,))
            self.cnn2 = L.ConvolutionND(1, n_units_char, 100, (2,))
            self.cnn3 = L.ConvolutionND(1, n_units_char, 150, (3,))
            self.cnn4 = L.ConvolutionND(1, n_units_char, 200, (4,))
            self.cnn5 = L.ConvolutionND(1, n_units_char, 200, (5,))
            self.cnn6 = L.ConvolutionND(1, n_units_char, 200, (6,))
            self.cnn7 = L.ConvolutionND(1, n_units_char, 200, (7,))
            self.out = L.Linear(1100, n_units)
        self.dropout = dropout
        self.vocab = vocab
        self.vocab_ngram_tokens = vocab_ngram_tokens
Example #7
Source File: subword.py From vecto with Mozilla Public License 2.0
def __init__(self, subword, vocab, vocab_ngram_tokens, dimensions,
             loss_func, dropout=0):  # dropout ratio, zero indicates no dropout
    super(SkipGram, self).__init__()
    with self.init_scope():
        self.subword = subword
        self.vocab = vocab
        self.vocab_ngram_tokens = vocab_ngram_tokens
        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
        if 'none' in subword:
            self.word_embed = L.EmbedID(
                len(vocab.lst_words) + 2, dimensions,
                initialW=I.Uniform(1. / dimensions))  # plus 2 for OOV and end symbol.
        else:
            self.word_embed = None
        if subword.startswith('_none'):
            self.f = None
        # if subword.startswith('cnn_'):
        #     self.f = CNN(vocab, vocab_ngram_tokens, dimensions, dimensions, dropout)
        if subword.startswith('cnn1d'):
            # pass the local parameter rather than the module-level args.subword
            self.f = CNN1D(vocab, vocab_ngram_tokens, dimensions, dimensions, dropout, subword)
        if subword.startswith('bilstm') or subword.startswith('lstm'):
            self.f = RNN(vocab, vocab_ngram_tokens, dimensions, dimensions, dropout, subword)
        if subword.startswith('avg') or subword.startswith('sum'):
            self.f = SUMAVG(vocab, vocab_ngram_tokens, dimensions, dimensions, dropout, subword)
        self.loss_func = loss_func
Example #8
Source File: word.py From vecto with Mozilla Public License 2.0
def __init__(self, n_vocab, n_units, loss_func):
    super(ContinuousBoW, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            n_vocab + 2, n_units,
            initialW=I.Uniform(1. / n_units))  # plus 2 for OOV and end symbol.
        self.loss_func = loss_func
Example #9
Source File: train_word2vec.py From chainer with MIT License
def __init__(self, n_vocab, n_units, loss_func):
    super(ContinuousBoW, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            n_vocab, n_units, initialW=I.Uniform(1. / n_units))
        self.loss_func = loss_func
Example #10
Source File: train_word2vec.py From chainer with MIT License
def __init__(self, n_vocab, n_units, loss_func):
    super(SkipGram, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            n_vocab, n_units, initialW=I.Uniform(1. / n_units))
        self.loss_func = loss_func
Example #11
Source File: test_convolution_nd.py From chainer with MIT License
def generate_params(self):
    initial_bias = initializers.Uniform(scale=1., dtype=self.dtype)
    return initial_bias,
Example #12
Source File: test_deconvolution_nd.py From chainer with MIT License
def generate_params(self):
    initial_bias = initializers.Uniform(scale=1., dtype=self.dtype)
    return initial_bias,
Example #13
Source File: test_link_n_step_rnn.py From chainer with MIT License
def get_initializers(self):
    if self.initialW == 'zero':
        weight_initializer = initializers.constant.Zero()
    elif self.initialW == 'random':
        weight_initializer = initializers.GlorotUniform(
            rng=numpy.random.RandomState(seed=0))
    if self.initial_bias == 'zero':
        bias_initializer = initializers.constant.Zero()
    elif self.initial_bias == 'random':
        bias_initializer = initializers.Uniform(
            rng=numpy.random.RandomState(seed=0))
    return weight_initializer, bias_initializer
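As the test above shows, Uniform also accepts an rng argument. A small sketch of why that matters, assuming a Chainer version with rng support (the array shapes are illustrative): two initializers seeded identically produce identical draws.

import numpy
from chainer import initializers

a = numpy.empty((2, 3), dtype=numpy.float32)
b = numpy.empty((2, 3), dtype=numpy.float32)
initializers.Uniform(rng=numpy.random.RandomState(seed=0))(a)
initializers.Uniform(rng=numpy.random.RandomState(seed=0))(b)
assert numpy.array_equal(a, b)          # reproducible initialization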
Example #14
Source File: bbox_head.py From chainercv with MIT License
def __call__(self, array):
    scale = 1 / np.sqrt(array.shape[-1])
    initializers.Uniform(scale)(array)