Python chainer.links.EmbedID() Examples
The following are 30 code examples of chainer.links.EmbedID(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.links, or try the search function.
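Before the project examples, here is a minimal, self-contained sketch of constructing and calling an EmbedID link. The vocabulary size, embedding size, and input IDs are illustrative and not taken from any example below.

import numpy as np
import chainer.links as L

# EmbedID maps integer word IDs to trainable embedding vectors.
# Illustrative sizes: a 1000-word vocabulary, 100-dimensional embeddings;
# -1 marks padding and is skipped via ignore_label.
embed = L.EmbedID(1000, 100, ignore_label=-1)

x = np.array([[3, 41, 9, -1]], dtype=np.int32)  # (batch, length) int IDs
e = embed(x)
print(e.shape)  # (1, 4, 100)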
Example #1
Source File: nets.py From chainer with MIT License | 6 votes |
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
    out_units = n_units // 3
    super(CNNEncoder, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                               initialW=embed_init)
        self.cnn_w3 = L.Convolution2D(
            n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True)
        self.cnn_w4 = L.Convolution2D(
            n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True)
        self.cnn_w5 = L.Convolution2D(
            n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True)
        self.mlp = MLP(n_layers, out_units * 3, dropout)
    self.out_units = out_units * 3
    self.dropout = dropout
Example #2
Source File: decoder.py From knmt with GNU General Public License v3.0 | 6 votes |
def __init__(self, V, d_model=512, n_heads=8, d_ff=2048, experimental_relu=False,
             dropout=None, nb_layers=6, residual_mode="normal", no_normalize=False):
    super(Decoder, self).__init__(
        emb = L.EmbedID(V, d_model),
        encoding_layers = DecoderMultiLayer(d_model, n_heads, d_ff=d_ff,
                                            experimental_relu=experimental_relu,
                                            dropout=dropout, nb_layers=nb_layers,
                                            residual_mode=residual_mode,
                                            no_normalize=no_normalize),
        logits_layer = L.Linear(d_model, V + 1)
    )
    self.dropout = dropout
    self.n_heads = n_heads
    self.d_model = d_model
    self.cached_pos_vect = None

    self.add_param("bos_encoding", (1, 1, d_model))
    self.bos_encoding.data[...] = np.random.randn(d_model)

    self.V = V
    self.eos_idx = V
Example #3
Source File: nets.py From vecto with Mozilla Public License 2.0 | 6 votes |
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1, wv=None):
    out_units = n_units // 3
    super(CNNEncoder, self).__init__()
    with self.init_scope():
        if wv is None:
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=embed_init)
        else:
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=wv)
        self.cnn_w3 = L.Convolution2D(
            n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True)
        self.cnn_w4 = L.Convolution2D(
            n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True)
        self.cnn_w5 = L.Convolution2D(
            n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True)
        self.mlp = MLP(n_layers, out_units * 3, dropout)
    self.out_units = out_units * 3
    self.dropout = dropout
Example #4
Source File: nets.py From vecto with Mozilla Public License 2.0 | 6 votes |
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1, wv=None):
    super(RNNEncoder, self).__init__()
    with self.init_scope():
        if wv is None:
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=embed_init)
        else:
            # TODO: this implementation was allowing for dynamic embeddings
            # think about how to support both continuous embeddings
            # and function pointers
            # self.embed = self.get_embed_from_wv
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=wv)
        self.encoder = L.NStepLSTM(n_layers, n_units, n_units, dropout)
    self.n_layers = n_layers
    self.out_units = n_units
    self.dropout = dropout
Example #5
Source File: encoders.py From knmt with GNU General Public License v3.0 | 6 votes |
def __init__(self, Vi, Ei, Hi, init_orth=False, use_bn_length=0,
             cell_type=rnn_cells.LSTMCell):
    gru_f = cell_type(Ei, Hi)
    gru_b = cell_type(Ei, Hi)

    log.info("constructing encoder [%s]" % (cell_type,))

    super(Encoder, self).__init__(
        emb=L.EmbedID(Vi, Ei),
        # gru_f = L.GRU(Hi, Ei),
        # gru_b = L.GRU(Hi, Ei)
        gru_f=gru_f,
        gru_b=gru_b
    )
    self.Hi = Hi

    if use_bn_length > 0:
        self.add_link("bn_f", BNList(Hi, use_bn_length))
        # self.add_link("bn_b", BNList(Hi, use_bn_length))  # TODO
    self.use_bn_length = use_bn_length

    if init_orth:
        ortho_init(self.gru_f)
        ortho_init(self.gru_b)
Example #6
Source File: nets.py From vecto with Mozilla Public License 2.0 | 6 votes |
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e
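A usage sketch of block_embed as defined above. The EmbedID sizes and the padded ID array are illustrative; the call assumes the module-level imports (F, L) of the original source file.

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 16, ignore_label=-1)        # illustrative sizes
x = np.array([[5, 7, 2, -1],
              [1, 4, -1, -1]], dtype=np.int32)      # (B, L) padded word IDs
e = block_embed(embed, x, dropout=0.1)
print(e.shape)  # (2, 16, 4, 1): ready for the (k, 1) convolutions above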
Example #7
Source File: lstm.py From seq2seq with MIT License | 6 votes |
def __init__(
        self,
        embed_dim: int,
        n_units: int=1000,
        gpu: int=-1,
):
    super(LSTM, self).__init__(
        embed=L.EmbedID(embed_dim, n_units),  # word embedding
        l1=L.Linear(n_units, n_units * 4),
        h1=L.Linear(n_units, n_units * 4),
        l2=L.Linear(n_units, n_units * 4),
        h2=L.Linear(n_units, n_units * 4),
        l3=L.Linear(n_units, embed_dim),
    )
    self.embed_dim = embed_dim
    self.n_units = n_units
    self.gpu = gpu
Example #8
Source File: nets.py From qb with MIT License | 6 votes |
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e
Example #9
Source File: model.py From pfio with MIT License | 6 votes |
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(LSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size, hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size, initialW=initializers.Normal(0.01)
        )
        self.lstm = L.LSTM(hidden_size, hidden_size)
        self.out_word = L.Linear(
            hidden_size, vocab_size, initialW=initializers.Normal(0.01)
        )
    self.dropout_ratio = dropout_ratio
Example #10
Source File: model.py From pfio with MIT License | 6 votes |
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(NStepLSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size, hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size, initialW=initializers.Normal(0.01)
        )
        self.lstm = L.NStepLSTM(1, hidden_size, hidden_size, dropout_ratio)
        self.decode_caption = L.Linear(
            hidden_size, vocab_size, initialW=initializers.Normal(0.01)
        )
    self.dropout_ratio = dropout_ratio
Example #11
Source File: net.py From convolutional_seq2seq with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
             max_length=50, dropout=0.2, width=3):
    init_emb = chainer.initializers.Normal(0.1)
    init_out = VarInNormal(1.)
    super(Seq2seq, self).__init__(
        embed_x=L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        embed_y=L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        embed_position_x=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        embed_position_y=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        encoder=ConvGLUEncoder(n_layers, n_units, width, dropout),
        decoder=ConvGLUDecoder(n_layers, n_units, width, dropout),
        W=L.Linear(n_units, n_target_vocab, initialW=init_out),
    )
    self.n_layers = n_layers
    self.n_units = n_units
    self.n_target_vocab = n_target_vocab
    self.max_length = max_length
    self.width = width
    self.dropout = dropout
Example #12
Source File: train_dqn_batch_grasping.py From chainerrl with MIT License | 6 votes |
def __init__(self, n_actions, max_episode_steps):
    super().__init__()
    with self.init_scope():
        self.embed = L.EmbedID(max_episode_steps + 1, 3136)
        self.image2hidden = chainerrl.links.Sequence(
            L.Convolution2D(None, 32, 8, stride=4),
            F.relu,
            L.Convolution2D(None, 64, 4, stride=2),
            F.relu,
            L.Convolution2D(None, 64, 3, stride=1),
            functools.partial(F.reshape, shape=(-1, 3136)),
        )
        self.hidden2out = chainerrl.links.Sequence(
            L.Linear(None, 512),
            F.relu,
            L.Linear(None, n_actions),
            DiscreteActionValue,
        )
Example #13
Source File: model.py From chainer with MIT License | 6 votes |
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(LSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size, hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size, initialW=initializers.Normal(0.01)
        )
        self.lstm = L.LSTM(hidden_size, hidden_size)
        self.out_word = L.Linear(
            hidden_size, vocab_size, initialW=initializers.Normal(0.01)
        )
    self.dropout_ratio = dropout_ratio
Example #14
Source File: test_connections.py From chainer with MIT License | 6 votes |
def setUp(self):
    class Model(chainer.Chain):

        def __init__(self, link, args, kwargs):
            super(Model, self).__init__()
            with self.init_scope():
                self.l1 = link(*args, **kwargs)

        def __call__(self, x):
            return self.l1(x)

    self.model = Model(self.link, self.args, self.kwargs)

    if self.link is L.EmbedID:
        self.x = np.random.randint(0, self.args[0], size=self.in_shape)
        self.x = self.x.astype(self.in_type)
    else:
        self.x = input_generator.increasing(
            *self.in_shape, dtype=self.in_type)
Example #15
Source File: nets.py From contextual_augmentation with MIT License | 6 votes |
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e
Example #16
Source File: nets.py From contextual_augmentation with MIT License | 6 votes |
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
    out_units = n_units // 3
    super(CNNEncoder, self).__init__(
        embed=L.EmbedID(n_vocab, n_units, ignore_label=-1,
                        initialW=embed_init),
        cnn_w3=L.Convolution2D(
            n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True),
        cnn_w4=L.Convolution2D(
            n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True),
        cnn_w5=L.Convolution2D(
            n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True),
        mlp=MLP(n_layers, out_units * 3, dropout)
    )
    self.out_units = out_units * 3
    self.dropout = dropout
    self.use_predict_embed = False
Example #17
Source File: model.py From chainer with MIT License | 6 votes |
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(NStepLSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size, hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size, initialW=initializers.Normal(0.01)
        )
        self.lstm = L.NStepLSTM(1, hidden_size, hidden_size, dropout_ratio)
        self.decode_caption = L.Linear(
            hidden_size, vocab_size, initialW=initializers.Normal(0.01)
        )
    self.dropout_ratio = dropout_ratio
Example #18
Source File: subword.py From vecto with Mozilla Public License 2.0 | 6 votes |
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio, zero indicates no dropout
    super(RNN, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))  # ngram tokens embedding plus 2 for OOV and end symbol.

        if 'lstm' in subword:
            self.mid = L.LSTM(n_units_char, n_units_char * 2)
            self.out = L.Linear(n_units_char * 2, n_units_char)  # the feed-forward output layer
        if 'bilstm' in subword:
            self.mid_b = L.LSTM(n_units_char, n_units_char * 2)
            self.out_b = L.Linear(n_units_char * 2, n_units_char)

        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
        self.final_out = L.Linear(n_units * (self.n_ngram), n_units)

    self.dropout = dropout
    self.vocab = vocab
    self.vocab_ngram_tokens = vocab_ngram_tokens
    self.subword = subword
Example #19
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 6 votes |
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size,
             proj_size, dropout=0.5):
    """Initialize encoder with structure parameters

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embedding.
        hidden_size (int) : Dimensionality of hidden vectors.
        proj_size (int) : Dimensionality of projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed = L.EmbedID(in_size, embed_size),
        lstm = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj = L.Linear(hidden_size, proj_size),
        out = L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
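A minimal construction sketch of the decoder above, assuming the LSTMDecoder class that wraps this constructor. The sizes are illustrative, not the DSTC6 defaults.

# Illustrative sizes: a 2-layer decoder over a 5000-word vocabulary,
# 256-dim embeddings, 512-dim hidden state, 256-dim pre-softmax projection.
decoder = LSTMDecoder(n_layers=2, in_size=5000, out_size=5000,
                      embed_size=256, hidden_size=512, proj_size=256,
                      dropout=0.5)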
Example #20
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 6 votes |
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size,
             proj_size, dropout=0.5):
    """Initialize encoder with structure parameters

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embedding.
        hidden_size (int) : Dimensionality of hidden vectors.
        proj_size (int) : Dimensionality of projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed = L.EmbedID(in_size, embed_size),
        lstm = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj = L.Linear(hidden_size, proj_size),
        out = L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
Example #21
Source File: subword.py From vecto with Mozilla Public License 2.0 | 6 votes |
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio, zero indicates no dropout
    super(SUMAVG, self).__init__()
    with self.init_scope():
        if subword.startswith('sum'):
            self.f_sumavg = F.sum
        if subword.startswith('avg'):
            self.f_sumavg = F.average

        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))  # ngram tokens embedding plus 2 for OOV and end symbol.
        self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1

    self.dropout = dropout
    self.vocab = vocab
    self.vocab_ngram_tokens = vocab_ngram_tokens
Example #22
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 6 votes |
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size,
             proj_size, dropout=0.5):
    """Initialize encoder with structure parameters

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embedding.
        hidden_size (int) : Dimensionality of hidden vectors.
        proj_size (int) : Dimensionality of projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed = L.EmbedID(in_size, embed_size),
        lstm = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj = L.Linear(hidden_size, proj_size),
        out = L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
Example #23
Source File: lstm_decoder.py From DSTC6-End-to-End-Conversation-Modeling with MIT License | 6 votes |
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size,
             proj_size, dropout=0.5):
    """Initialize encoder with structure parameters

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embedding.
        hidden_size (int) : Dimensionality of hidden vectors.
        proj_size (int) : Dimensionality of projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed = L.EmbedID(in_size, embed_size),
        lstm = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj = L.Linear(hidden_size, proj_size),
        out = L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
Example #24
Source File: train_recursive_minibatch.py From chainer with MIT License | 5 votes |
def __init__(self, n_vocab, n_units, n_label):
    super(ThinStackRecursiveNet, self).__init__(
        embed=L.EmbedID(n_vocab, n_units),
        l=L.Linear(n_units * 2, n_units),
        w=L.Linear(n_units, n_label))
    self.n_units = n_units
Example #25
Source File: nets.py From chainer with MIT License | 5 votes |
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
    super(RNNEncoder, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(n_vocab, n_units,
                               initialW=embed_init)
        self.encoder = L.NStepLSTM(n_layers, n_units, n_units, dropout)
    self.n_layers = n_layers
    self.out_units = n_units
    self.dropout = dropout
Example #26
Source File: seq2seq.py From chainer with MIT License | 5 votes |
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
    super(Seq2seq, self).__init__(
        embed_x=L.EmbedID(n_source_vocab, n_units),
        embed_y=L.EmbedID(n_target_vocab, n_units),
        encoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
        decoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
        W=L.Linear(n_units, n_target_vocab),
    )
    self.n_layers = n_layers
    self.n_units = n_units
Example #27
Source File: nets.py From vecto with Mozilla Public License 2.0 | 5 votes |
def sequence_embed(embed, xs, dropout=0.):
    """Efficient embedding function for variable-length sequences

    This output is equally to
    "return [F.dropout(embed(x), ratio=dropout) for x in xs]".
    However, calling the functions is one-shot and faster.

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        xs (list of :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): i-th element in the list is an input variable,
            which is a :math:`(L_i, )`-shaped int array.
        dropout (float): Dropout ratio.

    Returns:
        list of ~chainer.Variable: Output variables. i-th element in the
        list is an output variable, which is a :math:`(L_i, N)`-shaped
        float array. :math:`(N)` is the number of dimensions of word embedding.

    """
    x_len = [len(x) for x in xs]
    x_section = numpy.cumsum(x_len[:-1])
    ex = embed(F.concat(xs, axis=0))
    ex = F.dropout(ex, ratio=dropout)
    exs = F.split_axis(ex, x_section, 0)
    return exs
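A usage sketch of sequence_embed as defined above, applied to two sequences of different lengths. The sizes and IDs are illustrative; the call assumes the module-level imports (F, L, numpy) of the original source file.

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 8)                              # illustrative sizes
xs = [np.array([1, 5, 2], dtype=np.int32),             # length 3
      np.array([7, 3], dtype=np.int32)]                # length 2
exs = sequence_embed(embed, xs, dropout=0.1)
print([ex.shape for ex in exs])  # [(3, 8), (2, 8)]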
Example #28
Source File: seq2seq_mp1.py From chainer with MIT License | 5 votes |
def __init__(
        self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
    super(Encoder, self).__init__(
        embed_x=L.EmbedID(n_source_vocab, n_units),
        # Corresponding decoder LSTM will be invoked on process 1.
        mn_encoder=chainermn.links.create_multi_node_n_step_rnn(
            L.NStepLSTM(n_layers, n_units, n_units, 0.1),
            comm, rank_in=None, rank_out=1
        ),
    )
    self.comm = comm
    self.n_layers = n_layers
    self.n_units = n_units
Example #29
Source File: seq2seq_mp1.py From chainer with MIT License | 5 votes |
def __init__(
        self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
    super(Decoder, self).__init__(
        embed_y=L.EmbedID(n_target_vocab, n_units),
        # Corresponding encoder LSTM will be invoked on process 0.
        mn_decoder=chainermn.links.create_multi_node_n_step_rnn(
            L.NStepLSTM(n_layers, n_units, n_units, 0.1),
            comm, rank_in=0, rank_out=None),
        W=L.Linear(n_units, n_target_vocab),
    )
    self.comm = comm
    self.n_layers = n_layers
    self.n_units = n_units
Example #30
Source File: nets.py From chainer with MIT License | 5 votes |
def sequence_embed(embed, xs, dropout=0.):
    """Efficient embedding function for variable-length sequences

    This output is equally to
    "return [F.dropout(embed(x), ratio=dropout) for x in xs]".
    However, calling the functions is one-shot and faster.

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        xs (list of :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): i-th element in the list is an input variable,
            which is a :math:`(L_i, )`-shaped int array.
        dropout (float): Dropout ratio.

    Returns:
        list of ~chainer.Variable: Output variables. i-th element in the
        list is an output variable, which is a :math:`(L_i, N)`-shaped
        float array. :math:`(N)` is the number of dimensions of word embedding.

    """
    x_len = [len(x) for x in xs]
    x_section = numpy.cumsum(x_len[:-1])
    ex = embed(F.concat(xs, axis=0))
    ex = F.dropout(ex, ratio=dropout)
    exs = F.split_axis(ex, x_section, 0)
    return exs