Python mxnet.gluon.nn.Embedding() Examples
The following are 23 code examples of mxnet.gluon.nn.Embedding(), each drawn from an open-source project; the project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the module mxnet.gluon.nn.
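Before the examples, a quick orientation: nn.Embedding(input_dim, output_dim) maps integer indices in [0, input_dim) to learned output_dim-dimensional vectors. A minimal usage sketch (the sizes and inputs below are illustrative, not taken from any project on this page):

import mxnet as mx
from mxnet.gluon import nn

# A 10-entry vocabulary mapped to 4-dimensional vectors.
embed = nn.Embedding(input_dim=10, output_dim=4)
embed.initialize()

# Three token indices -> one 4-dim vector per index.
vectors = embed(mx.nd.array([3, 0, 7]))
print(vectors.shape)  # (3, 4)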
Example #1
Source File: lstm_crf.py from SNIPER-mxnet (Apache License 2.0)
def __init__(self, vocab_size, tag2idx, embedding_dim, hidden_dim):
    super(BiLSTM_CRF, self).__init__()
    with self.name_scope():
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag2idx = tag2idx
        self.tagset_size = len(tag2idx)
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = rnn.LSTM(hidden_dim // 2, num_layers=1, bidirectional=True)
        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Dense(self.tagset_size)
        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = nd.random.normal(shape=(self.tagset_size, self.tagset_size))
        self.hidden = self.init_hidden()
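The constructor ends by calling self.init_hidden(), which this page does not show. A plausible sketch of that helper, assuming the usual Gluon state layout for a single-layer bidirectional LSTM (num_layers * num_directions, batch, units per direction):

def init_hidden(self):
    # Hypothetical helper (not shown on this page): fresh hidden and cell
    # states for a 1-layer bidirectional LSTM with hidden_dim // 2 units
    # per direction and batch size 1.
    return [nd.random.normal(shape=(2, 1, self.hidden_dim // 2)),
            nd.random.normal(shape=(2, 1, self.hidden_dim // 2))]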
Example #2
Source File: lstm_crf.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def __init__(self, vocab_size, tag2idx, embedding_dim, hidden_dim):
    super(BiLSTM_CRF, self).__init__()
    with self.name_scope():
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag2idx = tag2idx
        self.tagset_size = len(tag2idx)
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = rnn.LSTM(hidden_dim // 2, num_layers=1, bidirectional=True)
        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Dense(self.tagset_size)
        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = self.params.get("crf_transition_matrix",
                                           shape=(self.tagset_size, self.tagset_size))
        self.hidden = self.init_hidden()
Example #3
Source File: test_gluon.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def test_summary():
    net = gluon.model_zoo.vision.resnet50_v1()
    net.initialize()
    net.summary(mx.nd.ones((32, 3, 224, 224)))

    net2 = nn.Sequential()
    with net2.name_scope():
        net2.add(nn.Embedding(40, 30))
        net2.add(gluon.rnn.LSTM(30))
        net2.add(nn.Dense(40, flatten=False, params=net2[0].params))
    net2.initialize()
    net2.summary(mx.nd.ones((80, 32)))

    net3 = gluon.rnn.LSTM(30)
    net3.initialize()
    begin_state = net3.begin_state(32)
    net3.summary(mx.nd.ones((80, 32, 5)), begin_state)

    net.hybridize()
    assert_raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224)))
Example #4
Source File: model.py from NER_BiLSTM_CRF_Chinese (Apache License 2.0)
def __init__(self, vocab_size, tag2idx, embedding_dim, hidden_dim,
             START_TAG="<START>", STOP_TAG="<STOP>", ctx=mx.cpu()):
    super(BiLSTM_CRF, self).__init__()
    with self.name_scope():
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag2idx = tag2idx
        self.START_TAG = START_TAG
        self.STOP_TAG = STOP_TAG
        self.tagset_size = len(tag2idx)
        self.ctx = ctx
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = rnn.LSTM(hidden_dim // 2, num_layers=1, bidirectional=True)
        self.hidden2tag = nn.Dense(self.tagset_size)
        self.transitions = nd.random.normal(shape=(self.tagset_size, self.tagset_size), ctx=ctx)
        self.hidden = self.init_hidden()
Example #5
Source File: embedding.py from gluon-ts (Apache License 2.0)
def __init__(
    self, num_bins: int, size: Optional[int] = None, *args, **kwargs
):
    super().__init__(*args, **kwargs)
    self.num_bins = num_bins
    if size is None:
        self.size = round(self.num_bins ** (1 / 4))
    else:
        self.size = size
    self.embedding = nn.Embedding(
        input_dim=self.num_bins, output_dim=self.size
    )

# noinspection PyMethodOverriding
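When size is omitted, the embedding width defaults to the fourth root of num_bins, a common rule of thumb for sizing categorical embeddings. A quick check of that arithmetic (the bin counts here are illustrative):

# round(num_bins ** (1 / 4)) for a few bin counts:
for num_bins in (16, 256, 1024, 100000):
    print(num_bins, round(num_bins ** (1 / 4)))
# 16 -> 2, 256 -> 4, 1024 -> 6, 100000 -> 18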
Example #6
Source File: lstm_crf.py from training_results_v0.6 (Apache License 2.0)
def __init__(self, vocab_size, tag2idx, embedding_dim, hidden_dim):
    super(BiLSTM_CRF, self).__init__()
    with self.name_scope():
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag2idx = tag2idx
        self.tagset_size = len(tag2idx)
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = rnn.LSTM(hidden_dim // 2, num_layers=1, bidirectional=True)
        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Dense(self.tagset_size)
        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = self.params.get("crf_transition_matrix",
                                           shape=(self.tagset_size, self.tagset_size))
        self.hidden = self.init_hidden()
Example #7
Source File: net.py from comment_toxic_CapsuleNet (MIT License)
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        net.add(nn.GlobalMaxPool1D())
        # net.add(FeatureBlock1())
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
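transpose, extendDim, PrimeConvCap, and CapFullyNGBlock are custom blocks defined elsewhere in net.py. As an illustration of the wrapper pattern only, a transpose block could look like this (an assumption, not the project's actual code):

class transpose(nn.HybridBlock):
    # Wraps F.transpose so an axis permutation can be added to nn.Sequential.
    def __init__(self, axes, **kwargs):
        super(transpose, self).__init__(**kwargs)
        self.axes = axes

    def hybrid_forward(self, F, x):
        return F.transpose(x, axes=self.axes)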
Example #8
Source File: net.py from comment_toxic_CapsuleNet (MIT License)
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=2, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH, 1)))
        # net.add(nn.Conv2D(128, kernel_size=(101, 1), padding=(50, 0), groups=128, activation='relu'))
        net.add(PrimeConvCap(8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # net.add(AdvConvCap(8, 32, 8, 32, kernel_size=(1, 1), padding=(0, 0)))
        net.add(CapFullyBlock(8 * config.MAX_LENGTH / 2, num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8 * (config.MAX_LENGTH - 8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
Example #9
Source File: tree_lstm.py from training_results_v0.6 (Apache License 2.0)
def __init__(self, sim_hidden_size, rnn_hidden_size, embed_in_size, embed_dim, num_classes):
    super(SimilarityTreeLSTM, self).__init__()
    with self.name_scope():
        self.embed = nn.Embedding(embed_in_size, embed_dim, prefix='word_embed_')
        self.childsumtreelstm = ChildSumLSTMCell(rnn_hidden_size, input_size=embed_dim)
        self.similarity = Similarity(sim_hidden_size, rnn_hidden_size, num_classes)
Example #10
Source File: tree_lstm.py from SNIPER-mxnet (Apache License 2.0)
def __init__(self, sim_hidden_size, rnn_hidden_size, embed_in_size, embed_dim, num_classes):
    super(SimilarityTreeLSTM, self).__init__()
    with self.name_scope():
        self.embed = nn.Embedding(embed_in_size, embed_dim, prefix='word_embed_')
        self.childsumtreelstm = ChildSumLSTMCell(rnn_hidden_size, input_size=embed_dim)
        self.similarity = Similarity(sim_hidden_size, rnn_hidden_size, num_classes)
Example #11
Source File: model.py from SNIPER-mxnet (Apache License 2.0)
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)

        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

        self.num_hidden = num_hidden
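Only the constructor is shown here; the matching forward pass would chain embedding, dropout, RNN, and decoder. A hedged sketch of such a method (an assumption about this model, not code quoted from model.py):

def forward(self, inputs, hidden):
    # (seq_len, batch) token ids -> (seq_len, batch, num_embed)
    emb = self.drop(self.encoder(inputs))
    output, hidden = self.rnn(emb, hidden)
    output = self.drop(output)
    # Flatten time and batch before projecting onto the vocabulary.
    decoded = self.decoder(output.reshape((-1, self.num_hidden)))
    return decoded, hidden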
Example #12
Source File: rl_controller.py from autogluon (Apache License 2.0)
def __init__(self, kwspaces, softmax_temperature=1.0, hidden_size=100,
             ctx=mx.cpu(), **kwargs):
    super().__init__(**kwargs)
    self.softmax_temperature = softmax_temperature
    self.spaces = list(kwspaces.items())
    self.hidden_size = hidden_size
    self.context = ctx

    # only support Categorical space for now
    self.num_tokens = []
    for _, space in self.spaces:
        assert isinstance(space, Categorical)
        self.num_tokens.append(len(space))
    num_total_tokens = sum(self.num_tokens)

    # controller lstm
    self.encoder = nn.Embedding(num_total_tokens, hidden_size)
    self.lstm = mx.gluon.rnn.LSTMCell(input_size=hidden_size, hidden_size=hidden_size)
    self.decoders = nn.Sequential()
    for idx, size in enumerate(self.num_tokens):
        decoder = nn.Dense(in_units=hidden_size, units=size)
        self.decoders.add(decoder)

    def _init_hidden(batch_size):
        zeros = mx.nd.zeros((batch_size, hidden_size), ctx=self.context)
        return zeros, zeros.copy()

    def _get_default_hidden(key):
        return mx.nd.zeros((key, hidden_size), ctx=self.context)

    self.static_init_hidden = keydefaultdict(_init_hidden)
    self.static_inputs = keydefaultdict(_get_default_hidden)
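keydefaultdict is a helper defined elsewhere in autogluon. The usual recipe it follows, shown here as an assumption rather than autogluon's exact code, is a defaultdict whose factory receives the missing key:

from collections import defaultdict

class keydefaultdict(defaultdict):
    # Unlike plain defaultdict, the factory is called with the missing key,
    # so static_init_hidden[batch_size] can build state for that batch size.
    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory(key)
        return value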
Example #13
Source File: feature.py from gluon-ts (Apache License 2.0)
def __init__(
    self,
    cardinalities: List[int],
    embedding_dims: List[int],
    dtype: DType = np.float32,
    **kwargs,
) -> None:
    super().__init__(**kwargs)

    assert (
        len(cardinalities) > 0
    ), "Length of `cardinalities` list must be greater than zero"
    assert len(cardinalities) == len(
        embedding_dims
    ), "Length of `cardinalities` and `embedding_dims` should match"
    assert all(
        c > 0 for c in cardinalities
    ), "Elements of `cardinalities` should be > 0"
    assert all(
        d > 0 for d in embedding_dims
    ), "Elements of `embedding_dims` should be > 0"

    self.__num_features = len(cardinalities)
    self.dtype = dtype

    def create_embedding(i: int, c: int, d: int) -> nn.Embedding:
        embedding = nn.Embedding(
            c, d, prefix=f"cat_{i}_embedding_", dtype=self.dtype
        )
        self.register_child(embedding)
        return embedding

    with self.name_scope():
        self.__embedders = [
            create_embedding(i, c, d)
            for i, (c, d) in enumerate(zip(cardinalities, embedding_dims))
        ]

# noinspection PyMethodOverriding,PyPep8Naming
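Each embedder handles one categorical feature column, and the per-column outputs are concatenated into a single feature vector. A self-contained sketch of that pattern with made-up cardinalities and dimensions (this is not gluon-ts's actual forward code):

import mxnet as mx
from mxnet.gluon import nn

emb_a = nn.Embedding(3, 2)    # feature A: cardinality 3 -> 2-dim vectors
emb_b = nn.Embedding(10, 4)   # feature B: cardinality 10 -> 4-dim vectors
emb_a.initialize()
emb_b.initialize()

features = mx.nd.array([[0, 7],
                        [2, 1]])                     # (batch, num_features)
col_a, col_b = mx.nd.split(features, num_outputs=2, axis=1)
out = mx.nd.concat(emb_a(col_a.reshape((-1,))),
                   emb_b(col_b.reshape((-1,))), dim=1)
print(out.shape)  # (2, 6)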
Example #14
Source File: model.py from deeplearning-benchmark (Apache License 2.0)
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu',
                               dropout=dropout, input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, activation='tanh',
                               dropout=dropout, input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)

        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

        self.num_hidden = num_hidden
Example #15
Source File: mxnet_model.py from char-rnn-text-generation (MIT License)
def __init__(self, vocab_size=VOCAB_SIZE, embedding_size=32,
             rnn_size=128, num_layers=2, drop_rate=0.0, **kwargs):
    super(Model, self).__init__(**kwargs)
    self.args = {"vocab_size": vocab_size, "embedding_size": embedding_size,
                 "rnn_size": rnn_size, "num_layers": num_layers,
                 "drop_rate": drop_rate}
    with self.name_scope():
        self.encoder = nn.Embedding(vocab_size, embedding_size)
        self.dropout = nn.Dropout(drop_rate)
        self.rnn = rnn.LSTM(rnn_size, num_layers, dropout=drop_rate,
                            input_size=embedding_size)
        self.decoder = nn.Dense(vocab_size, in_units=rnn_size)
Example #16
Source File: model.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)

        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

        self.num_hidden = num_hidden
Example #17
Source File: model.py from training_results_v0.6 (Apache License 2.0)
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)

        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

        self.num_hidden = num_hidden
Example #18
Source File: TextEXAM_multi-class.py from AAAI_2019_EXAM (GNU General Public License v2.0)
def __init__(self):
    super(Net, self).__init__()
    with self.name_scope():
        self.embedding = nn.Embedding(vocab_size, region_size * emb_size)
        self.embedding_region = nn.Embedding(vocab_size, emb_size)
        self.max_pool = nn.GlobalMaxPool1D()
        self.dense = nn.Dense(n_classes)
        self.dense1 = nn.Dense(max_sequence_length * 2, activation='relu')
        self.dense2 = nn.Dense(1)
Example #19
Source File: TextEXAM_multi-label.py from AAAI_2019_EXAM (GNU General Public License v2.0)
def __init__(self, **kwargs):
    super(SMN_Last, self).__init__(**kwargs)
    with self.name_scope():
        self.Embed = nn.Embedding(411721, 256)
        # agg param
        self.gru = rnn.GRU(1024, 2, layout='NTC')
        self.mlp_1 = nn.Dense(units=60, flatten=False, activation='relu')
        self.mlp_2 = nn.Dense(units=1, flatten=False)
        # lstm param
        self.topic_embedding = self.params.get('param_test', shape=(1024, 2000))
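Unlike nn.Embedding, topic_embedding above is a raw Parameter created through self.params.get(), so forward code must read it explicitly with .data(). A minimal illustration of that pattern (a standalone sketch, not code from TextEXAM_multi-label.py):

import mxnet as mx
from mxnet import gluon

class TopicScorer(gluon.Block):
    def __init__(self, **kwargs):
        super(TopicScorer, self).__init__(**kwargs)
        # A raw Parameter; .data() materializes it at forward time.
        self.topic_embedding = self.params.get('param_test', shape=(1024, 2000))

    def forward(self, hidden):
        return mx.nd.dot(hidden, self.topic_embedding.data())

scorer = TopicScorer()
scorer.initialize()
print(scorer(mx.nd.ones((4, 1024))).shape)  # (4, 2000)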
Example #20
Source File: test_gluon.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def test_sparse_hybrid_block_grad():
    class Embedding(mx.gluon.HybridBlock):
        def __init__(self, num_tokens, embedding_size):
            super(Embedding, self).__init__()
            self.num_tokens = num_tokens
            with self.name_scope():
                self.embedding = mx.gluon.nn.Embedding(
                    num_tokens, embedding_size, sparse_grad=True)

        def hybrid_forward(self, F, words):
            emb = self.embedding(words)
            return emb + F.ones_like(emb)

    embedding = Embedding(20, 3)
    embedding.initialize()
    embedding.hybridize()

    with mx.autograd.record():
        emb0 = embedding(mx.nd.arange(10)).sum()
        emb1 = embedding(mx.nd.arange(10)).sum()
        loss = emb0 + emb1
        loss.backward()
    grad = embedding.embedding.weight.grad().asnumpy()
    assert (grad[:10] == 2).all()
    assert (grad[10:] == 0).all()
Example #21
Source File: test_gluon.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def test_dtype():
    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.cast('float64')
    with mx.autograd.record():
        y = net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))
        y.backward()

    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.hybridize()
    net(mx.nd.ones((16, 3, 32, 32), dtype='float32'))

    net.cast('float64')
    net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))

    mx.nd.waitall()

    class Net(gluon.Block):
        def __init__(self, in_dim, output_dim):
            super(Net, self).__init__()
            with self.name_scope():
                self.embed = gluon.nn.Embedding(input_dim=in_dim,
                                                output_dim=output_dim,
                                                dtype=np.float64)
                self.dense = gluon.nn.Dense(2, dtype=np.float64)

        def forward(self, x):
            e = self.embed(x)
            assert e.dtype == np.float64
            y = self.dense(e)
            assert y.dtype == np.float64
            return y

    net = Net(5, 10)
    net.initialize()
    out = net(mx.nd.ones((3,), dtype=np.float64))
    mx.nd.waitall()
Example #22
Source File: test_gluon.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def test_embedding():
    def check_embedding(sparse_grad):
        layer = gluon.nn.Embedding(10, 100, sparse_grad=sparse_grad)
        layer.initialize()
        x = mx.nd.array([3, 4, 2, 0, 1])
        with mx.autograd.record():
            y = layer(x)
            y.backward()
        assert (layer.weight.grad().asnumpy()[:5] == 1).all()
        assert (layer.weight.grad().asnumpy()[5:] == 0).all()

    def check_embedding_large_input(sparse_grad):
        # Note: the sparse_grad argument is unused here; the test always
        # builds the layer with sparse_grad=True, so .grad().data below is
        # the values array of a row_sparse gradient.
        embedding = mx.gluon.nn.Embedding(10, 1, sparse_grad=True)
        embedding.initialize()
        embedding.hybridize()
        shape = (20481,)
        with mx.autograd.record():
            emb_in = embedding(mx.nd.ones(shape))
            loss = emb_in.sum()
            loss.backward()
        assert embedding.weight.grad().data.sum().asscalar() == 20481

    check_embedding(True)
    check_embedding(False)
    check_embedding_large_input(True)
    check_embedding_large_input(False)
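sparse_grad=True makes the embedding produce a row_sparse gradient, so an optimizer only needs to touch the rows that were actually looked up. A minimal training-step sketch using standard Gluon APIs (sizes and hyperparameters are illustrative):

import mxnet as mx
from mxnet import gluon

layer = gluon.nn.Embedding(10, 4, sparse_grad=True)
layer.initialize()
trainer = gluon.Trainer(layer.collect_params(), 'sgd', {'learning_rate': 0.1})

with mx.autograd.record():
    loss = layer(mx.nd.array([1, 2, 3])).sum()
loss.backward()
trainer.step(batch_size=3)  # only the looked-up rows receive updates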
Example #23
Source File: tree_lstm.py from dynamic-training-with-apache-mxnet-on-aws (Apache License 2.0)
def __init__(self, sim_hidden_size, rnn_hidden_size, embed_in_size, embed_dim, num_classes):
    super(SimilarityTreeLSTM, self).__init__()
    with self.name_scope():
        self.embed = nn.Embedding(embed_in_size, embed_dim, prefix='word_embed_')
        self.childsumtreelstm = ChildSumLSTMCell(rnn_hidden_size, input_size=embed_dim)
        self.similarity = Similarity(sim_hidden_size, rnn_hidden_size, num_classes)