Python torch.nn.functional.max_pool1d() Examples
The following are 30 code examples of torch.nn.functional.max_pool1d(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the torch.nn.functional module.
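Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two patterns that recur throughout: fixed-window pooling, and "max-over-time" pooling, where the kernel spans the entire sequence so each channel collapses to a single value. max_pool1d expects input of shape (batch, channels, length).

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 50)                    # (batch, channels, length)

# Fixed-window pooling: stride defaults to kernel_size, so this halves the length.
y = F.max_pool1d(x, kernel_size=2)           # (4, 8, 25)

# Max-over-time pooling: the kernel spans the whole sequence,
# reducing each channel to its single largest activation.
z = F.max_pool1d(x, kernel_size=x.size(2))   # (4, 8, 1)
z = z.squeeze(2)                             # (4, 8)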
Example #1
Source File: model.py From Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction with MIT License
def forward(self, x):
    x = self.embed(x)  # (N, W, D)
    if self.args.static:
        x = Variable(x)
    x = x.unsqueeze(1)  # (N, Ci, W, D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N, Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N, Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N, Co)
    x = torch.cat((x1, x2, x3), 1)  # (N, len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N, len(Ks)*Co)
    logit = self.fc1(x)  # (N, C)
    return logit
Example #2
Source File: train_clf.py From controlled-text-generation with BSD 3-Clause "New" or "Revised" License
def forward(self, inputs):
    inputs = self.word_emb(inputs)
    inputs = inputs.unsqueeze(1)
    x3 = F.relu(self.conv3(inputs)).squeeze()
    x4 = F.relu(self.conv4(inputs)).squeeze()
    x5 = F.relu(self.conv5(inputs)).squeeze()
    # max-over-time pooling
    x3 = F.max_pool1d(x3, x3.size(2)).squeeze()
    x4 = F.max_pool1d(x4, x4.size(2)).squeeze()
    x5 = F.max_pool1d(x5, x5.size(2)).squeeze()
    x = torch.cat([x3, x4, x5], dim=1)
    y = self.discriminator(x)
    return y
Example #3
Source File: model.py From Text-Classification-Models-Pytorch with MIT License
def forward(self, x):
    # x.shape = (seq_len, batch_size)
    embedded_sent = self.embeddings(x)
    # embedded_sent.shape = (seq_len, batch_size, embed_size)
    lstm_out, (h_n, c_n) = self.lstm(embedded_sent)
    # lstm_out.shape = (seq_len, batch_size, 2 * hidden_size)
    input_features = torch.cat([lstm_out, embedded_sent], 2).permute(1, 0, 2)
    # input_features.shape = (batch_size, seq_len, embed_size + 2*hidden_size)
    linear_output = self.tanh(self.W(input_features))
    # linear_output.shape = (batch_size, seq_len, hidden_size_linear)
    linear_output = linear_output.permute(0, 2, 1)  # reshape for max_pool1d
    max_out_features = F.max_pool1d(linear_output, linear_output.shape[2]).squeeze(2)
    # max_out_features.shape = (batch_size, hidden_size_linear)
    max_out_features = self.dropout(max_out_features)
    final_out = self.fc(max_out_features)
    return self.softmax(final_out)
Example #4
Source File: modules.py From BAMnet with Apache License 2.0
def forward(self, x, x_len=None):
    """x: [batch_size * max_length]
    x_len: reserved
    """
    x = self.embed(x)
    if self.dropout:
        x = F.dropout(x, p=self.dropout, training=self.training)
    # turn (batch_size, seq_len, embed_size) into (batch_size, embed_size, seq_len) for conv1d
    x = x.transpose(1, 2)
    z = [conv(x) for conv in self.cnns]
    output = [F.max_pool1d(i, kernel_size=i.size(-1)).squeeze(-1) for i in z]
    if len(output) > 1:
        output = self.fc(torch.cat(output, -1))
    else:
        output = output[0]
    return None, output
Example #5
Source File: modules.py From BAMnet with Apache License 2.0
def update_coatt_cat_maxpool(self, query_embed, in_memory_embed, out_memory_embed, query_att,
                             atten_mask=None, ctx_mask=None, query_mask=None):
    attention = torch.bmm(query_embed,
                          in_memory_embed.view(in_memory_embed.size(0), -1, in_memory_embed.size(-1)).transpose(1, 2)
                          ).view(query_embed.size(0), query_embed.size(1), in_memory_embed.size(1), -1)  # bs * N * M * k
    if ctx_mask is not None:
        attention[:, :, :, -1] = ctx_mask.unsqueeze(1) * attention[:, :, :, -1].clone() - (1 - ctx_mask).unsqueeze(1) * INF
    if atten_mask is not None:
        attention = atten_mask.unsqueeze(1).unsqueeze(-1) * attention - (1 - atten_mask).unsqueeze(1).unsqueeze(-1) * INF
    if query_mask is not None:
        attention = query_mask.unsqueeze(2).unsqueeze(-1) * attention - (1 - query_mask).unsqueeze(2).unsqueeze(-1) * INF

    # Importance module
    kb_feature_att = F.max_pool1d(attention.view(attention.size(0), attention.size(1), -1).transpose(1, 2),
                                  kernel_size=attention.size(1)).squeeze(-1).view(attention.size(0), -1, attention.size(-1))
    kb_feature_att = torch.softmax(kb_feature_att, dim=-1).view(-1, kb_feature_att.size(-1)).unsqueeze(1)
    in_memory_embed = torch.bmm(kb_feature_att,
                                in_memory_embed.view(-1, in_memory_embed.size(2), in_memory_embed.size(-1))
                                ).squeeze(1).view(in_memory_embed.size(0), in_memory_embed.size(1), -1)
    out_memory_embed = out_memory_embed.sum(2)

    # Enhanced module
    attention = F.max_pool1d(attention.view(attention.size(0), -1, attention.size(-1)),
                             kernel_size=attention.size(-1)).squeeze(-1).view(attention.size(0), attention.size(1), attention.size(2))
    probs = torch.softmax(attention, dim=-1)
    new_query_embed = query_embed + query_att.unsqueeze(2) * torch.bmm(probs, out_memory_embed)
    probs2 = torch.softmax(attention, dim=1)
    kb_att = torch.bmm(query_att.unsqueeze(1), probs).squeeze(1)
    in_memory_embed = in_memory_embed + kb_att.unsqueeze(2) * torch.bmm(probs2.transpose(1, 2), new_query_embed)
    return new_query_embed, in_memory_embed, out_memory_embed
Example #6
Source File: Modules.py From GST-Tacotron with MIT License
def max_pool1d(inputs, kernel_size, stride=1, padding='same'):
    '''
    inputs: [N, T, C]
    outputs: [N, T // stride, C]
    '''
    inputs = inputs.transpose(1, 2)  # [N, C, T]
    if padding == 'same':
        left = (kernel_size - 1) // 2
        right = (kernel_size - 1) - left
        pad = (left, right)
    else:
        pad = (0, 0)
    inputs = F.pad(inputs, pad)
    outputs = F.max_pool1d(inputs, kernel_size, stride)  # [N, C, T]
    outputs = outputs.transpose(1, 2)  # [N, T, C]
    return outputs
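A quick usage sketch for the wrapper above (my own, not from the GST-Tacotron repo, and assuming the wrapper is in scope), checking that padding='same' with stride=1 preserves the time dimension. Note that F.pad defaults to zero padding, so edge maxima of all-negative signals would be lifted to zero; the wrapper accepts that trade-off.

import torch

inputs = torch.randn(2, 100, 80)              # [N, T, C]
outputs = max_pool1d(inputs, kernel_size=5)   # 'same' padding, stride=1
print(outputs.shape)                          # torch.Size([2, 100, 80])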
Example #7
Source File: model.py From controlled-text-generation with BSD 3-Clause "New" or "Revised" License
def forward_discriminator_embed(self, inputs):
    """Inputs must be embeddings: mbsize x seq_len x emb_dim"""
    inputs = inputs.unsqueeze(1)  # mbsize x 1 x seq_len x emb_dim
    x3 = F.relu(self.conv3(inputs)).squeeze()
    x4 = F.relu(self.conv4(inputs)).squeeze()
    x5 = F.relu(self.conv5(inputs)).squeeze()
    # max-over-time pooling
    x3 = F.max_pool1d(x3, x3.size(2)).squeeze()
    x4 = F.max_pool1d(x4, x4.size(2)).squeeze()
    x5 = F.max_pool1d(x5, x5.size(2)).squeeze()
    x = torch.cat([x3, x4, x5], dim=1)
    y = self.disc_fc(x)
    return y
Example #8
Source File: pytorch_model_test.py From onnx-coreml with MIT License
def test_conv1d_pool1d(self, minimum_ios_deployment_target='13'):
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv1d(in_channels=4, out_channels=32, kernel_size=3, stride=1, padding=1)
            self.conv2 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)

        def forward(self, x):
            x = x.permute(0, 2, 1)
            x = self.conv1(x)
            x = F.relu(x)
            x = F.max_pool1d(x, 2)
            x = self.conv2(x)
            x = F.relu(x)
            return x

    torch_model = Net()  # type: ignore
    torch_model.train(False)
    _test_torch_model_single_io(torch_model, (2, 10, 4), (2, 10, 4),
                                minimum_ios_deployment_target=minimum_ios_deployment_target)
Example #9
Source File: conv_maxpool.py From fastNLP with Apache License 2.0
def forward(self, x, mask=None):
    r"""
    :param torch.FloatTensor x: batch_size x max_len x input_size, typically the output of an embedding layer
    :param mask: batch_size x max_len, 0 at padded positions. Padding does not affect the convolution,
        and max-pooling will never select a padded position.
    :return:
    """
    # [N, L, C] -> [N, C, L]
    x = torch.transpose(x, 1, 2)
    # convolution
    xs = [self.activation(conv(x)) for conv in self.convs]  # [[N, C, L], ...]
    if mask is not None:
        mask = mask.unsqueeze(1)  # B x 1 x L
        xs = [x.masked_fill_(mask.eq(False), float('-inf')) for x in xs]
    # max-pooling
    xs = [F.max_pool1d(input=i, kernel_size=i.size(2)).squeeze(2) for i in xs]  # [[N, C], ...]
    return torch.cat(xs, dim=-1)  # [N, C]
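The masking trick above is worth isolating: filling padded positions with -inf before pooling guarantees the max can only come from real tokens. A minimal standalone illustration (mine, not fastNLP's):

import torch
import torch.nn.functional as F

feats = torch.randn(1, 3, 5)                   # [N, C, L]
mask = torch.tensor([[1, 1, 1, 0, 0]]).bool()  # last two positions are padding
feats = feats.masked_fill(~mask.unsqueeze(1), float('-inf'))
pooled = F.max_pool1d(feats, kernel_size=feats.size(2)).squeeze(2)  # [N, C]
# every pooled value comes from an unmasked position
print(pooled)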
Example #10
Source File: Encoder.py From fastNLP with Apache License 2.0
def forward(self, input):
    # input: a batch of Example objects, [batch_size, N, seq_len]
    batch_size, N, _ = input.size()
    input = input.view(-1, input.size(2))  # [batch_size*N, L]
    input_sent_len = ((input != 0).sum(dim=1)).int()  # [batch_size*N, 1]
    enc_embed_input = self.embed(input)  # [batch_size*N, L, D]
    input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen)))
                              for sentlen in input_sent_len])
    if self._hps.cuda:
        input_pos = input_pos.cuda()
    enc_pos_embed_input = self.position_embedding(input_pos.long())  # [batch_size*N, D]
    enc_conv_input = enc_embed_input + enc_pos_embed_input
    enc_conv_input = enc_conv_input.unsqueeze(1)  # (batch * N, Ci, L, D)
    enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs]  # kernel_sizes * (batch*N, Co, W)
    enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output]  # kernel_sizes * (batch*N, Co)
    sent_embedding = torch.cat(enc_maxpool_output, 1)  # (batch*N, Co * kernel_sizes)
    sent_embedding = sent_embedding.view(batch_size, N, -1)
    return sent_embedding
Example #11
Source File: decoder.py From KET with GNU General Public License v3.0
def forward(self, x, src_states, src_mask, tgt_mask):
    """
    x: (batch_size, tgt_seq_len, d_model)
    src_states: (batch_size, src_seq_len, d_model)
    src_mask: (batch_size, 1, src_seq_len)
    tgt_mask: (batch_size, tgt_seq_len, tgt_seq_len)
    """
    if print_dims:
        print("{0}: x: type: {1}, shape: {2}".format(self.__class__.__name__, x.type(), x.shape))
        print("{0}: src_states: type: {1}, shape: {2}".format(self.__class__.__name__, src_states.type(), src_states.shape))
        print("{0}: src_mask: type: {1}, shape: {2}".format(self.__class__.__name__, src_mask.type(), src_mask.shape))
        print("{0}: tgt_mask: type: {1}, shape: {2}".format(self.__class__.__name__, tgt_mask.type(), tgt_mask.shape))
    for layer in self.layers:
        x = layer(x, src_states, src_mask, tgt_mask)
    x = self.norm(x)  # (batch_size, tgt_seq_len, d_model)
    if print_dims:
        print("{0}: x (output): type: {1}, shape: {2}".format(self.__class__.__name__, x.type(), x.shape))
    # add max pooling across sequences
    x = F.max_pool1d(x.permute(0, 2, 1), x.shape[1]).squeeze(-1)  # (batch_size, d_model)
    return x
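As an aside, pooling with a kernel that spans the full sequence, as the last line above does, is equivalent to a plain max reduction over that dimension. A small sketch (not from the KET repo) assuming a (batch_size, seq_len, d_model) tensor:

import torch
import torch.nn.functional as F

x = torch.randn(2, 7, 16)                                     # (batch_size, seq_len, d_model)
a = F.max_pool1d(x.permute(0, 2, 1), x.shape[1]).squeeze(-1)  # (batch_size, d_model)
b = x.max(dim=1).values                                       # same result
print(torch.allclose(a, b))                                   # True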
Example #12
Source File: model.py From conv-emotion with MIT License
def forward(self, x, umask):
    num_utt, batch, num_words = x.size()
    x = x.type(LongTensor)  # (num_utt, batch, num_words)
    x = x.view(-1, num_words)  # -> (num_utt * batch, num_words)
    emb = self.embedding(x)  # -> (num_utt * batch, num_words, embedding_dim)
    emb = emb.transpose(-2, -1).contiguous()  # -> (num_utt * batch, embedding_dim, num_words)
    convoluted = [F.relu(conv(emb)) for conv in self.convs]
    pooled = [F.max_pool1d(c, c.size(2)).squeeze() for c in convoluted]
    concated = torch.cat(pooled, 1)
    features = F.relu(self.fc(self.dropout(concated)))  # (num_utt * batch, embedding_dim//2) -> (num_utt * batch, output_size)
    features = features.view(num_utt, batch, -1)  # -> (num_utt, batch, output_size)
    mask = umask.unsqueeze(-1).type(FloatTensor)  # (batch, num_utt) -> (batch, num_utt, 1)
    mask = mask.transpose(0, 1)  # -> (num_utt, batch, 1)
    mask = mask.repeat(1, 1, self.feature_dim)  # -> (num_utt, batch, output_size)
    features = features * mask  # (num_utt, batch, output_size)
    return features
Example #13
Source File: model.py From conv-emotion with MIT License
def forward(self, x, umask):
    num_utt, batch, num_words = x.size()
    x = x.type(LongTensor)  # (num_utt, batch, num_words)
    x = x.view(-1, num_words)  # -> (num_utt * batch, num_words)
    emb = self.embedding(x)  # -> (num_utt * batch, num_words, 300)
    emb = emb.transpose(-2, -1).contiguous()  # -> (num_utt * batch, 300, num_words)
    convoluted = [F.relu(conv(emb)) for conv in self.convs]
    pooled = [F.max_pool1d(c, c.size(2)).squeeze() for c in convoluted]
    concated = torch.cat(pooled, 1)
    features = F.relu(self.fc(self.dropout(concated)))  # (num_utt * batch, 150) -> (num_utt * batch, 100)
    features = features.view(num_utt, batch, -1)  # -> (num_utt, batch, 100)
    mask = umask.unsqueeze(-1).type(FloatTensor)  # (batch, num_utt) -> (batch, num_utt, 1)
    mask = mask.transpose(0, 1)  # -> (num_utt, batch, 1)
    mask = mask.repeat(1, 1, self.feature_dim)  # -> (num_utt, batch, 100)
    features = features * mask  # (num_utt, batch, 100)
    return features
Example #14
Source File: model.py From conv-emotion with MIT License
def forward(self, x, umask):
    num_utt, batch, num_words = x.size()
    x = x.type(LongTensor)  # (num_utt, batch, num_words)
    x = x.view(-1, num_words)  # -> (num_utt * batch, num_words)
    emb = self.embedding(x)  # -> (num_utt * batch, num_words, 300)
    emb = emb.transpose(-2, -1).contiguous()  # -> (num_utt * batch, 300, num_words)
    convoluted = [F.relu(conv(emb)) for conv in self.convs]
    pooled = [F.max_pool1d(c, c.size(2)).squeeze() for c in convoluted]
    concated = torch.cat(pooled, 1)
    features = F.relu(self.fc(self.dropout(concated)))  # (num_utt * batch, 150) -> (num_utt * batch, 100)
    features = features.view(num_utt, batch, -1)  # -> (num_utt, batch, 100)
    mask = umask.unsqueeze(-1).type(FloatTensor)  # (batch, num_utt) -> (batch, num_utt, 1)
    mask = mask.transpose(0, 1)  # -> (num_utt, batch, 1)
    mask = mask.repeat(1, 1, self.feature_dim)  # -> (num_utt, batch, 100)
    features = features * mask  # (num_utt, batch, 100)
    return features
Example #15
Source File: model_BiGRU.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, input):
    embed = self.embed(input)
    embed = self.dropout(embed)
    input = embed.view(len(input), embed.size(1), -1)
    # GRU
    gru_out, _ = self.bigru(input)
    gru_out = torch.transpose(gru_out, 0, 1)
    gru_out = torch.transpose(gru_out, 1, 2)
    # pooling
    gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
    gru_out = torch.tanh(gru_out)
    # linear
    y = self.hidden2label(gru_out)
    logit = y
    return logit
Example #16
Source File: model_CLSTM.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = self.dropout(cnn_x)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # LSTM
    lstm_out, _ = self.lstm(cnn_x)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # linear
    cnn_lstm_out = self.hidden2label1(torch.tanh(lstm_out))
    cnn_lstm_out = self.hidden2label2(torch.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
Example #17
Source File: model_CGRU.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = self.dropout(cnn_x)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # GRU
    lstm_out, _ = self.gru(cnn_x)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # linear
    cnn_lstm_out = self.hidden2label1(torch.tanh(lstm_out))
    cnn_lstm_out = self.hidden2label2(torch.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
Example #18
Source File: model_LSTM.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    embed = self.embed(x)
    embed = self.dropout_embed(embed)
    x = embed.view(len(x), embed.size(1), -1)
    # LSTM
    lstm_out, _ = self.lstm(x)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    # pooling
    lstm_out = torch.tanh(lstm_out)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    lstm_out = torch.tanh(lstm_out)
    # linear
    logit = self.hidden2label(lstm_out)
    return logit
Example #19
Source File: model_CNN_MUI.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    x_no_static = self.embed_no_static(x)
    x_static = self.embed_static(x)
    x = torch.stack([x_static, x_no_static], 1)
    x = self.dropout(x)
    if self.args.batch_normalizations is True:
        x = [F.relu(self.convs1_bn(conv(x))).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    else:
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    x = torch.cat(x, 1)
    x = self.dropout(x)  # (N, len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1(x)
        logit = self.fc2(F.relu(x))
    else:
        x = self.fc1(x)
        logit = self.fc2(F.relu(x))
    return logit
Example #20
Source File: model_CBiLSTM.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    embed = self.embed(x)
    # CNN
    embed = self.dropout(embed)
    cnn_x = embed
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # BiLSTM
    bilstm_out, _ = self.bilstm(cnn_x)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    # linear
    cnn_bilstm_out = self.hidden2label1(torch.tanh(bilstm_out))
    cnn_bilstm_out = self.hidden2label2(torch.tanh(cnn_bilstm_out))
    # dropout
    logit = self.dropout(cnn_bilstm_out)
    return logit
Example #21
Source File: model_HighWay_BiLSTM_1.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    x = self.embed(x)
    x = self.dropout(x)
    bilstm_out, _ = self.bilstm(x)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
    bilstm_out = bilstm_out.squeeze(2)
    hidden2label = self.hidden2label1(torch.tanh(bilstm_out))
    gate_layer = torch.sigmoid(self.gate_layer(bilstm_out))
    # calculate highway layer values
    gate_hidden_layer = torch.mul(hidden2label, gate_layer)
    # writing it as follows also runs, but does not match the highway network formula:
    # gate_input = torch.mul((1 - gate_layer), hidden2label)
    gate_input = torch.mul((1 - gate_layer), bilstm_out)
    highway_output = torch.add(gate_hidden_layer, gate_input)
    logit = self.logit_layer(highway_output)
    return logit
Example #22
Source File: model_CNN.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    x = self.embed(x)  # (N, W, D)
    x = self.dropout_embed(x)
    x = x.unsqueeze(1)  # (N, Ci, W, D)
    if self.args.batch_normalizations is True:
        x = [self.convs1_bn(torch.tanh(conv(x))).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    else:
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    x = torch.cat(x, 1)
    x = self.dropout(x)  # (N, len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1_bn(self.fc1(x))
        logit = self.fc2_bn(self.fc2(torch.tanh(x)))
    else:
        logit = self.fc(x)
    return logit
Example #23
Source File: model_DeepCNN.py From cnn-lstm-bilstm-deepcnn-clstm-in-pytorch with Apache License 2.0
def forward(self, x):
    one_layer = self.embed(x)  # (N, W, D)  torch.Size([64, 43, 300])
    one_layer = one_layer.unsqueeze(1)  # (N, Ci, W, D)  torch.Size([64, 1, 43, 300])
    # first conv layer
    one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1]  # torch.Size([64, 100, 36])
    # second conv layer
    two_layer = [F.relu(conv(one_layer.unsqueeze(1))).squeeze(3)
                 for (conv, one_layer) in zip(self.convs2, one_layer)]
    # pooling
    output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]  # torch.Size([64, 100]) each
    output = torch.cat(output, 1)  # torch.Size([64, 300])
    # dropout
    output = self.dropout(output)
    # linear
    output = self.fc1(F.relu(output))
    logit = self.fc2(F.relu(output))
    return logit
Example #24
Source File: cnn.py From Distributional-Signatures with MIT License
def compute_score(self, data, normalize=False):
    # prepare the input
    ebd = self.ebd(data)
    aux = self.aux(data)

    # (batch_size, doc_len, input_dim)
    x = torch.cat([ebd, aux], dim=-1).detach()

    # (out_channels, in_channels, kernel_size)
    w = [c.weight.data for c in self.convs]
    # (kernel_size, out_channels, in_channels)
    w = [c.permute(2, 0, 1) for c in w]
    # (out_channels * kernel_size, in_channels)
    w = [c.reshape(-1, self.input_dim) for c in w]

    # (batch_size, doc_len, out_channels * kernel_size)
    x = [x @ c.t() for c in w]
    # (batch_size, doc_len)
    x = [F.max_pool1d(z, z.shape[-1]).squeeze(-1) for z in x]

    if normalize:
        x = [x / np.mean(s) for x, s in zip(x, self.scores)]

    return x
Example #25
Source File: test_ops.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_max_pool1d(self, context, input_shape, kernel_size, stride, pad, ceil_mode):
    if pad > kernel_size / 2:
        # Because this test is xfail, we have to fail rather than
        # just return here, otherwise these test cases unexpectedly pass.
        # This can be changed to `return` once the above radar
        # is fixed and the test is no longer xfail.
        raise ValueError("pad must be less than half the kernel size")

    test_input = torch.rand(input_shape)
    expected_result = F.max_pool1d(
        test_input,
        kernel_size=kernel_size,
        stride=stride,
        padding=pad,
        ceil_mode=ceil_mode,
    )
    self._test_pool(
        context,
        test_input,
        [[kernel_size], [stride], [pad], [1], ceil_mode],
        "max_pool1d",
        ops.max_pool1d,
        expected_result,
    )
Example #26
Source File: encoders.py From Topic_Disc with MIT License
def forward(self, x):
    x = self.embed(x)  # (N, W, D)
    if self.static:
        x = Variable(x)
    x = x.unsqueeze(1)  # (N, Ci, W, D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N, Co, W), ...] * len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N, Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N, Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N, Co)
    x = torch.cat((x1, x2, x3), 1)  # (N, len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N, len(Ks)*Co)
    logit = self.fc1(x)  # (N, C)
    return logit
Example #27
Source File: model.py From hedwig with Apache License 2.0
def forward(self, x, **kwargs):
    if self.mode == 'rand':
        word_input = self.embed(x)  # (batch, sent_len, embed_dim)
        x = word_input.unsqueeze(1)  # (batch, channel_input, sent_len, embed_dim)
    elif self.mode == 'static':
        static_input = self.static_embed(x)
        x = static_input.unsqueeze(1)  # (batch, channel_input, sent_len, embed_dim)
    elif self.mode == 'non-static':
        non_static_input = self.non_static_embed(x)
        x = non_static_input.unsqueeze(1)  # (batch, channel_input, sent_len, embed_dim)
    elif self.mode == 'multichannel':
        non_static_input = self.non_static_embed(x)
        static_input = self.static_embed(x)
        x = torch.stack([non_static_input, static_input], dim=1)  # (batch, channel_input=2, sent_len, embed_dim)
    else:
        print("Unsupported Mode")
        exit()
    x = [F.relu(self.conv1(x)).squeeze(3),
         F.relu(self.conv2(x)).squeeze(3),
         F.relu(self.conv3(x)).squeeze(3)]  # (batch, channel_output, ~=sent_len) * ks
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # max-over-time pooling, (batch, channel_output) * ks
    x = torch.cat(x, 1)  # (batch, channel_output * ks)
    x = self.dropout(x)
    logit = self.fc1(x)  # (batch, target_size)
    return logit
Example #28
Source File: model.py From hedwig with Apache License 2.0
def forward(self, x, **kwargs):
    if torch.cuda.is_available() and self.is_cuda_enabled:
        x = x.transpose(1, 2).type(torch.cuda.FloatTensor)
    else:
        x = x.transpose(1, 2).type(torch.FloatTensor)

    x = F.max_pool1d(F.relu(self.conv1(x)), 3)
    x = F.max_pool1d(F.relu(self.conv2(x)), 3)
    x = F.relu(self.conv3(x))
    x = F.relu(self.conv4(x))
    x = F.relu(self.conv5(x))
    x = F.relu(self.conv6(x))
    x = F.max_pool1d(x, x.size(2)).squeeze(2)

    x = F.relu(self.fc1(x.view(x.size(0), -1)))
    x = self.dropout(x)
    x = F.relu(self.fc2(x))
    x = self.dropout(x)
    return self.fc3(x)
Example #29
Source File: model.py From PJ_NLP with Apache License 2.0
def forward(self, input_tensors):
    feature = self.word_rep(input_tensors)
    aspect_i = input_tensors[2]
    aspect_v = self.AE(aspect_i)  # (N, L', D)
    feature = feature.view(1, feature.size()[0], -1)
    x = [torch.tanh(conv(feature.transpose(1, 2))) for conv in self.convs1]  # [(N, Co, L), ...] * len(Ks)
    y = [F.relu(conv(feature.transpose(1, 2)) + self.fc_aspect(aspect_v).unsqueeze(2)) for conv in self.convs2]
    x = [i * j for i, j in zip(x, y)]
    # pooling method
    x0 = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, Co), ...] * len(Ks)
    x0 = [i.view(i.size(0), -1) for i in x0]
    x0 = torch.cat(x0, 1)
    logit = self.fc1(x0)  # (N, C)
    return logit
Example #30
Source File: CNN_BiLSTM.py From korean-ner-cnn-bilstm with MIT License
def forward(self, x, x_char, x_pos, x_lex_embedding, lengths):
    x_word_embedding = self.embed(x)  # (batch, words, word_embedding)
    trainable_x_word_embedding = self.trainable_embed(x)

    char_output = []
    for i in range(x_char.size(1)):
        x_char_embedding = self.char_embed(x_char[:, i]).unsqueeze(1)  # (batch, channel_input, words, word_embedding)
        h_convs1 = [F.relu(conv(x_char_embedding)).squeeze(3) for conv in self.convs1]
        h_pools1 = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in h_convs1]  # [(batch, channel_out), ...] * len(kernel_sizes)
        h_pools1 = torch.cat(h_pools1, 1)  # stack the pooled outputs into a single tensor
        h_pools1 = self.dropout(h_pools1)  # (N, len(Ks)*Co)
        out = h_pools1.unsqueeze(1)  # keep a word-level dimension
        char_output.append(out)
    char_output = torch.cat(char_output, 1)  # concatenate along the word dimension

    x_pos_embedding = self.pos_embed(x_pos)
    enhanced_embedding = torch.cat((char_output, x_word_embedding, trainable_x_word_embedding, x_pos_embedding), 2)  # concatenate along the embedding dimension (2)
    enhanced_embedding = self.dropout(enhanced_embedding)
    enhanced_embedding = torch.cat((enhanced_embedding, x_lex_embedding), 2)

    # packed -> (batch_size * real_length, embedding_dim), so the loss can be computed on the packed output
    packed = pack_padded_sequence(enhanced_embedding, lengths, batch_first=True)
    output_word, state_word = self.lstm(packed)
    logit = self.fc1(output_word[0])  # for packed sequences
    return logit