Python torch.nn.functional.avg_pool1d() Examples
The following are 16 code examples of torch.nn.functional.avg_pool1d(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
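
Before the examples, a minimal sketch of the call itself (the shapes below are illustrative assumptions, not taken from any of the projects): F.avg_pool1d slides an averaging window along the last dimension of a (batch, channels, length) tensor.

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 10)           # assumed toy input: batch=2, channels=4, length=10
y = F.avg_pool1d(x, kernel_size=2)  # stride defaults to kernel_size -> shape (2, 4, 5)
z = F.avg_pool1d(x, x.size(2))      # window spans the whole length -> global average, shape (2, 4, 1)

Several of the examples below use the second form as a global average pool over the time axis.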
Example #1
Source File: test_ops.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_avg_pool1d(
    self, context, input_shape, kernel_size, stride, pad, include_pad, ceil_mode,
):
    if pad > kernel_size / 2:
        return
    test_input = torch.rand(input_shape)
    expected_result = F.avg_pool1d(
        test_input,
        kernel_size=kernel_size,
        stride=stride,
        padding=pad,
        ceil_mode=ceil_mode,
        count_include_pad=include_pad,
    )
    self._test_pool(
        context,
        test_input,
        [[kernel_size], [stride], [pad], ceil_mode, not include_pad],
        "avg_pool1d",
        ops.avg_pool1d,
        expected_result,
    )
Example #2
Source File: networks.py From 2D-Motion-Retargeting with MIT License
def __init__(self, mot_en_channels, body_en_channels, view_en_channels, de_channels):
    super(AutoEncoder3x, self).__init__()

    assert mot_en_channels[0] == de_channels[-1] and \
           mot_en_channels[-1] + body_en_channels[-1] + view_en_channels[-1] == de_channels[0]

    self.mot_encoder = Encoder(mot_en_channels)
    self.body_encoder = Encoder(body_en_channels, kernel_size=7,
                                global_pool=F.max_pool1d, convpool=nn.MaxPool1d, compress=True)
    self.view_encoder = Encoder(view_en_channels, kernel_size=7,
                                global_pool=F.avg_pool1d, convpool=nn.AvgPool1d, compress=True)
    self.decoder = Decoder(de_channels)
Example #3
Source File: network.py From reconstructing_faces_from_voices with GNU General Public License v3.0
def forward(self, x):
    x = self.model(x)
    x = F.avg_pool1d(x, x.size()[2], stride=1)
    x = x.view(x.size()[0], -1, 1, 1)
    return x
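
A side note on the pattern above (which also appears in Examples #6 and #12): calling F.avg_pool1d with a kernel that covers the entire last dimension is simply a mean over that dimension. A small sketch with an assumed toy shape:

import torch
import torch.nn.functional as F

x = torch.randn(4, 512, 300)         # assumed (batch, channels, time)
pooled = F.avg_pool1d(x, x.size(2))  # (4, 512, 1): global average over the time axis
assert torch.allclose(pooled, x.mean(dim=2, keepdim=True), atol=1e-6)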
Example #4
Source File: model.py From ZeroSpeech-TTS-without-T with MIT License
def conv_block(self, x, conv_layers, norm_layers, seg_len, res=True):
    out = x
    for layer in conv_layers:
        out = pad_layer(out, layer, self.seg_len)
        out = F.leaky_relu(out, negative_slope=self.ns)
    for layer in norm_layers:
        out = layer(out)
    if res:
        x_pad = F.pad(x, pad=(0, x.size(2) % 2), mode='constant' if seg_len < 64 else 'reflect')
        x_down = F.avg_pool1d(x_pad, kernel_size=2)
        out = x_down + out
    return out
Example #5
Source File: Decoder.py From pytorch_Joint-Word-Segmentation-and-POS-Tagging with Apache License 2.0
def batch_wordLstm(self, id_char, batch_length, encoder_out, state):
    """
    :param id_char: id word
    :param batch_length: batch count
    :param encoder_out: Encoder output
    :param state: Decoder state
    :return:
    """
    if id_char is 0:
        h, c, z = self.init_hidden_cell(batch_length)
    else:
        h, c = state.word_hiddens[-1], state.word_cells[-1]
        # copy with the pos features
        last_pos = torch.zeros(batch_length, device=self.device, requires_grad=True).long()
        pos_id_array = np.array(state.pos_id[-1])
        last_pos.data.copy_(torch.from_numpy(pos_id_array))
        last_pos_embed = self.dropout(self.pos_embed(last_pos))
        # copy with the word features
        batch_char_embed = []
        for id_batch, id_batch_value in enumerate(state.words_startindex[-1]):
            chars_embed = []
            last_word_len = 0
            if id_batch_value is -1:
                word_bucket = torch.zeros(1, 2 * self.config.rnn_hidden_dim, device=self.device, requires_grad=True)
                batch_char_embed.append(word_bucket)
                continue
            last_word_len = id_char - id_batch_value
            chars_embed.append((encoder_out.permute(1, 0, 2)[id_batch][id_batch_value:id_char].unsqueeze(0)))
            chars_embed = torch.cat(chars_embed, 1).permute(0, 2, 1)
            last_word_embed = F.avg_pool1d(chars_embed, chars_embed.size(2)).squeeze(2)
            batch_char_embed.append(last_word_embed)
        batch_char_embed = torch.cat(batch_char_embed, 0)
        concat = torch.cat((last_pos_embed, batch_char_embed), 1)
        z = self.dropout(torch.tanh(self.combine_linear(concat)))
    h_now, c_now = self.lstmcell(z, (h, c))
    return h_now, c_now
Example #6
Source File: layers.py From nni with MIT License
def forward(self, input_tensor):
    return functional.avg_pool1d(input_tensor, input_tensor.size()[2:]).view(
        input_tensor.size()[:2]
    )
Example #7
Source File: models.py From caml-mimic with MIT License
def forward(self, x, target, desc_data=None, get_attention=False):
    # get embeddings and apply dropout
    x = self.embed(x)
    x = self.embed_drop(x)
    x = x.transpose(1, 2)

    if self.pool == 'max':
        import pdb; pdb.set_trace()
        x = F.max_pool1d(x)
    else:
        x = F.avg_pool1d(x)
    logits = F.sigmoid(self.final(x))
    loss = self._get_loss(logits, target, diffs)
    return yhat, loss, None
Example #8
Source File: CNN_RNN.py From SummaRuNNer with MIT License
def avg_pool1d(self, x, seq_lens):
    # x: [N, L, O_in]
    out = []
    for index, t in enumerate(x):
        t = t[:seq_lens[index], :]
        t = torch.t(t).unsqueeze(0)
        out.append(F.avg_pool1d(t, t.size(2)))

    out = torch.cat(out).squeeze(2)
    return out
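
A hypothetical usage sketch of the helper above (the same helper appears again in Example #9): it averages only the valid timesteps of each padded sequence, producing one vector per sequence. The shapes and lengths here are assumptions for illustration, not taken from SummaRuNNer.

import torch
import torch.nn.functional as F

def masked_avg_pool1d(x, seq_lens):
    # x: (N, L, D) padded batch; seq_lens: true length of each sequence
    out = []
    for i, t in enumerate(x):
        t = t[:seq_lens[i], :]                  # keep only the valid steps
        t = torch.t(t).unsqueeze(0)             # (1, D, seq_len)
        out.append(F.avg_pool1d(t, t.size(2)))  # (1, D, 1)
    return torch.cat(out).squeeze(2)            # (N, D)

x = torch.randn(3, 7, 16)
print(masked_avg_pool1d(x, [7, 4, 2]).shape)    # torch.Size([3, 16])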
Example #9
Source File: RNN_RNN.py From SummaRuNNer with MIT License
def avg_pool1d(self, x, seq_lens):
    # x: [N, L, O_in]
    out = []
    for index, t in enumerate(x):
        t = t[:seq_lens[index], :]
        t = torch.t(t).unsqueeze(0)
        out.append(F.avg_pool1d(t, t.size(2)))

    out = torch.cat(out).squeeze(2)
    return out
Example #10
Source File: model.py From castor with Apache License 2.0
def _get_blocks_for_sentence(self, sent):
    block_a = {}
    block_b = {}
    for ws in self.filter_widths:
        if np.isinf(ws):
            sent_flattened, sent_flattened_size = sent.contiguous().view(sent.size(0), 1, -1), sent.size(1) * sent.size(2)
            block_a[ws] = {
                'max': F.max_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'min': F.max_pool1d(-1 * sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'mean': F.avg_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1)
            }
            continue

        holistic_conv_out_max = self.holistic_conv_layers_max[ws - 1](sent)
        holistic_conv_out_min = self.holistic_conv_layers_min[ws - 1](sent)
        holistic_conv_out_mean = self.holistic_conv_layers_mean[ws - 1](sent)
        block_a[ws] = {
            'max': F.max_pool1d(holistic_conv_out_max, holistic_conv_out_max.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'min': F.max_pool1d(-1 * holistic_conv_out_min, holistic_conv_out_min.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'mean': F.avg_pool1d(holistic_conv_out_mean, holistic_conv_out_mean.size(2)).contiguous().view(-1, self.n_holistic_filters)
        }

        per_dim_conv_out_max = self.per_dim_conv_layers_max[ws - 1](sent)
        per_dim_conv_out_min = self.per_dim_conv_layers_min[ws - 1](sent)
        block_b[ws] = {
            'max': F.max_pool1d(per_dim_conv_out_max, per_dim_conv_out_max.size(2)).contiguous().view(-1, self.in_channels, self.n_per_dim_filters),
            'min': F.max_pool1d(-1 * per_dim_conv_out_min, per_dim_conv_out_min.size(2)).contiguous().view(-1, self.in_channels, self.n_per_dim_filters)
        }
    return block_a, block_b
Example #11
Source File: test_pyprof_nvtx.py From apex with BSD 3-Clause "New" or "Revised" License
def test_avg_pool1d(self):
    inp = torch.randn(1, 1, 28, device='cuda', dtype=self.dtype)
    out = F.avg_pool1d(inp, kernel_size=5, stride=2, padding=2,
                       ceil_mode=True, count_include_pad=False)
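
To make the flags in the call above concrete, here is a small CPU sketch (input values assumed) of how padding, ceil_mode and count_include_pad interact:

import torch
import torch.nn.functional as F

inp = torch.ones(1, 1, 28)
out = F.avg_pool1d(inp, kernel_size=5, stride=2, padding=2,
                   ceil_mode=True, count_include_pad=False)
# ceil_mode=True rounds the output length up: (28 + 2*2 - 5) / 2 + 1 = 14.5 -> 15 positions.
# count_include_pad=False divides each window only by the number of real (non-padded)
# elements, so the first window averages to 1.0 instead of 3/5.
print(out.shape, out[0, 0, 0].item())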
Example #12
Source File: aggregation_layers.py From vidreid_cosegmentation with Apache License 2.0
def forward(self, x, b, t):
    x = F.avg_pool2d(x, x.size()[2:])
    x = x.view(b, t, -1)
    x = x.permute(0, 2, 1)
    f = F.avg_pool1d(x, t)
    f = f.view(b, self.feat_dim)
    return f
Example #13
Source File: aggregation_layers.py From vidreid_cosegmentation with Apache License 2.0
def forward(self, x, b, t):
    x = F.avg_pool2d(x, x.size()[2:])
    x = x.view(b, t, -1)
    # apply LSTM
    output, (h_n, c_n) = self.lstm(x)
    output = output.permute(0, 2, 1)
    f = F.avg_pool1d(output, t)
    f = f.view(b, self.hidden_dim)
    return f
Example #14
Source File: mpcnn.py From sentence-similarity with MIT License
def _get_blocks_for_sentence(self, sent):
    block_a = {}
    block_b = {}
    for ws in self.filter_widths:
        if np.isinf(ws):
            sent_flattened, sent_flattened_size = sent.contiguous().view(sent.size(0), 1, -1), sent.size(1) * sent.size(2)
            block_a[ws] = {
                'max': F.max_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'min': F.max_pool1d(-1 * sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'mean': F.avg_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1)
            }
            continue

        holistic_conv_out = self.holistic_conv_layers[ws - 1](sent)
        block_a[ws] = {
            'max': F.max_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'min': F.max_pool1d(-1 * holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'mean': F.avg_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters)
        }

        per_dim_conv_out = self.per_dim_conv_layers[ws - 1](sent)
        block_b[ws] = {
            'max': F.max_pool1d(per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.in_channels, self.n_per_dim_filters),
            'min': F.max_pool1d(-1 * per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.in_channels, self.n_per_dim_filters)
        }
    return block_a, block_b
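
The 'min' entries in this block (and in Example #10) rely on a common trick worth spelling out: PyTorch has no F.min_pool1d, so min pooling is expressed as max pooling of the negated input. A small sketch with an assumed shape:

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 20)                   # (batch, channels, length)
neg_max = F.max_pool1d(-1 * x, x.size(2))   # what the example stores: max of -x, i.e. -min(x)
min_pooled = -neg_max                       # negate again to recover the true minimum
assert torch.allclose(min_pooled.squeeze(2), x.min(dim=2).values)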
Example #15
Source File: RNN_RNN.py From SummaRuNNer with MIT License
def forward(self, x, doc_lens):
    sent_lens = torch.sum(torch.sign(x), dim=1).data
    x = self.embed(x)                                 # (N,L,D)
    # word level GRU
    H = self.args.hidden_size
    x = self.word_RNN(x)[0]                           # (N,2*H,L)
    #word_out = self.avg_pool1d(x,sent_lens)
    word_out = self.max_pool1d(x, sent_lens)
    # make sent features(pad with zeros)
    x = self.pad_doc(word_out, doc_lens)

    # sent level GRU
    sent_out = self.sent_RNN(x)[0]                    # (B,max_doc_len,2*H)
    #docs = self.avg_pool1d(sent_out,doc_lens)        # (B,2*H)
    docs = self.max_pool1d(sent_out, doc_lens)        # (B,2*H)
    probs = []
    for index, doc_len in enumerate(doc_lens):
        valid_hidden = sent_out[index, :doc_len, :]   # (doc_len,2*H)
        doc = F.tanh(self.fc(docs[index])).unsqueeze(0)
        s = Variable(torch.zeros(1, 2 * H))
        if self.args.device is not None:
            s = s.cuda()
        for position, h in enumerate(valid_hidden):
            h = h.view(1, -1)                         # (1,2*H)
            # get position embeddings
            abs_index = Variable(torch.LongTensor([[position]]))
            if self.args.device is not None:
                abs_index = abs_index.cuda()
            abs_features = self.abs_pos_embed(abs_index).squeeze(0)

            rel_index = int(round((position + 1) * 9.0 / doc_len))
            rel_index = Variable(torch.LongTensor([[rel_index]]))
            if self.args.device is not None:
                rel_index = rel_index.cuda()
            rel_features = self.rel_pos_embed(rel_index).squeeze(0)

            # classification layer
            content = self.content(h)
            salience = self.salience(h, doc)
            novelty = -1 * self.novelty(h, F.tanh(s))
            abs_p = self.abs_pos(abs_features)
            rel_p = self.rel_pos(rel_features)
            prob = F.sigmoid(content + salience + novelty + abs_p + rel_p + self.bias)
            s = s + torch.mm(prob, h)
            probs.append(prob)
    return torch.cat(probs).squeeze()
Example #16
Source File: operations.py From NNEF-Tools with Apache License 2.0
def _box_impl(input,      # type: torch.Tensor
              size,       # type: List[int]
              border,     # type: str
              padding,    # type: List[Tuple[int, int]]
              stride,     # type: List[int]
              dilation,   # type: List[int]
              normalize,  # type: bool
              ):
    # type: (...)->torch.Tensor
    assert 3 <= len(input.shape) <= 5
    assert len(input.shape) == len(size) == len(padding) == len(stride) == len(dilation)
    assert padding[:2] == [(0, 0), (0, 0)]
    assert size[:2] == stride[:2] == dilation[:2]

    if dilation and any(d != 1 for d in dilation):
        raise utils.NNEFToolsException(
            "Box (avg or sum pooling) is only implemented for dilation = 1."
        )

    spatial_dims = len(input.shape) - 2
    pad = nnef_pad(input=input, padding=padding, border='constant' if border == 'ignore' else border)
    avg_pool = {1: F.avg_pool1d, 2: F.avg_pool2d, 3: F.avg_pool3d}[spatial_dims](
        input=pad, kernel_size=size[2:], stride=stride[2:], padding=0)

    if border == 'ignore' and normalize:
        ones = torch.ones_like(input)
        padded_ones = nnef_pad(input=ones, padding=padding, border='constant')
        avg_pool_ones = {1: F.avg_pool1d, 2: F.avg_pool2d, 3: F.avg_pool3d}[spatial_dims](
            input=padded_ones, kernel_size=size[2:], stride=stride[2:], padding=0)
        # If padding is big, zero averages can happen on the border, don't divide by zero
        avg_pool_ones = nnef_select(avg_pool_ones > 0, avg_pool_ones, torch.ones_like(avg_pool_ones))
        avg_pool /= avg_pool_ones

    if normalize:
        return avg_pool
    else:
        return avg_pool * utils.product(size)
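
The closing line above turns an average pool into a sum (box) pool by scaling with the window volume. A one-dimensional sketch of that identity (values assumed for illustration):

import torch
import torch.nn.functional as F

x = torch.arange(6, dtype=torch.float32).view(1, 1, 6)   # [[[0, 1, 2, 3, 4, 5]]]
k = 3
sum_pool = F.avg_pool1d(x, kernel_size=k, stride=k) * k  # [[[3., 12.]]]: window sums 0+1+2 and 3+4+5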