Python torch.nn.utils.rnn.PackedSequence() Examples
The following are 30 code examples of torch.nn.utils.rnn.PackedSequence(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module torch.nn.utils.rnn, or try the search function.
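Before the examples, a quick orientation: a PackedSequence is normally produced by pack_padded_sequence, fed directly to an RNN module, and unpacked again with pad_packed_sequence. A minimal round-trip sketch (shapes and values are illustrative):

import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch = torch.zeros(2, 5, 8)                 # (batch, max_len, features), zero-padded
lengths = [5, 3]                             # true lengths of the two sequences
packed = pack_padded_sequence(batch, lengths, batch_first=True)

rnn_layer = nn.GRU(input_size=8, hidden_size=16, batch_first=True)
packed_out, h_n = rnn_layer(packed)          # RNN modules accept PackedSequence directly

padded_out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
# padded_out: (2, 5, 16); out_lengths: tensor([5, 3])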
Example #1
Source File: recurrent.py From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License | 6 votes |
def forward(self, inputs, hidden=None):
    hidden = hidden or tuple([None] * len(self))
    next_hidden = []
    for i, module in enumerate(self._modules.values()):
        output, h = module(inputs, hidden[i])
        next_hidden.append(h)
        if self.residual and inputs.size(-1) == output.size(-1):
            inputs = output + inputs
        else:
            inputs = output
        if isinstance(inputs, PackedSequence):
            data = nn.functional.dropout(
                inputs.data, self.dropout, self.training)
            inputs = PackedSequence(data, inputs.batch_sizes)
        else:
            inputs = nn.functional.dropout(
                inputs, self.dropout, self.training)
    return output, tuple(next_hidden)
Example #2
Source File: test_utils.py From skorch with BSD 3-Clause "New" or "Revised" License | 6 votes |
def tensors_equal(self, x, y):
    """Test that tensors in diverse containers are equal."""
    if isinstance(x, PackedSequence):
        return self.tensors_equal(x[0], y[0]) and self.tensors_equal(x[1], y[1])

    if isinstance(x, dict):
        return (
            (x.keys() == y.keys())
            and all(self.tensors_equal(x[k], y[k]) for k in x)
        )

    if isinstance(x, (list, tuple)):
        return all(self.tensors_equal(xi, yi) for xi, yi in zip(x, y))

    if x.is_sparse is not y.is_sparse:
        return False

    if x.is_sparse:
        x, y = x.to_dense(), y.to_dense()

    return (x == y).all()

# pylint: disable=no-method-argument
Example #3
Source File: rel_model.py From neural-motifs with MIT License | 6 votes |
def edge_ctx(self, obj_feats, obj_dists, im_inds, obj_preds, box_priors=None):
    """
    Object context and object classification.
    :param obj_feats: [num_obj, img_dim + object embedding0 dim]
    :param obj_dists: [num_obj, #classes]
    :param im_inds: [num_obj] the indices of the images
    :return: edge_ctx: [num_obj, #feats] For later!
    """
    # Only use hard embeddings
    obj_embed2 = self.obj_embed2(obj_preds)
    # obj_embed3 = F.softmax(obj_dists, dim=1) @ self.obj_embed3.weight
    inp_feats = torch.cat((obj_embed2, obj_feats), 1)

    # Sort by the confidence of the maximum detection.
    confidence = F.softmax(obj_dists, dim=1).data.view(-1)[
        obj_preds.data + arange(obj_preds.data) * self.num_classes]
    perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)

    edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
    edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]

    # now we're good! unperm
    edge_ctx = edge_reps[inv_perm]
    return edge_ctx
Example #4
Source File: torch_utils.py From molecule-chef with GNU General Public License v3.0 | 6 votes |
def prepend_tensor_to_start_of_packed_seq(packed_seq: rnn.PackedSequence, value_to_add):
    """
    This function shifts the whole sequence down and adds value_to_add to the start.
    """
    data, batch_sizes, *others = packed_seq
    # We're gonna be a bit cheeky and construct a PackedSequence manually at the bottom of
    # this function -- the docs tell us not to do that, but we have seen others do it, e.g.
    # https://github.com/pytorch/pytorch/issues/8921#issuecomment-400552029
    # Originally we coded this in PyTorch 1.0, where PackedSequence was a thinner wrapper
    # around a NamedTuple.

    # Check that we are still using enforce_sorted=True packed sequences.
    if len(others):
        assert others[0] is None
        assert others[1] is None

    num_in_first_batch = batch_sizes[0]
    front = torch.zeros_like(data[:num_in_first_batch])
    front[...] = value_to_add
    new_packed_seq_data = torch.cat([front, data], dim=0)
    new_length_at_beginning = batch_sizes[:1].clone()
    new_packed_seq = rnn.PackedSequence(new_packed_seq_data,
                                        torch.cat([new_length_at_beginning,
                                                   packed_seq.batch_sizes]))
    return new_packed_seq
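A quick usage sketch for the helper above (shapes and values are illustrative, and it assumes the function is in scope):

import torch
from torch.nn.utils import rnn

seqs = [torch.randn(4, 8), torch.randn(2, 8)]          # two sequences, feature dim 8
packed = rnn.pack_padded_sequence(rnn.pad_sequence(seqs), lengths=[4, 2])
shifted = prepend_tensor_to_start_of_packed_seq(packed, 0.0)
# Every sequence now starts with one extra step filled with 0.0,
# so shifted.batch_sizes is [2, 2, 2, 1, 1] instead of [2, 2, 1, 1].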
Example #5
Source File: model.py From pytorch-fast-elmo with MIT License | 6 votes |
def exec_backward_lstm(
        self,
        inputs: PackedSequence,
) -> List[PackedSequence]:
    """
    Backward LSTM.
    """
    if self.exec_managed_lstm_bos_eos:
        max_batch_size = int(inputs.batch_sizes.data[0])
        # EOS.
        self.exec_backward_lstm_eos(max_batch_size)
    elif self.exec_managed_lstm_reset_states:
        self.backward_lstm.reset_states()

    # Feed inputs.
    outputs, _ = self.backward_lstm(inputs.data, inputs.batch_sizes)

    if self.exec_managed_lstm_bos_eos:
        # BOS.
        self.exec_backward_lstm_bos(max_batch_size)

    # To list of `PackedSequence`.
    return [PackedSequence(output, inputs.batch_sizes) for output in outputs]
Example #6
Source File: model.py From open_stt_e2e with MIT License | 6 votes |
def forward(self, x, lengths, head=True):
    # Apply 2d convolutions
    x, lengths = self.conv(x, lengths)
    # Pack padded batch of sequences for RNN module
    x = pack_padded_sequence(x, lengths)
    # Forward pass through GRU
    x, _ = self.rnn(x)
    # Sum bidirectional GRU outputs
    f, b = x.data.split(self.rnn.hidden_size, 1)
    data = self.prj(f + b)
    if head:
        data = self.fc(data)
        data = log_softmax(data, dim=-1)
    x = PackedSequence(data, x.batch_sizes, x.sorted_indices, x.unsorted_indices)
    x, _ = pad_packed_sequence(x)
    return x, lengths
Example #7
Source File: decoder.py From molecule-chef with GNU General Public License v3.0 | 6 votes |
def nlog_like_of_obs(self, obs: rnn.PackedSequence):
    """
    Here we calculate the negative log likelihood of the sequence. At each step we feed in
    the previous ground-truth observation, i.e. if you use this function during training
    then you are doing teacher forcing.
    """
    # Set up the ground truth inputs from previous time-steps to be fed into the bottom of the RNN
    symbol_seq_packed_minus_last = torch_utils.remove_last_from_packed_seq(obs)
    embeddings = self.embedder.forward_on_packed_sequence(symbol_seq_packed_minus_last,
                                                          stops_pre_filtered_flag=True)
    inputs = torch_utils.prepend_tensor_to_start_of_packed_seq(embeddings, mchef_config.SOS_TOKEN)

    # Feed the embeddings through the network
    initial_hidden = self._initial_hidden_after_update
    outputs, _ = self.gru(inputs, initial_hidden)
    outputs_mapped = self.mlp_out(outputs.data)
    self.decoder_top.update(outputs_mapped)

    # Now work out the nll for each element of each sequence and then sum over the whole
    # sequence length.
    nll_per_obs = self.decoder_top.nlog_like_of_obs(obs.data)
    nll_packed = rnn.PackedSequence(nll_per_obs, *obs[1:])
    nll_padded, _ = rnn.pad_packed_sequence(nll_packed, batch_first=True, padding_value=0.0)
    nll_per_seq = nll_padded.sum(dim=tuple(range(1, len(nll_padded.shape))))
    return nll_per_seq
Example #8
Source File: model.py From pytorch-fast-elmo with MIT License | 6 votes |
def combine_char_cnn_and_bilstm_outputs(
        self,
        char_cnn_packed: PackedSequence,
        bilstm_packed: List[PackedSequence],
) -> List[PackedSequence]:
    """
    Combine the outputs of Char CNN & BiLSTM for scalar mix.
    """
    # Simply duplicate the output of char cnn.
    duplicated_char_cnn_packed = PackedSequence(
        torch.cat([char_cnn_packed.data, char_cnn_packed.data], dim=-1),
        char_cnn_packed.batch_sizes,
    )
    combined = [duplicated_char_cnn_packed]
    combined.extend(bilstm_packed)
    return combined
Example #9
Source File: LSTM.py From TimeSeries with Apache License 2.0 | 6 votes |
def forward(self, x: torch.Tensor) -> torch.Tensor:
    if not self.training or self.dropout <= 0.:
        return x

    is_packed = isinstance(x, PackedSequence)
    if is_packed:
        x, batch_sizes = x
        max_batch_size = int(batch_sizes[0])
    else:
        batch_sizes = None
        max_batch_size = x.size(0)

    # Drop the same mask across the entire sequence
    if self.batch_first:
        m = x.new_empty(max_batch_size, 1, x.size(2),
                        requires_grad=False).bernoulli_(1 - self.dropout)
    else:
        m = x.new_empty(1, max_batch_size, x.size(2),
                        requires_grad=False).bernoulli_(1 - self.dropout)
    x = x.masked_fill(m == 0, 0) / (1 - self.dropout)

    if is_packed:
        return PackedSequence(x, batch_sizes)
    else:
        return x
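The pattern above (one Bernoulli mask shared across all time steps, often called locked or variational dropout) can be exercised standalone. Here is a minimal self-contained sketch; the class name and shapes are illustrative, not the project's actual module:

import torch
import torch.nn as nn

class LockedDropout(nn.Module):
    # Minimal sketch: sample one dropout mask per sequence and reuse it at every time step.
    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, x):  # x: (seq_len, batch, features)
        if not self.training or self.p <= 0.:
            return x
        mask = x.new_empty(1, x.size(1), x.size(2)).bernoulli_(1 - self.p)
        return x * mask / (1 - self.p)

drop = LockedDropout(p=0.3).train()
out = drop(torch.randn(7, 2, 16))  # the same feature channels are zeroed at all 7 steps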
Example #10
Source File: bucket_iterator.py From verb-attributes with MIT License | 6 votes |
def _defns_to_packed_seq(defns, field, cuda=torch.cuda.is_available(), volatile=False):
    """
    Pads a list of definitions (in sorted order!)
    :param defns: List of lists containing tokenized definitions OR
                  list of strings containing definitions
    :param field: Contains padding and vocab functions.
    :param cuda: if true, we'll cudaize it
    :param volatile:
    :return: PackedSequence with a Variable.
    """
    tokenized_defns = [field.preprocess(x) for x in defns]
    defns_padded, lengths = field.pad(tokenized_defns)
    if not all(lengths[i] >= lengths[i + 1] for i in range(len(lengths) - 1)):
        raise ValueError("Sequences must be in decreasing order")

    defns_tensor = torch.LongTensor([
        [field.vocab.stoi[x] for x in ex] for ex in defns_padded
    ])

    defns_packed_ = pack_padded_sequence(defns_tensor, lengths, batch_first=True)
    packed_data = Variable(defns_packed_.data, volatile=volatile)
    if cuda:
        packed_data = packed_data.cuda()

    return PackedSequence(packed_data, defns_packed_.batch_sizes)
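This example targets the old Variable-era API, where inputs had to be pre-sorted by decreasing length. On PyTorch 1.1 and later, the sorting requirement can be lifted with enforce_sorted=False; a minimal sketch (tensor contents are illustrative):

import torch
from torch.nn.utils.rnn import pack_padded_sequence

token_ids = torch.tensor([[4, 9, 0, 0],    # padded batch, batch_first=True
                          [7, 3, 5, 2]])
lengths = [2, 4]                           # no need to sort descending
packed = pack_padded_sequence(token_ids, lengths, batch_first=True,
                              enforce_sorted=False)
# packed.sorted_indices / packed.unsorted_indices record the permutation applied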
Example #11
Source File: att_prediction.py From verb-attributes with MIT License | 6 votes |
def forward(self, defns, word_embeds=None):
    """
    Forward pass
    :param defns: PackedSequence with definitions
    :param word_embeds: [batch_size, array] of word embeddings
    :return:
    """
    batch_embed = PackedSequence(self.embed(defns.data), defns.batch_sizes)
    output, h_n = self.gru(batch_embed)

    h_rep = h_n.transpose(0, 1).contiguous().view(-1, self.hidden_size * 2)
    h_rep = self.d(h_rep)

    if self.embed_input and (word_embeds is None):
        raise ValueError("Must supply word embedding")
    elif self.embed_input:
        h_rep = torch.cat((h_rep, word_embeds), 1)

    return self.fc(h_rep)
Example #12
Source File: model.py From pytorch-fast-elmo with MIT License | 6 votes |
def exec_bilstm_and_scalar_mix(
        self,
        token_repr: PackedSequence,
) -> List[PackedSequence]:
    """
    Common combination: BiLSTM followed by scalar mix.
    """
    # BiLSTM.
    bilstm_repr = self.exec_bilstm(token_repr)
    # Scalar Mix.
    combined_repr = self.combine_char_cnn_and_bilstm_outputs(
        token_repr,
        self.concat_packed_sequences(bilstm_repr),
    )
    mixed_reprs = self.exec_scalar_mix(combined_repr)
    return mixed_reprs
Example #13
Source File: sequence.py From DeepCTR-Torch with Apache License 2.0 | 6 votes |
def forward(self, input, att_scores=None, hx=None):
    if not isinstance(input, PackedSequence) or not isinstance(att_scores, PackedSequence):
        raise NotImplementedError("DynamicGRU only supports packed input and att_scores")

    input, batch_sizes, sorted_indices, unsorted_indices = input
    att_scores, _, _, _ = att_scores

    max_batch_size = int(batch_sizes[0])
    if hx is None:
        hx = torch.zeros(max_batch_size, self.hidden_size,
                         dtype=input.dtype, device=input.device)

    outputs = torch.zeros(input.size(0), self.hidden_size,
                          dtype=input.dtype, device=input.device)

    begin = 0
    for batch in batch_sizes:
        new_hx = self.rnn(
            input[begin:begin + batch],
            hx[0:batch],
            att_scores[begin:begin + batch])
        outputs[begin:begin + batch] = new_hx
        hx = new_hx
        begin += batch

    return PackedSequence(outputs, batch_sizes, sorted_indices, unsorted_indices)
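The loop above walks the flat .data tensor one time step at a time, using batch_sizes to know how many sequences are still active at each step. A minimal standalone illustration of that slicing (shapes are illustrative):

import torch
from torch.nn.utils.rnn import pack_padded_sequence

packed = pack_padded_sequence(torch.randn(3, 2, 5), lengths=[3, 1])  # T=3, B=2
begin = 0
for t, batch in enumerate(packed.batch_sizes.tolist()):
    step = packed.data[begin:begin + batch]  # all sequences still alive at time t
    print(t, step.shape)                     # (2, 5), then (1, 5), then (1, 5)
    begin += batch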
Example #14
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def execute(self, inputs: PackedSequence) -> List[PackedSequence]:
    token_repr = self.exec_word_embedding(inputs)
    return self.exec_backward_vocab_prob_distrib(token_repr)
Example #15
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: PackedSequence) -> torch.Tensor:
    outputs, hc = self._ops(x)

    if self._using_sequence:
        hiddens = pad_packed_sequence(outputs)[0].permute(1, 0, 2)
        return hiddens
    else:
        feature = torch.cat([*hc[0]], dim=1)
        return feature
Example #16
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> PackedSequence:
    fmap, fmap_length = x
    fmap = fmap.permute(0, 2, 1) if self._permuting else fmap
    return pack_padded_sequence(fmap, fmap_length, batch_first=True,
                                enforce_sorted=False)
Example #17
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> PackedSequence:
    fmap, fmap_length = x
    fmap = fmap.permute(0, 2, 1) if self._permuting else fmap
    return pack_padded_sequence(fmap, fmap_length, batch_first=True,
                                enforce_sorted=False)
Example #18
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: PackedSequence) -> torch.Tensor:
    outputs, hc = self._ops(x)

    if self._using_sequence:
        hiddens = pad_packed_sequence(outputs)[0].permute(1, 0, 2)
        return hiddens
    else:
        feature = torch.cat([*hc[0]], dim=1)
        return feature
Example #19
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> PackedSequence:
    fmap, fmap_length = x
    fmap = fmap.permute(0, 2, 1) if self._permuting else fmap
    return pack_padded_sequence(fmap, fmap_length, batch_first=True,
                                enforce_sorted=False)
Example #20
Source File: ops.py From nlp_classification with MIT License | 5 votes |
def forward(self, x: PackedSequence) -> torch.Tensor:
    outputs, hc = self._ops(x)

    if self._using_sequence:
        hiddens = pad_packed_sequence(outputs)[0].permute(1, 0, 2)
        return hiddens
    else:
        feature = torch.cat([*hc[0]], dim=1)
        return feature
Example #21
Source File: bilstm.py From parser with MIT License | 5 votes |
def forward(self, sequence, hx=None):
    x, batch_sizes = sequence.data, sequence.batch_sizes.tolist()
    batch_size = batch_sizes[0]
    h_n, c_n = [], []

    if hx is None:
        ih = x.new_zeros(self.num_layers * 2, batch_size, self.hidden_size)
        h, c = ih, ih
    else:
        h, c = self.permute_hidden(hx, sequence.sorted_indices)
    h = h.view(self.num_layers, 2, batch_size, self.hidden_size)
    c = c.view(self.num_layers, 2, batch_size, self.hidden_size)

    for i in range(self.num_layers):
        x = torch.split(x, batch_sizes)
        if self.training:
            mask = SharedDropout.get_mask(x[0], self.dropout)
            x = [i * mask[:len(i)] for i in x]
        x_f, (h_f, c_f) = self.layer_forward(x=x,
                                             hx=(h[i, 0], c[i, 0]),
                                             cell=self.f_cells[i],
                                             batch_sizes=batch_sizes)
        x_b, (h_b, c_b) = self.layer_forward(x=x,
                                             hx=(h[i, 1], c[i, 1]),
                                             cell=self.b_cells[i],
                                             batch_sizes=batch_sizes,
                                             reverse=True)
        x = torch.cat((x_f, x_b), -1)
        h_n.append(torch.stack((h_f, h_b)))
        c_n.append(torch.stack((c_f, c_b)))

    x = PackedSequence(x,
                       sequence.batch_sizes,
                       sequence.sorted_indices,
                       sequence.unsorted_indices)
    hx = torch.cat(h_n, 0), torch.cat(c_n, 0)
    hx = self.permute_hidden(hx, sequence.unsorted_indices)

    return x, hx
Example #22
Source File: utils.py From skorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def is_torch_data_type(x):
    # pylint: disable=protected-access
    return isinstance(x, (torch.Tensor, PackedSequence))
Example #23
Source File: rel_model.py From neural-motifs with MIT License | 5 votes |
def obj_ctx(self, obj_feats, obj_dists, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None):
    """
    Object context and object classification.
    :param obj_feats: [num_obj, img_dim + object embedding0 dim]
    :param obj_dists: [num_obj, #classes]
    :param im_inds: [num_obj] the indices of the images
    :param obj_labels: [num_obj] the GT labels of the image
    :param boxes: [num_obj, 4] boxes. We'll use this for NMS
    :return: obj_dists: [num_obj, #classes] new probability distribution.
             obj_preds: argmax of that distribution.
             obj_final_ctx: [num_obj, #feats] For later!
    """
    # Sort by the confidence of the maximum detection.
    confidence = F.softmax(obj_dists, dim=1).data[:, 1:].max(1)[0]
    perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)

    # Pass object features, sorted by score, into the encoder LSTM
    obj_inp_rep = obj_feats[perm].contiguous()
    input_packed = PackedSequence(obj_inp_rep, ls_transposed)
    encoder_rep = self.obj_ctx_rnn(input_packed)[0][0]

    # Decode in order
    if self.mode != 'predcls':
        decoder_inp = PackedSequence(torch.cat((obj_inp_rep, encoder_rep), 1)
                                     if self.pass_in_obj_feats_to_decoder else encoder_rep,
                                     ls_transposed)
        obj_dists, obj_preds = self.decoder_rnn(
            decoder_inp,  # obj_dists[perm],
            labels=obj_labels[perm] if obj_labels is not None else None,
            boxes_for_nms=boxes_per_cls[perm] if boxes_per_cls is not None else None,
        )
        obj_preds = obj_preds[inv_perm]
        obj_dists = obj_dists[inv_perm]
    else:
        assert obj_labels is not None
        obj_preds = obj_labels
        obj_dists = Variable(to_onehot(obj_preds.data, self.num_classes))
    encoder_rep = encoder_rep[inv_perm]

    return obj_dists, obj_preds, encoder_rep
Example #24
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def execute(self, inputs: PackedSequence) -> List[PackedSequence]:
    token_repr = self.exec_context_independent_repr(inputs)
    # BiLSTM.
    bilstm_repr = self.exec_bilstm(token_repr)
    # Scalar Mix.
    combined_repr = self.combine_char_cnn_and_bilstm_outputs(
        token_repr,
        self.concat_packed_sequences(bilstm_repr),
    )
    return combined_repr
Example #25
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def exec_context_independent_repr(self, inputs: PackedSequence) -> PackedSequence:
    return self.exec_char_cnn(inputs)
Example #26
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def execute(self, inputs: PackedSequence) -> List[PackedSequence]:
    token_repr = self.exec_word_embedding(inputs)
    return self.exec_forward_vocab_prob_distrib(token_repr)
Example #27
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def execute(self, inputs: PackedSequence) -> List[PackedSequence]:
    token_repr = self.exec_char_cnn(inputs)
    return self.exec_backward_vocab_prob_distrib(token_repr)
Example #28
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def exec_context_independent_repr(self, inputs: PackedSequence) -> PackedSequence:
    return self.exec_word_embedding(inputs)
Example #29
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def exec_backward_vocab_prob_distrib(self, token_repr: PackedSequence) -> List[PackedSequence]:
    bwd_lstm_last = self.exec_backward_lstm(token_repr)[-1]
    bwd_vocab_distrib = self.exec_vocab_projection(bwd_lstm_last)
    return [bwd_vocab_distrib]
Example #30
Source File: model.py From pytorch-fast-elmo with MIT License | 5 votes |
def execute(self, inputs: PackedSequence) -> List[PackedSequence]:
    token_repr = self.exec_char_cnn(inputs)
    return self.exec_forward_vocab_prob_distrib(token_repr)