Python torch.BoolTensor() Examples
The following are 28 code examples of torch.BoolTensor(), collected from open-source projects. The originating project and source file are noted above each example.
Example #1
Source File: util.py From allennlp with Apache License 2.0
def masked_max(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False,
) -> torch.Tensor:
    """
    To calculate max along certain dimensions on masked values

    # Parameters

    vector : `torch.Tensor`
        The vector to calculate max, assume unmasked parts are already zeros
    mask : `torch.BoolTensor`
        The mask of the vector. It must be broadcastable with vector.
    dim : `int`
        The dimension to calculate max
    keepdim : `bool`
        Whether to keep dimension

    # Returns

    `torch.Tensor`
        A `torch.Tensor` containing the maximum values.
    """
    replaced_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
    max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
    return max_value
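A minimal usage sketch (names as defined above; `min_value_of_dtype` comes from the same AllenNLP util module): positions where the mask is False cannot win the max.

vector = torch.tensor([[1.0, 5.0, 9.0]])
mask = torch.tensor([[True, True, False]])
masked_max(vector, mask, dim=-1)  # tensor([5.]) -- the masked 9.0 is ignored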
Example #2
Source File: keypoints.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
    """
    Create a new `Keypoints` by indexing on this `Keypoints`.

    The following usages are allowed:

    1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
    2. `new_kpts = kpts[2:10]`: return a slice of key points.
    3. `new_kpts = kpts[vector]`, where vector is a torch.BoolTensor
       with `length = len(kpts)`. Nonzero elements in the vector will be selected.

    Note that the returned Keypoints might share storage with this Keypoints,
    subject to PyTorch's indexing semantics.
    """
    if isinstance(item, int):
        return Keypoints([self.tensor[item]])
    return Keypoints(self.tensor[item])
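For illustration, a hypothetical boolean-mask indexing call (assuming detectron2 is installed; `Keypoints` wraps a tensor of shape (num_instances, num_keypoints, 3)):

kpts = Keypoints(torch.rand(4, 17, 3))           # 4 instances, 17 keypoints each
keep = torch.tensor([True, False, True, False])
selected = kpts[keep]                            # a Keypoints holding the 2 selected instances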
Example #3
Source File: pass_through_encoder.py From allennlp with Apache License 2.0
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
    """
    # Parameters

    inputs : `torch.Tensor`, required.
        A tensor of shape (batch_size, timesteps, input_dim)
    mask : `torch.BoolTensor`, optional (default = `None`).
        A tensor of shape (batch_size, timesteps).

    # Returns

    A tensor of shape (batch_size, timesteps, output_dim),
    where output_dim = input_dim.
    """
    if mask is None:
        return inputs
    else:
        # We should mask out the output instead of the input.
        # But here, output = input, so we directly mask out the input.
        return inputs * mask.unsqueeze(dim=-1)
Example #4
Source File: boxes.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes":
    """
    Returns:
        Boxes: Create a new :class:`Boxes` by indexing.

    The following usages are allowed:

    1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
    2. `new_boxes = boxes[2:10]`: return a slice of boxes.
    3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
       with `length = len(boxes)`. Nonzero elements in the vector will be selected.

    Note that the returned Boxes might share storage with this Boxes,
    subject to PyTorch's indexing semantics.
    """
    if isinstance(item, int):
        return Boxes(self.tensor[item].view(1, -1))
    b = self.tensor[item]
    assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
    return Boxes(b)
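A short usage sketch (assuming detectron2's `Boxes`, which stores an (N, 4) tensor of box coordinates):

boxes = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 8.0, 9.0]]))
keep = torch.tensor([True, False])
boxes[keep]   # a Boxes containing only the first box
boxes[0]      # also a Boxes; the .view(1, -1) keeps the tensor two-dimensional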
Example #5
Source File: rotated_boxes.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "RotatedBoxes":
    """
    Returns:
        RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.

    The following usages are allowed:

    1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
    2. `new_boxes = boxes[2:10]`: return a slice of boxes.
    3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
       with `length = len(boxes)`. Nonzero elements in the vector will be selected.

    Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
    subject to PyTorch's indexing semantics.
    """
    if isinstance(item, int):
        return RotatedBoxes(self.tensor[item].view(1, -1))
    b = self.tensor[item]
    assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
        item
    )
    return RotatedBoxes(b)
Example #6
Source File: feedforward_encoder.py From allennlp with Apache License 2.0
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
    """
    # Parameters

    inputs : `torch.Tensor`, required.
        A tensor of shape (batch_size, timesteps, input_dim)
    mask : `torch.BoolTensor`, optional (default = `None`).
        A tensor of shape (batch_size, timesteps).

    # Returns

    A tensor of shape (batch_size, timesteps, output_dim).
    """
    if mask is None:
        return self._feedforward(inputs)
    else:
        outputs = self._feedforward(inputs)
        return outputs * mask.unsqueeze(dim=-1)
Example #7
Source File: torch_transformer_encoder.py From summarus with Apache License 2.0
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
    output = inputs
    if self._sinusoidal_positional_encoding:
        output = add_positional_features(output)
    if self._positional_embedding is not None:
        position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device)
        position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])
        output = output + self._positional_embedding(position_ids)

    # The torch transformer expects the shape (sequence, batch, features), not the more
    # familiar (batch, sequence, features), so we have to fix it.
    output = output.permute(1, 0, 2)
    # The torch transformer also takes the mask backwards.
    mask = ~mask
    output = self._transformer(output, src_key_padding_mask=mask)
    output = output.permute(1, 0, 2)

    return output
Example #8
Source File: pytorch_transformer_wrapper.py From allennlp with Apache License 2.0
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
    output = inputs
    if self._sinusoidal_positional_encoding:
        output = add_positional_features(output)
    if self._positional_embedding is not None:
        position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device)
        position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])
        output = output + self._positional_embedding(position_ids)

    # For some reason the torch transformer expects the shape (sequence, batch, features),
    # not the more familiar (batch, sequence, features), so we have to fix it.
    output = output.permute(1, 0, 2)
    # For some other reason, the torch transformer takes the mask backwards.
    mask = ~mask
    output = self._transformer(output, src_key_padding_mask=mask)
    output = output.permute(1, 0, 2)

    return output
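The mask inversion in both transformer examples is the detail worth remembering: AllenNLP-style masks use True for real tokens, while torch.nn.Transformer's src_key_padding_mask uses True for padding. Standalone:

mask = torch.tensor([[True, True, True, False]])  # True = real token
src_key_padding_mask = ~mask                      # True = padding, as torch expects
# tensor([[False, False, False,  True]])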
Example #9
Source File: __init__.py From TorchCRF with MIT License
def forward(
    self, h: FloatTensor, labels: LongTensor, mask: BoolTensor
) -> FloatTensor:
    """
    :param h: hidden matrix (batch_size, seq_len, num_labels)
    :param labels: answer labels of each sequence in mini batch (batch_size, seq_len)
    :param mask: mask tensor of each sequence in mini batch (batch_size, seq_len)
    :return: The log-likelihood (batch_size)
    """
    log_numerator = self._compute_numerator_log_likelihood(h, labels, mask)
    log_denominator = self._compute_denominator_log_likelihood(h, mask)
    return log_numerator - log_denominator
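A hypothetical call, assuming TorchCRF's CRF module takes the number of labels at construction (the constructor signature here is an assumption, not shown in the example above):

crf = CRF(5)                                 # 5 labels -- hypothetical constructor
h = torch.randn(2, 7, 5)                     # emission scores (batch, seq_len, num_labels)
labels = torch.randint(0, 5, (2, 7))
mask = torch.ones(2, 7, dtype=torch.bool)    # no padding in this sketch
log_likelihood = crf(h, labels, mask)        # shape (batch_size,)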
Example #10
Source File: load_graph.py From dgl with Apache License 2.0
def load_reddit():
    from dgl.data import RedditDataset

    # load reddit data
    data = RedditDataset(self_loop=True)
    train_mask = data.train_mask
    val_mask = data.val_mask
    features = th.Tensor(data.features)
    labels = th.LongTensor(data.labels)

    # Construct graph
    g = data.graph
    g.ndata['features'] = features
    g.ndata['labels'] = labels
    g.ndata['train_mask'] = th.BoolTensor(data.train_mask)
    g.ndata['val_mask'] = th.BoolTensor(data.val_mask)
    g.ndata['test_mask'] = th.BoolTensor(data.test_mask)
    return g, data.num_labels
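Here `th` is `torch`, and the `data.*_mask` attributes are 0/1 arrays; the legacy BoolTensor constructor turns such 0/1 sequences into boolean masks:

import torch as th
th.BoolTensor([1, 0, 1])  # tensor([ True, False,  True])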
Example #11
Source File: boxes.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]):
    """
    Args:
        item: int, slice, or a BoolTensor

    Returns:
        Boxes: Create a new :class:`Boxes` by indexing.

    The following usages are allowed:

    1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
    2. `new_boxes = boxes[2:10]`: return a slice of boxes.
    3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
       with `length = len(boxes)`. Nonzero elements in the vector will be selected.

    Note that the returned Boxes might share storage with this Boxes,
    subject to PyTorch's indexing semantics.
    """
    if isinstance(item, int):
        return Boxes(self.tensor[item].view(1, -1))
    b = self.tensor[item]
    assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
    return Boxes(b)
Example #12
Source File: conv.py From KoSpeech with Apache License 2.0
def forward(self, inputs, seq_lengths):
    output = None

    for module in self.sequential:
        output = module(inputs)
        mask = torch.BoolTensor(output.size()).fill_(0)

        if output.is_cuda:
            mask = mask.cuda()

        seq_lengths = self.get_seq_lengths(module, seq_lengths)

        for idx, length in enumerate(seq_lengths):
            length = length.item()

            if (mask[idx].size(2) - length) > 0:
                mask[idx].narrow(dim=2, start=length, length=mask[idx].size(2) - length).fill_(1)

        output = output.masked_fill(mask, 0)
        inputs = output

    return output, seq_lengths
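The masking idiom above, shown standalone with simplified three-dimensional shapes (the real code works on higher-dimensional conv outputs, so these shapes are illustrative only): build an all-False BoolTensor, mark everything past each sequence's length as True, then zero those positions out.

output = torch.rand(2, 4, 8)                     # (batch, features, time) -- illustrative
mask = torch.BoolTensor(output.size()).fill_(0)  # all False
for idx, length in enumerate([8, 5]):
    pad = mask[idx].size(1) - length
    if pad > 0:
        mask[idx].narrow(dim=1, start=length, length=pad).fill_(1)
output = output.masked_fill(mask, 0)             # padded steps are now exactly zero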
Example #13
Source File: util.py From allennlp with Apache License 2.0
def replace_masked_values(
    tensor: torch.Tensor, mask: torch.BoolTensor, replace_with: float
) -> torch.Tensor:
    """
    Replaces all masked values in `tensor` with `replace_with`.  `mask` must be broadcastable
    to the same shape as `tensor`. We require that `tensor.dim() == mask.dim()`, as otherwise we
    won't know which dimensions of the mask to unsqueeze.

    This just does `tensor.masked_fill()`, except the pytorch method fills in things with a mask
    value of 1, where we want the opposite.  You can do this in your own code with
    `tensor.masked_fill(~mask, replace_with)`.
    """
    if tensor.dim() != mask.dim():
        raise ConfigurationError(
            "tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim())
        )
    return tensor.masked_fill(~mask, replace_with)
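A quick sketch of the behavior: entries where the mask is False get replaced.

t = torch.tensor([[1.0, 2.0, 3.0]])
mask = torch.tensor([[True, True, False]])
replace_masked_values(t, mask, -1e7)  # tensor([[ 1.0000e+00,  2.0000e+00, -1.0000e+07]])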
Example #14
Source File: util.py From allennlp with Apache License 2.0
def get_mask_from_sequence_lengths(
    sequence_lengths: torch.Tensor, max_length: int
) -> torch.BoolTensor:
    """
    Given a variable of shape `(batch_size,)` that represents the sequence lengths of each batch
    element, this function returns a `(batch_size, max_length)` mask variable.  For example, if
    our input was `[2, 2, 3]`, with a `max_length` of 4, we'd return
    `[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]`.

    We require `max_length` here instead of just computing it from the input `sequence_lengths`
    because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
    that we can use it to construct a new tensor.
    """
    # (batch_size, max_length)
    ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
    range_tensor = ones.cumsum(dim=1)
    return sequence_lengths.unsqueeze(1) >= range_tensor
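Reproducing the docstring's own example:

lengths = torch.tensor([2, 2, 3])
get_mask_from_sequence_lengths(lengths, max_length=4)
# tensor([[ True,  True, False, False],
#         [ True,  True, False, False],
#         [ True,  True,  True, False]])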
Example #15
Source File: util.py From allennlp with Apache License 2.0
def get_lengths_from_binary_sequence_mask(mask: torch.BoolTensor) -> torch.LongTensor:
    """
    Compute sequence lengths for each batch element in a tensor using a binary mask.

    # Parameters

    mask : `torch.BoolTensor`, required.
        A 2D binary mask of shape (batch_size, sequence_length) to
        calculate the per-batch sequence lengths from.

    # Returns

    `torch.LongTensor`
        A torch.LongTensor of shape (batch_size,) representing the lengths
        of the sequences in the batch.
    """
    return mask.sum(-1)
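A small sketch of the behavior:

mask = torch.tensor([[True, True, False], [True, True, True]])
get_lengths_from_binary_sequence_mask(mask)  # tensor([2, 3])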
Example #16
Source File: token_indexer.py From allennlp with Apache License 2.0
def as_padded_tensor_dict(
    self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
    """
    This method pads a list of tokens given the input padding lengths (which could actually
    truncate things, depending on settings) and returns that padded list of input tokens as a
    `Dict[str, torch.Tensor]`.  This is a dictionary because there should be one key per
    argument that the `TokenEmbedder` corresponding to this class expects in its `forward()`
    method (where the argument name in the `TokenEmbedder` needs to match the key in this
    dictionary).

    The base class implements the case when all you want to do is create a padded `LongTensor`
    for every list in the `tokens` dictionary.  If your `TokenIndexer` needs more complex
    logic than that, you need to override this method.
    """
    tensor_dict = {}
    for key, val in tokens.items():
        if val and isinstance(val[0], bool):
            tensor = torch.BoolTensor(
                pad_sequence_to_length(val, padding_lengths[key], default_value=lambda: False)
            )
        else:
            tensor = torch.LongTensor(pad_sequence_to_length(val, padding_lengths[key]))
        tensor_dict[key] = tensor
    return tensor_dict
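The BoolTensor branch is the relevant one here; stripped of the helper, it amounts to padding a list of booleans with False and wrapping it (this inlines what `pad_sequence_to_length` does in the simple case):

val = [True, False, True]
padded = val + [False] * (5 - len(val))  # pad to length 5
torch.BoolTensor(padded)                 # tensor([ True, False,  True, False, False])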
Example #17
Source File: pearson_correlation.py From allennlp with Apache License 2.0
def __call__(
    self,
    predictions: torch.Tensor,
    gold_labels: torch.Tensor,
    mask: Optional[torch.BoolTensor] = None,
):
    """
    # Parameters

    predictions : `torch.Tensor`, required.
        A tensor of predictions of shape (batch_size, ...).
    gold_labels : `torch.Tensor`, required.
        A tensor of the same shape as `predictions`.
    mask : `torch.BoolTensor`, optional (default = `None`).
        A tensor of the same shape as `predictions`.
    """
    predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
    self._predictions_labels_covariance(predictions, gold_labels, mask)
    self._predictions_variance(predictions, predictions, mask)
    self._labels_variance(gold_labels, gold_labels, mask)
Example #18
Source File: boe_encoder.py From allennlp with Apache License 2.0
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor = None):
    if mask is not None:
        tokens = tokens * mask.unsqueeze(-1)

    # Our input has shape `(batch_size, num_tokens, embedding_dim)`, so we sum out the
    # `num_tokens` dimension.
    summed = tokens.sum(1)

    if self._averaged:
        if mask is not None:
            lengths = get_lengths_from_binary_sequence_mask(mask)
            length_mask = lengths > 0

            # Set any length 0 to 1, to avoid dividing by zero.
            lengths = torch.max(lengths, lengths.new_ones(1))
        else:
            lengths = tokens.new_full((1,), fill_value=tokens.size(1))
            length_mask = None

        summed = summed / lengths.unsqueeze(-1).float()

        if length_mask is not None:
            summed = summed * (length_mask > 0).unsqueeze(-1)

    return summed
Example #19
Source File: mean_absolute_error.py From allennlp with Apache License 2.0
def __call__(
    self,
    predictions: torch.Tensor,
    gold_labels: torch.Tensor,
    mask: Optional[torch.BoolTensor] = None,
):
    """
    # Parameters

    predictions : `torch.Tensor`, required.
        A tensor of predictions of shape (batch_size, ...).
    gold_labels : `torch.Tensor`, required.
        A tensor of the same shape as `predictions`.
    mask : `torch.BoolTensor`, optional (default = `None`).
        A tensor of the same shape as `predictions`.
    """
    predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)

    absolute_errors = torch.abs(predictions - gold_labels)

    if mask is not None:
        absolute_errors *= mask
        self._total_count += torch.sum(mask)
    else:
        self._total_count += gold_labels.numel()

    self._absolute_error += torch.sum(absolute_errors)
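A hypothetical use of the metric (assuming AllenNLP's MeanAbsoluteError class; the exact shape of get_metric()'s return value varies across AllenNLP versions):

metric = MeanAbsoluteError()
metric(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.5, 2.0, 3.0]))
metric.get_metric()  # mean absolute error of 0.5 / 3 over the three elements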
Example #20
Source File: modules.py From ParlAI with MIT License
def forward_layers(
    self, tensor: torch.Tensor, mask: torch.BoolTensor
) -> torch.Tensor:
    """
    Apply transformer layers to input.

    :param tensor:
        embedded input
    :param mask:
        mask of input

    :return tensor:
        return embedding after applying transformer layers
    """
    if getattr(self.layers, 'is_model_parallel', False):
        # factored out for readability. It is equivalent to the other condition
        tensor = self._apply_model_parallel(tensor, mask)
    else:
        for i in range(self.n_layers):
            tensor = self.layers[i](tensor, mask)

    return tensor
Example #21
Source File: instances.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
    """
    Args:
        item: an index-like object and will be used to index all the fields.

    Returns:
        If `item` is a string, return the data in the corresponding field.
        Otherwise, returns an `Instances` where all fields are indexed by `item`.
    """
    if type(item) == int:
        if item >= len(self) or item < -len(self):
            raise IndexError("Instances index out of range!")
        else:
            item = slice(item, None, len(self))

    ret = Instances(self._image_size)
    for k, v in self._fields.items():
        ret.set(k, v[item])
    return ret
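A hypothetical filtering pattern this enables (assuming detectron2's Instances):

inst = Instances((480, 640))                       # (image_height, image_width)
inst.set("scores", torch.tensor([0.9, 0.2, 0.7]))
keep = inst.get("scores") > 0.5                    # a BoolTensor of length 3
inst[keep]                                         # an Instances with the 2 kept entries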
Example #22
Source File: __init__.py From TorchCRF with MIT License
def _compute_numerator_log_likelihood(
    self, h: FloatTensor, y: LongTensor, mask: BoolTensor
) -> FloatTensor:
    """
    compute the numerator term for the log-likelihood
    :param h: hidden matrix (batch_size, seq_len, num_labels)
    :param y: answer labels of each sequence in mini batch (batch_size, seq_len)
    :param mask: mask tensor of each sequence in mini batch (batch_size, seq_len)
    :return: The score of numerator term for the log-likelihood
    """
    batch_size, seq_len, _ = h.size()

    h_unsqueezed = h.unsqueeze(-1)
    trans = self.trans_matrix.unsqueeze(-1)

    arange_b = torch.arange(batch_size)

    # extract first vector of sequences in mini batch
    calc_range = seq_len - 1
    score = self.start_trans[y[:, 0]] + sum(
        [self._calc_trans_score_for_num_llh(
            h_unsqueezed, y, trans, mask, t, arange_b
        ) for t in range(calc_range)])

    # extract end label number of each sequence in mini batch (batch_size)
    last_mask_index = mask.sum(1) - 1
    last_labels = y[arange_b, last_mask_index]
    each_last_score = h[arange_b, -1, last_labels] * mask[:, -1]

    # Add the score of the sequences of the maximum length in mini batch
    # Add the scores from the last tag of each sequence to EOS
    score += each_last_score + self.end_trans[last_labels]

    return score
Example #23
Source File: util.py From allennlp with Apache License 2.0
def masked_mean(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False
) -> torch.Tensor:
    """
    To calculate mean along certain dimensions on masked values

    # Parameters

    vector : `torch.Tensor`
        The vector to calculate mean.
    mask : `torch.BoolTensor`
        The mask of the vector. It must be broadcastable with vector.
    dim : `int`
        The dimension to calculate mean
    keepdim : `bool`
        Whether to keep dimension

    # Returns

    `torch.Tensor`
        A `torch.Tensor` containing the mean values.
    """
    replaced_vector = vector.masked_fill(~mask, 0.0)

    value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
    value_count = torch.sum(mask, dim=dim, keepdim=keepdim)
    return value_sum / value_count.float().clamp(min=tiny_value_of_dtype(torch.float))
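A small sketch: the masked-out 100.0 does not contribute to the mean.

v = torch.tensor([[2.0, 4.0, 100.0]])
m = torch.tensor([[True, True, False]])
masked_mean(v, m, dim=-1)  # tensor([3.])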
Example #24
Source File: masks.py From detectron2 with Apache License 2.0
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
    """
    Support indexing over the instances and return a `PolygonMasks` object.
    `item` can be:

    1. An integer. It will return an object with only one instance.
    2. A slice. It will return an object with the selected instances.
    3. A list[int]. It will return an object with the selected instances,
       corresponding to the indices in the list.
    4. A vector mask of type BoolTensor, whose length is num_instances.
       It will return an object with the instances whose mask is nonzero.
    """
    if isinstance(item, int):
        selected_polygons = [self.polygons[item]]
    elif isinstance(item, slice):
        selected_polygons = self.polygons[item]
    elif isinstance(item, list):
        selected_polygons = [self.polygons[i] for i in item]
    elif isinstance(item, torch.Tensor):
        # Polygons is a list, so we have to move the indices back to CPU.
        if item.dtype == torch.bool:
            assert item.dim() == 1, item.shape
            item = item.nonzero().squeeze(1).cpu().numpy().tolist()
        elif item.dtype in [torch.int32, torch.int64]:
            item = item.cpu().numpy().tolist()
        else:
            raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
        selected_polygons = [self.polygons[i] for i in item]
    return PolygonMasks(selected_polygons)
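The BoolTensor-to-indices conversion used above, in isolation:

item = torch.tensor([True, False, True])
item.nonzero().squeeze(1).tolist()  # [0, 2]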
Example #25
Source File: spearman_correlation.py From allennlp with Apache License 2.0
def __call__(
    self,
    predictions: torch.Tensor,
    gold_labels: torch.Tensor,
    mask: Optional[torch.BoolTensor] = None,
):
    """
    # Parameters

    predictions : `torch.Tensor`, required.
        A tensor of predictions of shape (batch_size, ...).
    gold_labels : `torch.Tensor`, required.
        A tensor of the same shape as `predictions`.
    mask : `torch.BoolTensor`, optional (default = `None`).
        A tensor of the same shape as `predictions`.
    """
    predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)

    # Flatten predictions, gold_labels, and mask. We calculate the Spearman correlation between
    # the vectors, since each element in the predictions and gold_labels tensor is assumed to be
    # a separate observation.
    predictions = predictions.reshape(-1)
    gold_labels = gold_labels.reshape(-1)

    self.total_predictions = self.total_predictions.to(predictions.device)
    self.total_gold_labels = self.total_gold_labels.to(gold_labels.device)

    if mask is not None:
        mask = mask.reshape(-1)
        self.total_predictions = torch.cat((self.total_predictions, predictions * mask), 0)
        self.total_gold_labels = torch.cat((self.total_gold_labels, gold_labels * mask), 0)
    else:
        self.total_predictions = torch.cat((self.total_predictions, predictions), 0)
        self.total_gold_labels = torch.cat((self.total_gold_labels, gold_labels), 0)
Example #26
Source File: masks.py From detectron2 with Apache License 2.0
def nonempty(self) -> torch.Tensor:
    """
    Find masks that are non-empty.

    Returns:
        Tensor:
            a BoolTensor which represents whether each mask is empty (False) or not (True).
    """
    keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
    return torch.as_tensor(keep, dtype=torch.bool)
Example #27
Source File: metric.py From allennlp with Apache License 2.0
def __call__(
    self, predictions: torch.Tensor, gold_labels: torch.Tensor, mask: Optional[torch.BoolTensor]
):
    """
    # Parameters

    predictions : `torch.Tensor`, required.
        A tensor of predictions.
    gold_labels : `torch.Tensor`, required.
        A tensor corresponding to some gold label to evaluate against.
    mask : `torch.BoolTensor`, optional (default = `None`).
        A mask can be passed, in order to deal with metrics which are
        computed over potentially padded elements, such as sequence labels.
    """
    raise NotImplementedError
Example #28
Source File: masks.py From detectron2 with Apache License 2.0
def nonempty(self) -> torch.Tensor:
    """
    Find masks that are non-empty.

    Returns:
        Tensor:
            a BoolTensor which represents whether each mask is empty (False) or not (True).
    """
    keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
    # Note: np.bool was removed in modern NumPy; plain bool gives the same result here.
    return torch.from_numpy(np.asarray(keep, dtype=bool))