Python torch.broadcast_tensors() Examples
The following are 16 code examples of torch.broadcast_tensors(), collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the torch module.
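Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what torch.broadcast_tensors() itself does: it expands its arguments to a common shape under PyTorch's broadcasting semantics and returns views, without copying data.

import torch

a = torch.tensor([[1, 2, 3]])    # shape (1, 3)
b = torch.tensor([[10], [20]])   # shape (2, 1)

# Both results have the common broadcast shape (2, 3). They are views,
# so expanded elements share memory with the original tensors.
a_b, b_b = torch.broadcast_tensors(a, b)
print(a_b.shape, b_b.shape)  # torch.Size([2, 3]) torch.Size([2, 3])
print(a_b + b_b)             # tensor([[11, 12, 13], [21, 22, 23]])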
Example #1
Source File: functional.py From SlowFast-Network-pytorch with MIT License
def smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""Function that uses a squared term if the absolute
    element-wise error falls below 1 and an L1 term otherwise.

    See :class:`~torch.nn.SmoothL1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = _smooth_l1_loss(input, target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
Example #2
Source File: functional.py From SlowFast-Network-pytorch with MIT License
def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    Function that takes the mean element-wise absolute value difference.

    See :class:`~torch.nn.L1Loss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = torch.abs(input - target)
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
Example #3
Source File: functional.py From SlowFast-Network-pytorch with MIT License
def mse_loss(input, target, size_average=None, reduce=None, reduction='mean'):
    # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
    r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor

    Measures the element-wise mean squared error.

    See :class:`~torch.nn.MSELoss` for details.
    """
    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    if target.requires_grad:
        ret = (input - target) ** 2
        if reduction != 'none':
            ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        ret = torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
    return ret
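Examples #1-#3 share the same pattern: on the optimized-kernel path, input and target must have identical shapes, so both are expanded first. A minimal sketch of that shape alignment (the random tensors here are illustrative, not from the original file):

import torch

input = torch.randn(4, 1)
target = torch.randn(1, 3)
# The C++ loss kernels require equal shapes, so the functions above expand
# both arguments to the common broadcast shape before dispatching.
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
assert expanded_input.shape == expanded_target.shape == torch.Size([4, 3])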
Example #4
Source File: mpc.py From CrypTen with MIT License
def div(self, y):
    r"""Divides each element of :attr:`self` with the scalar :attr:`y` or
    each element of the tensor :attr:`y` and returns a new resulting tensor.

    For `y` a scalar:

    .. math::
        \text{out}_i = \frac{\text{self}_i}{\text{y}}

    For `y` a tensor:

    .. math::
        \text{out}_i = \frac{\text{self}_i}{\text{y}_i}

    Note for :attr:`y` a tensor, the shapes of :attr:`self` and :attr:`y` must be
    `broadcastable`_.

    .. _broadcastable:
        https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
    """  # noqa: B950
    result = self.clone()
    if isinstance(y, CrypTensor):
        result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
    elif is_tensor(y):
        result.share = torch.broadcast_tensors(result.share, y)[0].clone()
    return result.div_(y)
Example #5
Source File: test_cuda.py From CrypTen with MIT License
def test_torch_broadcast_tensor(self):
    """Test torch.broadcast_tensor on CUDALongTensor"""
    x = get_random_test_tensor(size=(1, 5), is_float=False)
    y = get_random_test_tensor(size=(5, 1), is_float=False)

    x_cuda = CUDALongTensor(x)
    y_cuda = CUDALongTensor(y)

    a, b = torch.broadcast_tensors(x, y)
    a_cuda, b_cuda = torch.broadcast_tensors(x_cuda, y_cuda)

    self.assertTrue(
        type(a_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
    )
    self.assertTrue(
        type(b_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
    )
    self._check_int(
        a, a_cuda.cpu(), "torch.broadcast_tensor failed for CUDALongTensor"
    )
    self._check_int(
        b, b_cuda.cpu(), "torch.broadcast_tensor failed for CUDALongTensor"
    )
Example #6
Source File: affine.py From pyfilter with MIT License
def _define_transdist(loc: torch.Tensor, scale: torch.Tensor, inc_dist: Distribution, ndim: int):
    loc, scale = torch.broadcast_tensors(loc, scale)

    shape = loc.shape[:-ndim] if ndim > 0 else loc.shape

    return TransformedDistribution(
        inc_dist.expand(shape), AffineTransform(loc, scale, event_dim=ndim)
    )
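A hypothetical call to the helper above with a standard-normal increment distribution; everything outside the snippet (shapes, parameter values) is an assumption for illustration, not part of pyfilter's file:

import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

loc = torch.zeros(10)
scale = torch.full((10,), 0.5)
# loc and scale are broadcast to a common shape; ndim=0 means a univariate
# increment distribution, so the batch shape is the full broadcast shape.
dist = _define_transdist(loc, scale, Normal(0.0, 1.0), ndim=0)
print(dist.sample().shape)  # torch.Size([10])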
Example #7
Source File: birkhoff_polytope.py From geoopt with Apache License 2.0
def proj_tangent(x, u):
    assert x.shape[-2:] == u.shape[-2:], "Wrong shapes"
    x, u = torch.broadcast_tensors(x, u)
    x_shape = x.shape
    x = x.reshape(-1, x_shape[-2], x_shape[-1])
    u = u.reshape(-1, x_shape[-2], x_shape[-1])
    xt = x.transpose(-1, -2)
    batch_size, n = x.shape[0:2]

    I = torch.eye(n, dtype=x.dtype, device=x.device)
    I = I.expand_as(x)

    mu = x * u

    A = linalg.block_matrix([[I, x], [xt, I]])
    B = A[:, :, 1:]
    z1 = mu.sum(dim=-1).unsqueeze(-1)
    zt1 = mu.sum(dim=-2).unsqueeze(-1)
    b = torch.cat([z1, zt1], dim=1)

    rhs = B.transpose(1, 2) @ (b - A[:, :, 0:1])
    lhs = B.transpose(1, 2) @ B
    zeta, _ = torch.solve(rhs, lhs)
    alpha = torch.cat(
        [torch.ones(batch_size, 1, 1, dtype=x.dtype), zeta[:, 0 : n - 1]], dim=1
    )
    beta = zeta[:, n - 1 : 2 * n - 1]
    rgrad = mu - (alpha + beta.transpose(-1, -2)) * x

    rgrad = rgrad.reshape(x_shape)
    return rgrad
Example #8
Source File: functional.py From SlowFast-Network-pytorch with MIT License
def _pointwise_loss(lambd, lambd_optimized, input, target, reduction='mean'):
    if target.requires_grad:
        d = lambd(input, target)
        if reduction == 'none':
            return d
        return torch.mean(d) if reduction == 'mean' else torch.sum(d)
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(input, target)
        return lambd_optimized(expanded_input, expanded_target, _Reduction.get_enum(reduction))
Example #9
Source File: mpc.py From CrypTen with MIT License
def _feature_dropout(self, p=0.5, training=True, inplace=False):
    r"""Randomly zeros out entire channels in the input tensor with probability
    :attr:`p`. (a channel is a nD feature map, e.g., the :math:`j`-th channel
    of the :math:`i`-th sample in the batched input is a nD tensor
    :math:`\text{input}[i, j]`)."""
    assert self.dim() >= 2, "feature dropout requires dimension to be at least 2"
    assert p >= 0.0 and p <= 1.0, "dropout probability has to be between 0 and 1"
    if not training:
        if inplace:
            return self
        else:
            return self.clone()

    # take first 2 dimensions
    feature_dropout_size = self.size()[0:2]
    # create dropout tensor over the first two dimensions
    rand_tensor = MPCTensor.rand(feature_dropout_size, device=self.device)
    feature_dropout_tensor = rand_tensor > p
    # Broadcast to remaining dimensions
    for i in range(2, self.dim()):
        feature_dropout_tensor = feature_dropout_tensor.unsqueeze(i)
    feature_dropout_tensor.share, self.share = torch.broadcast_tensors(
        feature_dropout_tensor.share, self.share
    )
    if inplace:
        result_tensor = self.mul_(feature_dropout_tensor).div_(1 - p)
    else:
        result_tensor = self.mul(feature_dropout_tensor).div_(1 - p)
    return result_tensor
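For comparison, a plain-PyTorch sketch of the same feature-dropout pattern on an ordinary tensor; the shapes and names here are illustrative assumptions, not CrypTen code:

import torch

x = torch.randn(8, 3, 16, 16)  # (batch, channels, H, W)
p = 0.5
# Draw one keep/drop decision per (sample, channel), then broadcast it
# across the spatial dimensions, exactly as the MPC version does.
mask = (torch.rand(8, 3) > p).float().unsqueeze(-1).unsqueeze(-1)  # (8, 3, 1, 1)
mask, x = torch.broadcast_tensors(mask, x)  # both (8, 3, 16, 16)
out = x * mask / (1 - p)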
Example #10
Source File: arithmetic.py From CrypTen with MIT License
def div(self, y):
    """Divide by a given tensor"""
    result = self.clone()
    if isinstance(y, CrypTensor):
        result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
    elif is_tensor(y):
        result.share = torch.broadcast_tensors(result.share, y)[0].clone()
    return result.div_(y)
Example #11
Source File: binary.py From CrypTen with MIT License
def __xor__(self, y):
    """Bitwise XOR operator (element-wise)"""
    result = self.clone()
    if isinstance(y, BinarySharedTensor):
        broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
        result.share = broadcast_tensors[0].clone()
    elif is_tensor(y):
        broadcast_tensors = torch.broadcast_tensors(result.share, y)
        result.share = broadcast_tensors[0].clone()
    return result.__ixor__(y)
Example #12
Source File: binary.py From CrypTen with MIT License
def __and__(self, y):
    """Bitwise AND operator (element-wise)"""
    result = self.clone()
    # TODO: Remove explicit broadcasts to allow smaller beaver triples
    if isinstance(y, BinarySharedTensor):
        broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
        result.share = broadcast_tensors[0].clone()
    elif is_tensor(y):
        broadcast_tensors = torch.broadcast_tensors(result.share, y)
        result.share = broadcast_tensors[0].clone()
    return result.__iand__(y)
Example #13
Source File: cuda_tensor.py From CrypTen with MIT License
def broadcast_tensors(*tensors):
    tensor_list = [t.data for t in tensors]
    results = torch.broadcast_tensors(*tensor_list)
    results = [CUDALongTensor(t) for t in results]
    return results
Example #14
Source File: gradients.py From CrypTen with MIT License
def forward(ctx, input, p=0.5, training=True, inplace=False):
    # inference mode:
    if not training:
        if inplace:
            return input
        else:
            return input.clone()

    # training mode:
    feature_dropout_size = input.size()[0:2]
    cryptensor_type = crypten.get_cryptensor_type(input)
    rand_tensor = crypten.rand(
        feature_dropout_size, cryptensor_type=cryptensor_type
    )
    boolean_mask = rand_tensor > p
    for i in range(2, input.dim()):
        boolean_mask = boolean_mask.unsqueeze(i)
    boolean_mask.share, tensor = torch.broadcast_tensors(
        boolean_mask.share, input.share
    )
    if inplace:
        result = input.mul_(boolean_mask).div_(1 - p)
    else:
        result = input.mul(boolean_mask).div_(1 - p)
    ctx.save_multiple_for_backward([boolean_mask, p])
    return result
Example #15
Source File: utilities.py From Brancher with MIT License
def broadcast_and_squeeze(*args):
    assert all([is_tensor(ar) for ar in args]), 'at least 1 object is not torch tensor'
    if all([np.prod(val.shape[2:]) == 1 for val in args]):
        args = [val.contiguous().view(size=val.shape[:2] + tuple([1, 1])) for val in args]
    uniformed_values = uniform_shapes(*args)
    broadcasted_values = torch.broadcast_tensors(*uniformed_values)
    return broadcasted_values
Example #16
Source File: train_variational_autoencoder_pytorch.py From variational-autoencoder with MIT License
def forward(self, z, x):
    """Return log probability of model."""
    log_p_z = self.log_p_z(self.p_z_loc, self.p_z_scale, z).sum(-1, keepdim=True)
    logits = self.generative_network(z)
    # unsqueeze sample dimension
    logits, x = torch.broadcast_tensors(logits, x.unsqueeze(1))
    log_p_x = self.log_p_x(logits, x).sum(-1, keepdim=True)
    return log_p_z + log_p_x
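The unsqueeze(1) above inserts a sample dimension so that per-sample logits broadcast against the data. A small shape check (the tensor sizes are illustrative assumptions, not from the project):

import torch

logits = torch.randn(4, 7, 10)  # (batch, n_samples, D)
x = torch.randn(4, 10)          # (batch, D)
# x gains a singleton sample dimension and is expanded across all 7 samples.
logits_b, x_b = torch.broadcast_tensors(logits, x.unsqueeze(1))
print(x_b.shape)  # torch.Size([4, 7, 10])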