Python torch.Variable() Examples

The following are 30 code examples of torch.Variable(). Note that Variable actually lives at torch.autograd.Variable, and since PyTorch 0.4 it has been merged into torch.Tensor, so a plain tensor with requires_grad=True does the same job; most of the examples below predate that change. The project and source file for each example are noted above it.
Example #1
Source File: ctgan.py From SDGym with MIT License
def calc_gradient_penalty(netD, real_data, fake_data, device='cpu', pac=10, lambda_=10):
    alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
    alpha = alpha.repeat(1, pac, real_data.size(1))
    alpha = alpha.view(-1, real_data.size(1))

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    # interpolates = torch.Variable(interpolates, requires_grad=True, device=device)
    disc_interpolates = netD(interpolates)

    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size(), device=device),
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = (
        (gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1) ** 2
    ).mean() * lambda_

    return gradient_penalty
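A minimal usage sketch of the penalty inside a WGAN-GP critic step; the tiny netD, optimizer and batch shapes below are hypothetical stand-ins, not SDGym code. Note the batch size must be divisible by pac, and fake_data must carry gradients (here it stands in for generator output), since the torch.Variable line that would mark interpolates is commented out:

import torch
import torch.nn as nn

netD = nn.Sequential(nn.Linear(20, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1))
optimD = torch.optim.Adam(netD.parameters(), lr=1e-4)

real = torch.randn(100, 20)                       # 100 % pac == 0
fake = torch.randn(100, 20, requires_grad=True)   # stands in for generator output

loss_d = netD(fake).mean() - netD(real).mean()
penalty = calc_gradient_penalty(netD, real, fake, device='cpu', pac=10)

optimD.zero_grad()
(loss_d + penalty).backward()
optimD.step()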
Example #2
Source File: decoder_rnn.py From neural-motifs with MIT License
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
    and the original tensor are the same.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
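A usage sketch (the tensors below are hypothetical, not from the neural-motifs code): the point of returning the mask rather than applying it is that the same mask can be reused at every timestep, i.e. variational dropout, whereas F.dropout resamples a new mask on each call.

import torch
from torch.autograd import Variable

hidden = Variable(torch.randn(4, 256))   # e.g. one RNN hidden state
mask = get_dropout_mask(0.3, hidden)

# Reuse the same mask across all timesteps of a sequence.
for _ in range(10):
    hidden = torch.tanh(hidden) * mask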
Example #3
Source File: decoder_rnn.py From VCTree-Scene-Graph-Generation with MIT License
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
    and the original tensor are the same.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
Example #4
Source File: train.py From Structured-Self-Attention with MIT License
def get_activation_wts(attention_model, x):
    """
    Get r attention heads

    Args:
        attention_model : {object} model
        x : {torch.Variable} input whose weights we want

    Returns:
        r different attention weights
    """
    attention_model.batch_size = x.size(0)
    attention_model.hidden_state = attention_model.init_hidden()
    _, wts = attention_model(x)
    return wts
Example #5
Source File: train.py From crnn-pytorch with MIT License
def train(net, criterion, optimizer, train_iter):
    # `image`, `text`, `length`, `converter` and `utils` are module-level
    # objects in the original script; the original body also referred to the
    # global `crnn` model rather than the `net` argument.
    for p in net.parameters():
        p.requires_grad = True
    net.train()

    data = next(train_iter)
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    optimizer.zero_grad()
    preds = net(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    cost.backward()
    optimizer.step()
    return cost
Example #6
Source File: tree_lstm.py From VCTree-Scene-Graph-Generation with MIT License
def forward(self, forest, features, num_obj):
    # calc dropout mask, same for all
    if self.dropout > 0.0:
        dropout_mask = get_dropout_mask(self.dropout, self.out_dim)
    else:
        dropout_mask = None

    # tree lstm input
    out_h = None
    h_order = Variable(torch.LongTensor(num_obj).zero_().cuda())  # used to resume order
    order_idx = 0
    lstm_io = tree_utils.TreeLSTM_IO(out_h, h_order, order_idx, None, None, dropout_mask)

    # run tree lstm forward (leaves to root)
    for idx in range(len(forest)):
        self.treeLSTM(forest[idx], features, lstm_io)

    # resume order to the same as input
    output = torch.index_select(lstm_io.hidden, 0, lstm_io.order.long())
    return output
Example #7
Source File: decoder_tree_lstm.py From VCTree-Scene-Graph-Generation with MIT License
def get_dropout_mask(dropout_probability: float, h_dim: int):
    """
    Computes and returns an element-wise dropout mask of size h_dim, where each
    element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to any tensor - it is returned for the
    caller to apply.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    h_dim : int, required.
        Size of the mask to generate.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
    and the original tensor are the same.
    """
    binary_mask = Variable(torch.FloatTensor(h_dim).cuda().fill_(0.0))
    binary_mask.data.copy_(torch.rand(h_dim) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
Example #8
Source File: network.py From PyTorchWavelets with MIT License
def forward(self, x):
    """
    Takes a batch of signals and convolves each signal with all elements in the
    filter bank. After convolving with the entire filter bank, the method returns
    a tensor of shape [N,N_scales,1/2,T] where the 1/2 number of channels depends
    on whether the filter bank is composed of real or complex filters. If the
    filters are complex the 2 channels represent [real, imag] parts.

    :param x: torch.Variable, batch of input signals of shape [N,1,T]
    :return: torch.Variable, batch of outputs of size [N,N_scales,1/2,T]
    """
    if not self._filters:
        raise ValueError('PyTorch filters not initialized. Please call set_filters() first.')
    results = [None] * len(self._filters)
    for ind, conv in enumerate(self._filters):
        results[ind] = conv(x)
    results = torch.stack(results)          # [n_scales,n_batch,2,t]
    results = results.permute(1, 0, 2, 3)   # [n_batch,n_scales,2,t]
    return results
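A quick shape check with stand-in filters (everything below is hypothetical; the real self._filters are built by set_filters() from wavelets at different scales):

import torch
import torch.nn as nn

n_scales, t = 4, 128
filters = [nn.Conv1d(1, 2, kernel_size=9, padding=4, bias=False)
           for _ in range(n_scales)]                     # 2 channels ~ [real, imag]

x = torch.randn(8, 1, t)                                 # [N, 1, T]
results = torch.stack([conv(x) for conv in filters])     # [n_scales, N, 2, T]
results = results.permute(1, 0, 2, 3)                    # [N, n_scales, 2, T]
assert results.shape == (8, n_scales, 2, t)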
Example #9
Source File: model.py From instance-segmentation-pytorch with GNU General Public License v3.0
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
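For context, this add method (repeated in several CRNN-derived projects below) belongs to a small running-average utility. A minimal sketch of the surrounding class; the reset and val names follow the usual averager pattern in these projects and are assumptions, not quoted from this file:

import torch
from torch.autograd import Variable

class Averager:
    """Running average over scalar tensors/Variables (sketch)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.n_count = 0
        self.sum = 0

    def add(self, v):
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        self.n_count += count
        self.sum += v

    def val(self):
        return self.sum / float(self.n_count) if self.n_count else 0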
Example #10
Source File: function.py From StyleGAN2_PyTorch with MIT License
def asImg(tensor, size=None):
    """
    This function provides a fast approach to transfer the image into a numpy.ndarray.
    It only accepts the output of a sigmoid layer or a hyperbolic tangent layer.

    Arg:    tensor - The torch.Variable object, the rank format is BCHW or BHW
            size   - The tuple object, and the format is (height, width)
    Ret:    The numpy image, the rank format is BHWC
    """
    global channel_op
    result = tensor.detach()

    # 1. Judge the rank first
    if len(tensor.size()) == 3:
        result = torch.stack([result, result, result], 1)

    # 2. Judge the range of the tensor (sigmoid output or hyperbolic tangent output)
    min_v = torch.min(result).cpu().data.numpy()
    max_v = torch.max(result).cpu().data.numpy()
    if max_v > 1.0 or min_v < -1.0:
        raise Exception('tensor value out of range...\t range is [' + str(min_v) + ' ~ ' + str(max_v))
    if min_v < 0:
        result = (result + 1) / 2

    # 3. Define the BCHW -> BHWC operation
    if channel_op is None:
        channel_op = Transpose(BCHW2BHWC)

    # 4. Transpose, move to CPU and (optionally) resize
    result = channel_op(result)
    result = result.cpu().data.numpy()
    if size is not None:
        result_list = []
        for img in result:
            result_list.append(transform.resize(img, (size[0], size[1]), mode='constant', order=0) * 255)
        result = np.stack(result_list, axis=0)
    else:
        result *= 255.
    result = result.astype(np.uint8)
    return result
Example #11
Source File: train.py From Structured-Self-Attention with MIT License
def evaluate(attention_model, x_test, y_test):
    """
    cv results

    Args:
        attention_model : {object} model
        x_test : {nplist} x_test
        y_test : {nplist} y_test

    Returns:
        cv-accuracy
    """
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred, _ = attention_model(x_test_var)
    if bool(attention_model.type):
        y_preds = torch.max(y_test_pred, 1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
    else:
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
    return torch.eq(y_preds, y_test_var).data.sum() / x_test_var.size(0)
Example #12
Source File: model.py From reseg-pytorch with GNU General Public License v3.0
def __define_variable(self, tensor, volatile=False):
    return Variable(tensor, volatile=volatile)
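volatile=True was the pre-0.4 way to mark inference-only Variables; PyTorch 0.4 removed it in favor of torch.no_grad(). A rough modern equivalent of what this helper enables:

import torch

x = torch.randn(8, 16)

# Pre-0.4: y = Variable(x, volatile=True) * 2
# Post-0.4: run under no_grad so no autograd graph is recorded.
with torch.no_grad():
    y = x * 2
assert not y.requires_grad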
Example #13
Source File: model.py From reseg-pytorch with GNU General Public License v3.0
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #14
Source File: utils.py From crnn.pytorch with MIT License
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #15
Source File: copy_task.py From Quaternion-Recurrent-Neural-Networks with GNU General Public License v3.0
def tovar(x):
    return Variable(torch.FloatTensor(x).cuda())
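In post-0.4 PyTorch the Variable wrapper is a no-op, so an equivalent helper (a sketch, assuming x is array-like and a GPU is available) reduces to:

import torch

def tovar(x):
    return torch.as_tensor(x, dtype=torch.float32).cuda()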
Example #16
Source File: utils.py From crnn with MIT License
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #17
Source File: utils.py From ctpn-crnn with MIT License
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #18
Source File: utils.py From basicOCR with GNU General Public License v3.0
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #19
Source File: model.py From deep-forecast-pytorch with GNU General Public License v3.0
def __define_variable(self, tensor, volatile=False):
    return Variable(tensor, volatile=volatile)
Example #20
Source File: model.py From deep-forecast-pytorch with GNU General Public License v3.0
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #21
Source File: _torch_losses.py From solaris with Apache License 2.0
def torch_lovasz_hinge(logits, labels, per_image=False, ignore=None):
    """Lovasz Hinge Loss. Implementation edited from Maxim Berman's GitHub.

    References
    ----------
    https://github.com/bermanmaxim/LovaszSoftmax/
    https://arxiv.org/abs/1705.08790

    Arguments
    ---------
    logits: :class:`torch.Variable`
        logits at each pixel (between -inf and +inf)
    labels: :class:`torch.Tensor`
        binary ground truth masks (0 or 1)
    per_image: bool, optional
        compute the loss per image instead of per batch. Defaults to ``False``.
    ignore: optional void class id.

    Returns
    -------
    loss : :class:`torch.Variable`
        Lovasz loss value for the input logits and labels. Compatible with
        ``loss.backward()`` as it's a :class:`torch.Variable`.
    """
    # TODO: Restructure into a class like TorchFocalLoss for compatibility
    if per_image:
        loss = mean(
            lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0),
                                                     lab.unsqueeze(0),
                                                     ignore))
            for log, lab in zip(logits, labels))
    else:
        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
    return loss
Example #22
Source File: _torch_losses.py From solaris with Apache License 2.0
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    Arguments
    ---------
    logits: :class:`torch.Variable`
        Logits at each prediction (between -inf and +inf)
    labels: :class:`torch.Tensor`
        binary ground truth labels (0 or 1)

    Returns
    -------
    loss : :class:`torch.Variable`
        Lovasz loss value for the input logits and labels.
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss
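lovasz_hinge_flat depends on a lovasz_grad helper defined elsewhere in _torch_losses.py. For reference, a sketch following Maxim Berman's reference implementation, which computes the gradient of the Lovasz extension with respect to the sorted errors:

def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors (sketch)."""
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:  # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard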
Example #23
Source File: _torch_losses.py From solaris with Apache License 2.0
def binary_xloss(logits, labels, ignore=None):
    """
    Binary Cross entropy loss
      logits: [B, H, W] Variable, logits at each pixel (between -inf and +inf)
      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
      ignore: void class id
    """
    logits, labels = flatten_binary_scores(logits, labels, ignore)
    loss = TorchStableBCELoss()(logits, Variable(labels.float()))
    return loss
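The losses above also rely on flatten_binary_scores, defined elsewhere in the same file. A sketch along the lines of Berman's reference implementation, which flattens predictions and labels and drops pixels of the ignore class:

def flatten_binary_scores(scores, labels, ignore=None):
    """Flatten predictions and labels, removing pixels labeled `ignore` (sketch)."""
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = (labels != ignore)
    return scores[valid], labels[valid]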
Example #24
Source File: copy_task.py From Pytorch-Quaternion-Neural-Networks with GNU General Public License v3.0
def tovar(x, cuda):
    if cuda:
        return Variable(torch.FloatTensor(x).cuda())
    else:
        return Variable(torch.FloatTensor(x.astype(np.float64)))
Example #25
Source File: utils.py From ICDAR-2019-SROIE with MIT License
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #26
Source File: word_prediction_criterion.py From translate with BSD 3-Clause "New" or "Revised" License
def predictor_loss_function(self, prediction, target, *args, **kwargs):
    """Pure abstract method that computes the loss.

    Args:
        prediction: Prediction that was made by the model
            of shape [BATCH_SIZE, N_LABELS]
        target: Expected result of shape [BATCH_SIZE, N_OUTPUT_TOKENS]

    Returns:
        loss: This method should return the loss as a Tensor or Variable.
    """
    # Sentinel return; concrete subclasses are expected to override this.
    # (The original `torch.Tensor(float("Inf"))` would raise at runtime.)
    return torch.tensor(float("inf"))
Example #27
Source File: word_prediction_criterion.py From translate with BSD 3-Clause "New" or "Revised" License
def predictor_loss_function(self, prediction, target):
    """Loss function that maximizes the confidence of the true positive.

    Args:
        prediction: Prediction that was made by the model
            of shape [BATCH_SIZE, N_LABELS]
        target: Expected result of shape [BATCH_SIZE, N_OUTPUT_TOKENS]

    Returns:
        loss: Loss as a torch.Variable
    """
    return -prediction.gather(dim=-1, index=target)
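A small shape demonstration (the batch size, label count and token indices below are made up): gather picks each reference token's score along the label axis, so minimizing the negated result pushes those scores up.

import torch

prediction = torch.log_softmax(torch.randn(2, 5), dim=-1)  # [BATCH_SIZE, N_LABELS]
target = torch.tensor([[1, 3], [0, 2]])                    # [BATCH_SIZE, N_OUTPUT_TOKENS]

loss = -prediction.gather(dim=-1, index=target)            # [BATCH_SIZE, N_OUTPUT_TOKENS]
print(loss.mean())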
Example #28
Source File: utils.py From crnn-pytorch with MIT License
def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()
    self.n_count += count
    self.sum += v
Example #29
Source File: PytorchA.py From PytorchToCaffe with MIT License
def analyse(net, inputs):
    """
    analyse the network given input
    :param net: torch.nn.Module
    :param inputs: torch.Variable, torch.Tensor or list of them
    :return: blob_dict, tracked_layers
    """
    del tracked_layers[:]
    del blob_dict[:]
    # Normalize `inputs` to a list. The original used `if inputs is not list`,
    # which compares against the `list` type object and is effectively always True.
    if not isinstance(inputs, list):
        raw_inputs = [inputs]
    else:
        raw_inputs = inputs
    _inputs = []
    for name, layer in net.named_modules():
        layer_name_dict[layer] = name
    for i in raw_inputs:
        if isinstance(i, Variable):
            _inputs.append(i)
        elif isinstance(i, torch.Tensor):
            _inputs.append(Variable(i))
        elif isinstance(i, np.ndarray):
            _inputs.append(Variable(torch.Tensor(i)))
        else:
            raise NotImplementedError("Not Support the input type {}".format(type(i)))
    net.apply(register)
    net.forward(*_inputs)
    return blob_dict, tracked_layers
Example #30
Source File: copy_task.py From Pytorch-Quaternion-Neural-Networks with GNU General Public License v3.0
def tovar(x, cuda):
    if cuda:
        return Variable(torch.FloatTensor(x).cuda())
    else:
        return Variable(torch.FloatTensor(x.astype(np.float64)))