Python torch.neg() Examples
The following are 13 code examples of torch.neg().
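torch.neg(input) returns a new tensor holding the elementwise negation of the input, equivalent to the unary minus operator. A minimal sketch:

import torch

x = torch.tensor([1.0, -2.0, 3.0])
print(torch.neg(x))  # tensor([-1.,  2., -3.]), same result as -x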
Example #1
Source File: Patient2Vec.py From Patient2Vec with MIT License
def get_loss(pred, y, criterion, mtr, a=0.5):
    """
    To calculate loss
    :param pred: predicted value
    :param y: actual value
    :param criterion: nn.CrossEntropyLoss
    :param mtr: beta matrix
    """
    mtr_t = torch.transpose(mtr, 1, 2)
    aa = torch.bmm(mtr, mtr_t)
    loss_fn = 0
    for i in range(aa.size()[0]):
        aai = torch.add(aa[i, ], Variable(torch.neg(torch.eye(mtr.size()[1]))))
        loss_fn += torch.trace(torch.mul(aai, aai).data)
    loss_fn /= aa.size()[0]
    loss = torch.add(criterion(pred, y), Variable(torch.FloatTensor([loss_fn * a])))
    return loss
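The Variable wrapper dates from pre-0.4 PyTorch and is a no-op in current releases. Here torch.neg builds -I so that aai = mtr·mtrᵀ - I; since only the diagonal of the elementwise square survives the trace, the extra term appears to push each attention row toward unit squared norm.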
Example #2
Source File: layers.py From Match-LSTM with MIT License
def forward(self, Hr, Hr_mask, h_0=None):
    Hr = self.dropout.forward(Hr)

    left_beta, _ = self.left_ptr_rnn.forward(Hr, Hr_mask, h_0)
    rtn_beta = left_beta

    if self.bidirectional:
        right_beta_inv, _ = self.right_ptr_rnn.forward(Hr, Hr_mask, h_0)
        right_beta = right_beta_inv[[1, 0], :]
        rtn_beta = (left_beta + right_beta) / 2

    # todo: unexplainable
    new_mask = torch.neg((Hr_mask - 1) * 1e-6)  # mask replace zeros with 1e-6, make sure no gradient explosion
    rtn_beta = rtn_beta + new_mask.unsqueeze(0)

    return rtn_beta
Example #3
Source File: net.py From PMFNet with MIT License
def weighted_binary_cross_entropy_interaction(output, target, weights=None):
    '''
    weights: (A, 2), 0 for negative 1 for positive
    output: (N, A)
    target: (N, A)
    A is action number
    '''
    output = F.sigmoid(output)
    if weights is not None:
        assert len(weights.shape) == 2
        loss = weights[:, 1].unsqueeze(dim=0) * (target * torch.log(output + 1e-8)) + \
               weights[:, 0].unsqueeze(dim=0) * ((1 - target) * torch.log(1 - output + 1e-8))
    else:
        loss = target * torch.log(output + 1e-8) + (1 - target) * torch.log(1 - output + 1e-8)

    return torch.neg(torch.mean(loss))
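Unlike the scalar-weighted variant in Example #9 below, the weights here carry one negative/positive pair per action class and are broadcast across the batch via unsqueeze(dim=0).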
Example #4
Source File: BASS.py From BASS with MIT License
def E_Step(X, logdet, c1_temp, pi_temp, SigmaXY, X_C_SIGMA, sum, c_idx, c_idx_9, c_idx_25,
           distances2, r_ik_5, neig, sumP, X_C, X_C_SIGMA_buf):
    """
    Computes the distances of the data points to each centroid and normalizes them.
    """
    torch.add(X.unsqueeze(1), torch.neg(c1_temp.reshape(-1, Global.neig_num, Global.D_)), out=X_C)
    torch.mul(X_C[:, :, 0].unsqueeze(2), SigmaXY[:, :, 0:2], out=X_C_SIGMA_buf)
    torch.addcmul(X_C_SIGMA_buf, 1, X_C[:, :, 1].unsqueeze(2), SigmaXY[:, :, 2:4], out=X_C_SIGMA[:, :, 0:2])
    X_C_SIGMA[:, :, 2:] = torch.mul(X_C[:, :, 2:], Global.SIGMA_INT)
    torch.mul(-X_C.view(-1, Global.neig_num, Global.D_), X_C_SIGMA.view(-1, Global.neig_num, Global.D_), out=distances2)
    distances2 = distances2.view(-1, Global.neig_num, Global.D_)
    torch.sum(distances2, 2, out=r_ik_5)
    r_ik_5.add_(torch.neg(logdet.reshape(-1, Global.neig_num)))
    r_ik_5.add_(torch.log(pi_temp.reshape(-1, Global.neig_num)))

    c_neig = c_idx_25.reshape(-1, Global.potts_area).float()
    torch.add(c_neig.unsqueeze(1), -c_idx.reshape(-1, Global.neig_num).unsqueeze(2).float(), out=neig)
    torch.sum((neig != 0).float(), 2, out=sumP)
    r_ik_5.add_(-(Global.Beta_P * sumP))

    my_help.softmaxTF(r_ik_5, 1, sum)
Example #5
Source File: main.py From dgl with Apache License 2.0
def forward(self, input, embs, label):
    n = input.shape[0]
    log_target = torch.log(
        torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
    )
    negs = torch.multinomial(
        self.sample_weights, self.num_sampled * n, replacement=True
    ).view(n, self.num_sampled)
    noise = torch.neg(self.weights[negs])
    sum_log_sampled = torch.sum(
        torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
    ).squeeze()
    loss = log_target + sum_log_sampled
    return -loss.sum() / n
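Negating the sampled noise embeddings before the batched matmul supplies the minus sign in the skip-gram negative-sampling objective, log σ(u_o·v) + Σ_k log σ(-u_k·v), which the final line averages and negates into a minimizable loss.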
Example #6
Source File: projection.py From pykg2vec with MIT License
def forward(self, e, r, er_e2, direction="tail"):
    emb_hr_e = self.ent_embeddings(e)  # [m, k]
    emb_hr_r = self.rel_embeddings(r)  # [m, k]

    if direction == "tail":
        ere2_sigmoid = self.g(
            torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True),
            self.ent_embeddings.weight)
    else:
        ere2_sigmoid = self.g(
            torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True),
            self.ent_embeddings.weight)

    ere2_loss_left = -torch.sum(
        torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0))
        * torch.max(torch.FloatTensor([0]).to(self.device), er_e2))
    ere2_loss_right = -torch.sum(
        torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0))
        * torch.max(torch.FloatTensor([0]).to(self.device), torch.neg(er_e2)))

    hrt_loss = ere2_loss_left + ere2_loss_right

    return hrt_loss
Example #7
Source File: layers.py From coqa-baselines with MIT License
def multi_nll_loss(scores, target_mask):
    """
    Select actions with sampling at train-time, argmax at test-time:
    """
    scores = scores.exp()
    loss = 0
    for i in range(scores.size(0)):
        loss += torch.neg(torch.log(
            torch.masked_select(scores[i], target_mask[i]).sum() / scores[i].sum()))
    return loss
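A hypothetical call, assuming scores holds log-probabilities over candidate positions and target_mask flags the acceptable ones:

scores = torch.log_softmax(torch.randn(2, 5), dim=1)  # batch of 2, 5 candidates each
target_mask = torch.tensor([[True, False, False, True, False],
                            [False, True, False, False, False]])
loss = multi_nll_loss(scores, target_mask)  # -log of the selected probability mass, summed over the batch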
Example #8
Source File: main_pytorch.py From GATNE with MIT License
def forward(self, input, embs, label):
    n = input.shape[0]
    log_target = torch.log(
        torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
    )
    negs = torch.multinomial(
        self.sample_weights, self.num_sampled * n, replacement=True
    ).view(n, self.num_sampled)
    noise = torch.neg(self.weights[negs])
    sum_log_sampled = torch.sum(
        torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
    ).squeeze()
    loss = log_target + sum_log_sampled
    return -loss.sum() / n
Example #9
Source File: loss_functions.py From cc with MIT License
def weighted_binary_cross_entropy(output, target, weights=None):
    # epsilon is a small module-level constant in the source file
    if weights is not None:
        assert len(weights) == 2
        loss = weights[1] * (target * torch.log(output + epsilon)) + \
               weights[0] * ((1 - target) * torch.log(1 - output + epsilon))
    else:
        loss = target * torch.log(output + epsilon) + (1 - target) * torch.log(1 - output + epsilon)

    return torch.neg(torch.mean(loss))
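A hypothetical call, assuming output already holds sigmoid probabilities:

epsilon = 1e-8  # assumed value for the sketch; defined at module level in the source file
output = torch.sigmoid(torch.randn(4, 3))
target = torch.randint(0, 2, (4, 3)).float()
loss = weighted_binary_cross_entropy(output, target, weights=[0.5, 2.0])  # up-weight positives relative to negatives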
Example #10
Source File: layers.py From Match-LSTM with MIT License
def forward(self, Hr, Hr_mask, h_0=None):
    Hr = self.dropout.forward(Hr)

    beta_last = None
    for i in range(self.num_hops):
        beta, h_0 = self.ptr_rnn.forward(Hr, Hr_mask, h_0)
        if beta_last is not None and (beta_last == beta).sum().item() == beta.shape[0]:  # beta not changed
            break
        beta_last = beta

    new_mask = torch.neg((Hr_mask - 1) * 1e-6)  # mask replace zeros with 1e-6, make sure no gradient explosion
    rtn_beta = beta + new_mask.unsqueeze(0)

    return rtn_beta
Example #11
Source File: training.py From peakonly with MIT License
def __call__(self, output, target):
    if self.weights is not None:
        assert len(self.weights) == 2
        loss = self.weights[1] * (target * self.logsigmoid(output)) + \
               self.weights[0] * ((1 - target) * self.logsigmoid(-output))
    else:
        loss = target * self.logsigmoid(output) + (1 - target) * self.logsigmoid(-output)

    return torch.neg(torch.mean(loss))
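Working on raw logits through logsigmoid is numerically safer than torch.log(torch.sigmoid(x)), which can evaluate log(0) when the sigmoid saturates; the identity log(1 - σ(x)) = log σ(-x) supplies the negative-class term.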
Example #12
Source File: gatne.py From cogdl with MIT License
def forward(self, input, embs, label):
    n = input.shape[0]
    log_target = torch.log(
        torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
    )
    negs = torch.multinomial(
        self.sample_weights, self.num_sampled * n, replacement=True
    ).view(n, self.num_sampled)
    noise = torch.neg(self.weights[negs])
    sum_log_sampled = torch.sum(
        torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
    ).squeeze()
    loss = log_target + sum_log_sampled
    return -loss.sum() / n
Example #13
Source File: unary.py From torch2trt with MIT License
def aten_neg(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.NEG)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [_op.negative(inp)]

    return [torch.neg(inp)]
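The converter dispatches on the active backend: TensorRT's built-in UnaryOperation.NEG, TVM's _op.negative, or a plain torch.neg fallback when neither backend tensor type is present.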