Python torch_geometric.utils.softmax() Examples
The following are 19 code examples of torch_geometric.utils.softmax().
The examples are drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the torch_geometric.utils module.
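Before the examples, a quick orientation: softmax(src, index) computes a numerically stable softmax over groups of entries in src, where index assigns each entry to a group (typically the target node of each edge, or the graph each node belongs to in a batch). A minimal sketch of the call follows; the exact signature varies slightly across torch_geometric versions, and newer releases also accept CSR ptr offsets, as Example #8 below shows.

import torch
from torch_geometric.utils import softmax

src = torch.tensor([0.5, 1.0, 2.0, 3.0])
index = torch.tensor([0, 0, 1, 1])  # entries 0-1 form one group, 2-3 another

out = softmax(src, index)
# Each group is normalized independently, so out[:2] and out[2:] each sum to 1.
print(out)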
Example #1
Source File: topk_pool.py From pytorch_geometric with MIT License
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
    """"""
    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    attn = x if attn is None else attn
    attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
    score = (attn * self.weight).sum(dim=-1)

    if self.min_score is None:
        score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1))
    else:
        score = softmax(score, batch)

    perm = topk(score, self.ratio, batch, self.min_score)
    x = x[perm] * score[perm].view(-1, 1)
    x = self.multiplier * x if self.multiplier != 1 else x

    batch = batch[perm]
    edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                       num_nodes=score.size(0))

    return x, edge_index, edge_attr, batch, perm, score[perm]
Example #2
Source File: pyg_gnn_layer.py From GraphNAS with Apache License 2.0
def message(self, x_i, x_j, edge_index, num_nodes):
    if self.att_type == "const":
        if self.training and self.dropout > 0:
            x_j = F.dropout(x_j, p=self.dropout, training=True)
        neighbor = x_j
    elif self.att_type == "gcn":
        if self.gcn_weight is None or self.gcn_weight.size(0) != x_j.size(0):
            # gcn_weight must be recomputed for each different graph
            _, norm = self.norm(edge_index, num_nodes, None)
            self.gcn_weight = norm
        neighbor = self.gcn_weight.view(-1, 1, 1) * x_j
    else:
        # Compute attention coefficients.
        alpha = self.apply_attention(edge_index, num_nodes, x_i, x_j)
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)
        neighbor = x_j * alpha.view(-1, self.heads, 1)
    if self.pool_dim > 0:
        for layer in self.pool_layer:
            neighbor = layer(neighbor)
    return neighbor
Example #3
Source File: attention.py From pytorch_geometric with MIT License
def forward(self, x, batch, size=None):
    """"""
    x = x.unsqueeze(-1) if x.dim() == 1 else x
    size = batch[-1].item() + 1 if size is None else size

    gate = self.gate_nn(x).view(-1, 1)
    x = self.nn(x) if self.nn is not None else x
    assert gate.dim() == x.dim() and gate.size(0) == x.size(0)

    gate = softmax(gate, batch, num_nodes=size)
    out = scatter_add(gate * x, batch, dim=0, dim_size=size)

    return out
Example #4
Source File: sag_pool.py From pytorch_geometric with MIT License
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
    """"""
    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    attn = x if attn is None else attn
    attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
    score = self.gnn(attn, edge_index).view(-1)

    if self.min_score is None:
        score = self.nonlinearity(score)
    else:
        score = softmax(score, batch)

    perm = topk(score, self.ratio, batch, self.min_score)
    x = x[perm] * score[perm].view(-1, 1)
    x = self.multiplier * x if self.multiplier != 1 else x

    batch = batch[perm]
    edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                       num_nodes=score.size(0))

    return x, edge_index, edge_attr, batch, perm, score[perm]
Example #5
Source File: edge_pool.py From pytorch_geometric with MIT License
def compute_edge_score_softmax(raw_edge_score, edge_index, num_nodes):
    return softmax(raw_edge_score, edge_index[1], num_nodes=num_nodes)
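Note which index vector the softmax groups by: here it is edge_index[1] (the target node of each edge), so raw scores are normalized over all edges pointing at the same node. A small sketch with made-up numbers illustrates this grouping:

import torch
from torch_geometric.utils import softmax

# Hypothetical 3-node graph with edges 0->1, 2->1, 1->2:
edge_index = torch.tensor([[0, 2, 1],
                           [1, 1, 2]])
raw_edge_score = torch.tensor([1., 1., 3.])

# The two edges sharing target node 1 split the probability mass;
# the lone edge into node 2 gets weight 1.
print(softmax(raw_edge_score, edge_index[1]))  # -> [0.5, 0.5, 1.0]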
Example #6
Source File: gat_conv.py From pytorch_geometric with MIT License
def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: OptTensor,
            index: Tensor, ptr: OptTensor,
            size_i: Optional[int]) -> Tensor:
    alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
    alpha = F.leaky_relu(alpha, self.negative_slope)
    alpha = softmax(alpha, index, ptr, size_i)
    self._alpha = alpha
    alpha = F.dropout(alpha, p=self.dropout, training=self.training)
    return x_j * alpha.unsqueeze(-1)
Example #7
Source File: agnn_conv.py From pytorch_geometric with MIT License
def message(self, x_j: Tensor, x_norm_i: Tensor, x_norm_j: Tensor,
            index: Tensor, ptr: OptTensor,
            size_i: Optional[int]) -> Tensor:
    alpha = self.beta * (x_norm_i * x_norm_j).sum(dim=-1)
    alpha = softmax(alpha, index, ptr, size_i)
    return x_j * alpha.view(-1, 1)
Example #8
Source File: test_softmax.py From pytorch_geometric with MIT License
def test_softmax():
    src = torch.tensor([1., 1., 1., 1.])
    index = torch.tensor([0, 0, 1, 2])
    ptr = torch.tensor([0, 2, 3, 4])

    out = softmax(src, index)
    assert out.tolist() == [0.5, 0.5, 1, 1]
    assert softmax(src, None, ptr).tolist() == out.tolist()
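The expected values follow from per-group normalization, and the ptr tensor encodes the same grouping as CSR offsets: entries [0:2], [2:3], and [3:4] form the three groups. A plain-PyTorch re-derivation of the first group, as a sketch rather than part of the test:

import torch

src = torch.tensor([1., 1., 1., 1.])
# Group {0, 1}: softmax over two equal scores gives 0.5 each.
print(torch.softmax(src[:2], dim=0))  # tensor([0.5000, 0.5000])
# The singleton groups {2} and {3} trivially normalize to 1.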
Example #9
Source File: graph_models.py From G-Bert with MIT License
def message(self, x_i, x_j, edge_index, num_nodes):
    # Compute attention coefficients.
    alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
    alpha = F.leaky_relu(alpha, self.negative_slope)
    alpha = softmax(alpha, edge_index[0], num_nodes)

    # Drop attention coefficients only during training.
    alpha = F.dropout(alpha, p=self.dropout, training=self.training)

    return x_j * alpha.view(-1, self.heads, 1)
Example #10
Source File: graph_star_conv_multi_rel_super_attn.py From graph_star with MIT License
def message(self, x_q, x_k, x_v, edge_index_i, num_nodes):
    score = self.cal_att_score(x_q, x_k, self.heads)
    # score = F.leaky_relu(score)
    score = softmax(score, edge_index_i, num_nodes)
    # score = F.dropout(score, p=self.dropout, training=self.training)
    x_v = F.dropout(x_v, p=self.dropout, training=self.training)
    return x_v * score.view(-1, self.heads, 1)
Example #11
Source File: graph_star_conv.py From graph_star with MIT License
def message(self, x_q, x_k, x_v, edge_index, num_nodes):
    score = self.cal_att_score(x_q, x_k, self.heads)
    # score = F.leaky_relu(score)
    score = softmax(score, edge_index[0], num_nodes)
    # score = F.dropout(score, p=self.dropout, training=self.training)
    x_v = F.dropout(x_v, p=self.dropout, training=self.training)
    return x_v * score.view(-1, self.heads, 1)
Example #12
Source File: graph_star_conv_multi_rel.py From graph_star with MIT License
def message(self, x_q, x_k, x_v, edge_index_i, num_nodes):
    score = self.cal_att_score(x_q, x_k, self.heads)
    # score = F.leaky_relu(score)
    score = softmax(score, edge_index_i, num_nodes)
    # score = F.dropout(score, p=self.dropout, training=self.training)
    x_v = F.dropout(x_v, p=self.dropout, training=self.training)
    return x_v * score.view(-1, self.heads, 1)
Example #13
Source File: pyg_basic_operators.py From GraphNAS with Apache License 2.0
def forward(self, neighbor_vecs, self_vecs):
    # shape [num_nodes, num_sample, num_heads]
    alpha = ((self_vecs * self.att_self_weight).sum(dim=-1)
             + (neighbor_vecs * self.att_neighbor_weight).sum(dim=-1))
    alpha = F.leaky_relu(alpha, negative_slope=0.2)
    # alpha = torch.softmax(alpha, dim=-2)
    # Sample attention coefficients stochastically.
    return alpha
Example #14
Source File: pyg_basic_operators.py From GraphNAS with Apache License 2.0
def preprocess(self, alpha, edge_index, neighbor_vecs, num_nodes):
    if isinstance(alpha, int):
        if self.training and self.dropout > 0:
            neighbor_vecs = F.dropout(neighbor_vecs, p=self.dropout,
                                      training=self.training)
        return alpha * neighbor_vecs
    else:
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        neighbor = neighbor_vecs * alpha.view(-1, self.num_head, 1)
        return neighbor
Example #15
Source File: pyg_basic_operators.py From GraphNAS with Apache License 2.0
def preprocess(self, alpha, need_softmax, neighbor_vecs, num_sample):
    # shape [num_nodes, num_sample, num_heads]
    if isinstance(alpha, torch.Tensor):
        if need_softmax:
            alpha = torch.softmax(alpha, dim=-2)
        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        out = neighbor_vecs * alpha.view(-1, num_sample, self.num_head, 1)
    else:
        out = neighbor_vecs * alpha
    return out
Example #16
Source File: asap.py From pytorch_geometric with MIT License
def forward(self, x, edge_index, edge_weight=None, batch=None):
    N = x.size(0)

    edge_index, edge_weight = add_remaining_self_loops(
        edge_index, edge_weight, fill_value=1, num_nodes=N)

    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    x = x.unsqueeze(-1) if x.dim() == 1 else x

    x_pool = x
    if self.GNN is not None:
        x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,
                                        edge_weight=edge_weight)

    x_pool_j = x_pool[edge_index[0]]
    x_q = scatter(x_pool_j, edge_index[1], dim=0, reduce='max')
    x_q = self.lin(x_q)[edge_index[1]]

    score = self.att(torch.cat([x_q, x_pool_j], dim=-1)).view(-1)
    score = F.leaky_relu(score, self.negative_slope)
    score = softmax(score, edge_index[1], num_nodes=N)

    # Sample attention coefficients stochastically.
    score = F.dropout(score, p=self.dropout, training=self.training)

    v_j = x[edge_index[0]] * score.view(-1, 1)
    x = scatter(v_j, edge_index[1], dim=0, reduce='add')

    # Cluster selection.
    fitness = self.gnn_score(x, edge_index).sigmoid().view(-1)
    perm = topk(fitness, self.ratio, batch)
    x = x[perm] * fitness[perm].view(-1, 1)
    batch = batch[perm]

    # Graph coarsening.
    row, col = edge_index
    A = SparseTensor(row=row, col=col, value=edge_weight,
                     sparse_sizes=(N, N))
    S = SparseTensor(row=row, col=col, value=score, sparse_sizes=(N, N))
    S = S[:, perm]

    A = S.t() @ A @ S

    if self.add_self_loops:
        A = A.fill_diag(1.)
    else:
        A = A.remove_diag()

    row, col, edge_weight = A.coo()
    edge_index = torch.stack([row, col], dim=0)

    return x, edge_index, edge_weight, batch, perm
Example #17
Source File: hypergraph_conv.py From pytorch_geometric with MIT License
def forward(self, x, hyperedge_index, hyperedge_weight=None):
    r"""
    Args:
        x (Tensor): Node feature matrix :math:`\mathbf{X}`
        hyperedge_index (LongTensor): Hyperedge indices from
            :math:`\mathbf{H}`.
        hyperedge_weight (Tensor, optional): Sparse hyperedge weights from
            :math:`\mathbf{W}`. (default: :obj:`None`)
    """
    x = torch.matmul(x, self.weight)
    alpha = None

    if self.use_attention:
        x = x.view(-1, self.heads, self.out_channels)
        x_i, x_j = x[hyperedge_index[0]], x[hyperedge_index[1]]
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0))
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

    if hyperedge_weight is None:
        D = degree(hyperedge_index[0], x.size(0), x.dtype)
    else:
        D = scatter_add(hyperedge_weight[hyperedge_index[1]],
                        hyperedge_index[0], dim=0, dim_size=x.size(0))
    D = 1.0 / D
    D[D == float("inf")] = 0

    if hyperedge_index.numel() == 0:
        num_edges = 0
    else:
        num_edges = hyperedge_index[1].max().item() + 1
    B = 1.0 / degree(hyperedge_index[1], num_edges, x.dtype)
    B[B == float("inf")] = 0
    if hyperedge_weight is not None:
        B = B * hyperedge_weight

    self.flow = 'source_to_target'
    out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha)
    self.flow = 'target_to_source'
    out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha)

    if self.concat is True:
        out = out.view(-1, self.heads * self.out_channels)
    else:
        out = out.mean(dim=1)

    if self.bias is not None:
        out = out + self.bias

    return out
Example #18
Source File: star_attn.py From graph_star with MIT License
def forward(self, stars, nodes, batch):
    dtype, device = batch.dtype, batch.device
    num_star = stars.size(1)
    b = num_star * batch + len(nodes)
    edge_index = batch.new_empty((2, 0))
    col = torch.arange(start=0, end=len(nodes), dtype=dtype, device=device)

    # add star-to-node edges
    for i in range(num_star):
        row = b + i
        edge_index = torch.cat(
            [edge_index, torch.stack([row, col], dim=0)], dim=1)

    # add star self-loops
    if self.use_star:
        star_row = torch.arange(start=len(nodes),
                                end=len(nodes) + len(stars),
                                dtype=dtype, device=device)
        edge_index = torch.cat(
            [edge_index, torch.stack([star_row, star_row], dim=0)], dim=1)

    # TODO add cross star!
    edge_index_i = edge_index[0]
    edge_index_j = edge_index[1]

    x = torch.cat([nodes, stars.view(-1, self.in_channels)], dim=0)
    xq = self.Wq(x).view(-1, self.heads, self.size_pre_head)
    xk = self.Wk(x).view(-1, self.heads, self.size_pre_head)
    xv = self.Wv(x).view(-1, self.heads, self.size_pre_head)

    xq = torch.index_select(xq, 0, edge_index_i)
    xk = torch.index_select(xk, 0, edge_index_j)
    xv = torch.index_select(xv, 0, edge_index_j)

    score = self.cal_att_score(xq, xk, self.heads)
    coef = softmax(score, edge_index_i, len(x))
    # TODO add tensorboard
    # [:-num_star] is star-to-node
    # [-num_star:] is star self-loop
    coef = F.dropout(coef, p=self.coef_dropout, training=self.training)
    xv = F.dropout(xv, p=self.dropout, training=self.training)

    out = xv * coef.view(-1, self.heads, 1)
    out = scatter_("add", out, edge_index_i)[len(nodes):]
    new_stars = out.view(-1, num_star, self.out_channels)

    if self.activation is not None:
        new_stars = self.activation(new_stars)
    if self.residual:
        new_stars = new_stars + stars
    if self.layer_norm:
        new_stars = self.sLayerNorm(new_stars)
    return new_stars
Example #19
Source File: asap_pool.py From ASAP with Apache License 2.0
def forward(self, x, edge_index, edge_weight=None, batch=None):
    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    # NxF
    x = x.unsqueeze(-1) if x.dim() == 1 else x

    # Add self-loops.
    fill_value = 1
    num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0)
    edge_index, edge_weight = add_remaining_self_loops(
        edge_index=edge_index, edge_weight=edge_weight,
        fill_value=fill_value, num_nodes=num_nodes.sum())

    N = x.size(0)  # total number of nodes in the batch

    # ExF
    x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,
                                    edge_weight=edge_weight)
    x_pool_j = x_pool[edge_index[1]]
    x_j = x[edge_index[1]]

    # ---Master query formation---
    # NxF
    X_q, _ = scatter_max(x_pool_j, edge_index[0], dim=0)
    # NxF
    M_q = self.lin_q(X_q)
    # ExF
    M_q = M_q[edge_index[0].tolist()]

    score = self.gat_att(torch.cat((M_q, x_pool_j), dim=-1))
    score = F.leaky_relu(score, self.negative_slope)
    score = softmax(score, edge_index[0], num_nodes=num_nodes.sum())

    # Sample attention coefficients stochastically.
    score = F.dropout(score, p=self.dropout_att, training=self.training)

    # ExF
    v_j = x_j * score.view(-1, 1)

    # ---Aggregation---
    # NxF
    out = scatter_add(v_j, edge_index[0], dim=0)

    # ---Cluster selection---
    # Nx1
    fitness = torch.sigmoid(self.gnn_score(x=out,
                                           edge_index=edge_index)).view(-1)
    perm = topk(x=fitness, ratio=self.ratio, batch=batch)
    x = out[perm] * fitness[perm].view(-1, 1)

    # ---Maintaining graph connectivity---
    batch = batch[perm]
    edge_index, edge_weight = graph_connectivity(
        device=x.device, perm=perm, edge_index=edge_index,
        edge_weight=edge_weight, score=score, ratio=self.ratio,
        batch=batch, N=N)

    return x, edge_index, edge_weight, batch, perm