Python torch_geometric.nn.GATConv() Examples

The following are 13 code examples of torch_geometric.nn.GATConv(), each taken from an open-source project; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module torch_geometric.nn.
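As a quick orientation before the examples, here is a minimal usage sketch (the tensor names x and edge_index below are illustrative, not taken from any of the projects listed): with the default concat=True, a GATConv layer maps in_channels input features per node to heads * out_channels output features.

import torch
from torch_geometric.nn import GATConv

x = torch.randn(4, 16)                     # 4 nodes with 16 features each
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])  # graph connectivity in COO format
conv = GATConv(16, 8, heads=4, dropout=0.6)
out = conv(x, edge_index)                  # shape [4, 4 * 8] = [4, 32]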
Example #1
Source File: ogbn_products_gat.py    From pytorch_geometric with MIT License
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 heads):
        super(GAT, self).__init__()

        self.num_layers = num_layers

        self.convs = torch.nn.ModuleList()
        # `dataset` refers to the module-level dataset object loaded elsewhere in the
        # original script; the `in_channels` argument is not used here.
        self.convs.append(GATConv(dataset.num_features, hidden_channels,
                                  heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(heads * hidden_channels, hidden_channels, heads))
        self.convs.append(
            GATConv(heads * hidden_channels, out_channels, heads,
                    concat=False))

        self.skips = torch.nn.ModuleList()
        self.skips.append(Lin(dataset.num_features, hidden_channels * heads))
        for _ in range(num_layers - 2):
            self.skips.append(
                Lin(hidden_channels * heads, hidden_channels * heads))
        self.skips.append(Lin(hidden_channels * heads, out_channels)) 
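The __init__ above only builds the layers. A full-batch forward pass consistent with these shapes could look like the sketch below (hedged: the original script trains with neighbor sampling, so its actual forward differs, and torch.nn.functional is assumed to be imported as F):

def forward(self, x, edge_index):
    # Sketch only: pair each GATConv with its linear skip connection and
    # apply ELU plus dropout on all but the final layer (dropout rate is illustrative).
    for i, (conv, skip) in enumerate(zip(self.convs, self.skips)):
        x = conv(x, edge_index) + skip(x)
        if i != self.num_layers - 1:
            x = F.elu(x)
            x = F.dropout(x, p=0.5, training=self.training)
    return x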
Example #2
Source File: MTGAT.py    From MultiTurnDialogZoo with MIT License
def __init__(self, inpt_size, output_size, user_embed_size, 
                 posemb_size, dropout=0.5, threshold=2, head=5):
        # inpt_size: utter_hidden_size + user_embed_size
        super(GATContext, self).__init__()
        # utter + user_embed + pos_embed
        size = inpt_size + user_embed_size + posemb_size
        self.threshold = threshold
        
        # GraphConv
        self.conv1 = GATConv(size, inpt_size, heads=head, 
                             dropout=dropout)
        self.conv2 = GATConv(size, inpt_size, heads=head,
                             dropout=dropout)
        self.conv3 = GATConv(size, inpt_size, heads=head,
                             dropout=dropout)
        self.layer_norm1 = nn.LayerNorm(inpt_size)
        self.layer_norm2 = nn.LayerNorm(inpt_size)
        self.layer_norm3 = nn.LayerNorm(inpt_size)
        self.layer_norm4 = nn.LayerNorm(inpt_size)
        self.compress = nn.Linear(head * inpt_size, inpt_size)

        # rnn for background
        self.rnn = nn.GRU(inpt_size + user_embed_size, inpt_size, bidirectional=True)

        self.linear1 = nn.Linear(inpt_size * 2, inpt_size)
        self.linear2 = nn.Linear(inpt_size, output_size)
        self.drop = nn.Dropout(p=dropout)
        
        # 100 is far larger than the maximum number of turns in the Cornell and DailyDialog datasets
        self.posemb = nn.Embedding(100, posemb_size)
        self.init_weight() 
Example #3
Source File: gat.py    From pytorch_geometric with MIT License
def __init__(self, in_channels, out_channels):
        super(GAT, self).__init__()
        self.conv1 = GATConv(in_channels, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, out_channels, dropout=0.6) 
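A matching forward pass for this two-layer model would typically interleave dropout with the attention layers; the following is a hedged sketch for node classification, assuming torch.nn.functional is imported as F:

def forward(self, x, edge_index):
    # Sketch of the usual two-layer GAT forward pass.
    x = F.dropout(x, p=0.6, training=self.training)
    x = F.elu(self.conv1(x, edge_index))
    x = F.dropout(x, p=0.6, training=self.training)
    x = self.conv2(x, edge_index)
    return F.log_softmax(x, dim=-1)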
Example #4
Source File: gat.py    From pytorch_geometric with MIT License
def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = GATConv(
            dataset.num_features,
            args.hidden,
            heads=args.heads,
            dropout=args.dropout)
        self.conv2 = GATConv(
            args.hidden * args.heads,
            dataset.num_classes,
            heads=args.output_heads,
            concat=False,
            dropout=args.dropout) 
Example #5
Source File: gat.py    From pytorch_geometric with MIT License
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GATConv(dataset.num_features, 8, heads=8,
                             dropout=0.6).jittable()

        self.conv2 = GATConv(64, dataset.num_classes, heads=1, concat=True,
                             dropout=0.6).jittable() 
Example #6
Source File: gat.py    From pytorch_geometric with MIT License
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GATConv(dataset.num_features, 8, heads=8, dropout=0.6)
        # On the Pubmed dataset, use heads=8 in conv2.
        self.conv2 = GATConv(8 * 8, dataset.num_classes, heads=1, concat=False,
                             dropout=0.6) 
Example #7
Source File: geniepath.py    From pytorch_geometric with MIT License
def __init__(self, in_dim, out_dim):
        super(Breadth, self).__init__()
        self.gatconv = GATConv(in_dim, out_dim, heads=1) 
Example #8
Source File: model_geniepath.py    From GeniePath-pytorch with MIT License
def __init__(self, in_dim, out_dim):
        super(Breadth, self).__init__()
        # `heads` is a module-level hyperparameter defined in the original script.
        self.gatconv = GATConv(in_dim, out_dim, heads=heads)
Example #9
Source File: ppi_gat.py    From GeniePath-pytorch with MIT License
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GATConv(train_dataset.num_features, 256, heads=4)
        self.lin1 = torch.nn.Linear(train_dataset.num_features, 4 * 256)
        self.conv2 = GATConv(4 * 256, 256, heads=4)
        self.lin2 = torch.nn.Linear(4 * 256, 4 * 256)
        self.conv3 = GATConv(
            4 * 256, train_dataset.num_classes, heads=6, concat=False)
        self.lin3 = torch.nn.Linear(4 * 256, train_dataset.num_classes) 
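Here the linear layers act as learned skip connections around each attention layer. A forward sketch consistent with these dimensions (an assumption, with torch.nn.functional imported as F):

def forward(self, x, edge_index):
    # Sketch: sum each GATConv output with a linear projection of its input.
    x = F.elu(self.conv1(x, edge_index) + self.lin1(x))
    x = F.elu(self.conv2(x, edge_index) + self.lin2(x))
    x = self.conv3(x, edge_index) + self.lin3(x)
    return x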
Example #10
Source File: model_gat.py    From GeniePath-pytorch with MIT License
def __init__(self, in_dim, out_dim):
        super(GAT, self).__init__()
        self.conv1 = GATConv(in_dim, 256, heads=4)
        self.lin1 = torch.nn.Linear(in_dim, 4 * 256)
        self.conv2 = GATConv(4 * 256, 256, heads=4)
        self.lin2 = torch.nn.Linear(4 * 256, 4 * 256)
        self.conv3 = GATConv(
            4 * 256, out_dim, heads=6, concat=False)
        self.lin3 = torch.nn.Linear(4 * 256, out_dim) 
Example #11
Source File: gat.py    From Alchemy with MIT License
def __init__(self,
                 node_input_dim=15,
                 output_dim=12,
                 node_hidden_dim=64,
                 num_step_prop=6,
                 num_step_set2set=6):
        super(GAT, self).__init__()
        self.num_step_prop = num_step_prop
        self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
        self.conv = GATConv(node_hidden_dim, node_hidden_dim)
        
        self.set2set = Set2Set(node_hidden_dim, processing_steps=num_step_set2set)
        self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
        self.lin2 = nn.Linear(node_hidden_dim, output_dim) 
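This model reuses a single GATConv for num_step_prop propagation steps and pools node states into a graph-level vector with Set2Set. A hedged forward sketch, assuming a torch_geometric Data/Batch object with x, edge_index and batch attributes, and torch.nn.functional imported as F:

def forward(self, data):
    # Sketch: embed nodes, propagate with the shared GATConv, then read out per graph.
    out = F.relu(self.lin0(data.x))
    for _ in range(self.num_step_prop):
        out = F.relu(self.conv(out, data.edge_index))
    out = self.set2set(out, data.batch)   # [num_graphs, 2 * node_hidden_dim]
    out = F.relu(self.lin1(out))
    return self.lin2(out)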
Example #12
Source File: layers.py    From MultiTurnDialogZoo with MIT License
def __init__(self, in_channels, out_channels, kernel, head=8, dropout=0.5):
        super(My_GATRNNConv, self).__init__()

        # kernel is a Gated GRUCell
        self.rnn = kernel     # [in_channel, out_channel]
        self.conv = GATConv(in_channels, in_channels, heads=head, dropout=dropout)
        self.compress = nn.Linear(in_channels * head, in_channels)
        self.in_channels = in_channels
        self.opt = nn.Linear(in_channels, out_channels)
Example #13
Source File: han.py    From cogdl with MIT License
def __init__(self, num_edge, w_in, w_out):
        super(HANLayer, self).__init__()
        self.gat_layer = nn.ModuleList()
        for _ in range(num_edge):
            self.gat_layer.append(GATConv(w_in, w_out // 8, 8))
        self.att_layer = AttentionLayer(w_out)