Python torch_geometric.nn.GCNConv() Examples

The following are 30 code examples of torch_geometric.nn.GCNConv(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all other available functions and classes of the torch_geometric.nn module.
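Before the project-specific examples, here is a minimal self-contained sketch (not taken from any project below) of how GCNConv is typically constructed and applied: a two-layer network over a toy four-node graph.

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

# Toy graph: 4 nodes with 16-dimensional features and 4 directed edges.
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 0]])

conv1 = GCNConv(16, 32)  # GCNConv(in_channels, out_channels)
conv2 = GCNConv(32, 7)

h = F.relu(conv1(x, edge_index))  # shape [4, 32]
out = conv2(h, edge_index)        # shape [4, 7]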
Example #1
Source File: models.py    From IGMC with MIT License
def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1], k=30, 
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(DGCNN, self).__init__(
            dataset, gconv, latent_dim, regression, adj_dropout, force_undirected
        )
        if k < 1:  # transform percentile to number
            node_nums = sorted([g.num_nodes for g in dataset])
            k = node_nums[int(math.ceil(k * len(node_nums)))-1]
            k = max(10, k)  # no smaller than 10
        self.k = int(k)
        print('k used in sortpooling is:', self.k)
        conv1d_channels = [16, 32]
        conv1d_activation = nn.ReLU()
        self.total_latent_dim = sum(latent_dim)
        conv1d_kws = [self.total_latent_dim, 5]
        self.conv1d_params1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
        self.maxpool1d = nn.MaxPool1d(2, 2)
        self.conv1d_params2 = Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
        dense_dim = int((k - 2) / 2 + 1)
        self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        self.lin1 = Linear(self.dense_dim, 128) 
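Note on the arithmetic above: SortPooling keeps k nodes whose concatenated features are flattened into a signal of length k * total_latent_dim, so conv1d_params1 (kernel size and stride both equal to total_latent_dim) emits one value per node, MaxPool1d(2, 2) reduces those k positions to int((k - 2) / 2 + 1), and conv1d_params2 (kernel size 5, stride 1) leaves dense_dim - 5 + 1 positions in each of its 32 output channels, which is exactly the self.dense_dim fed to lin1.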
Example #2
Source File: models.py    From IGMC with MIT License
def __init__(self, dataset, gconv=RGCNConv, latent_dim=[32, 32, 32, 1], k=30, 
                 num_relations=5, num_bases=2, regression=False, adj_dropout=0.2, 
                 force_undirected=False):
        super(DGCNN_RS, self).__init__(
            dataset, 
            GCNConv, 
            latent_dim, 
            k, 
            regression, 
            adj_dropout=adj_dropout, 
            force_undirected=force_undirected
        )
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0], num_relations, num_bases))
        for i in range(0, len(latent_dim)-1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i+1], num_relations, num_bases)) 
Example #3
Source File: models.py    From IGMC with MIT License
def __init__(self, dataset, gconv=RGCNConv, latent_dim=[32, 32, 32, 32], 
                 num_relations=5, num_bases=2, regression=False, adj_dropout=0.2, 
                 force_undirected=False, side_features=False, n_side_features=0, 
                 multiply_by=1):
        super(IGMC, self).__init__(
            dataset, GCNConv, latent_dim, regression, adj_dropout, force_undirected
        )
        self.multiply_by = multiply_by
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0], num_relations, num_bases))
        for i in range(0, len(latent_dim)-1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i+1], num_relations, num_bases))
        self.lin1 = Linear(2*sum(latent_dim), 128)
        self.side_features = side_features
        if side_features:
            self.lin1 = Linear(2*sum(latent_dim)+n_side_features, 128) 
Example #4
Source File: test_asap.py    From pytorch_geometric with MIT License
def test_asap():
    in_channels = 16
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    for GNN in [GraphConv, GCNConv]:
        pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN,
                          add_self_loops=False)
        assert pool.__repr__() == ('ASAPooling(16, ratio=0.5)')
        out = pool(x, edge_index)
        assert out[0].size() == (num_nodes // 2, in_channels)
        assert out[1].size() == (2, 2)

        pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN, add_self_loops=True)
        assert pool.__repr__() == ('ASAPooling(16, ratio=0.5)')
        out = pool(x, edge_index)
        assert out[0].size() == (num_nodes // 2, in_channels)
        assert out[1].size() == (2, 4) 
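Here pool(...) returns a tuple whose first element is the pooled node-feature matrix and whose second element is the coarsened edge_index; with add_self_loops=True the pooled graph additionally keeps self-loop edges, which is why its edge_index grows from shape (2, 2) to (2, 4).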
Example #5
Source File: models.py    From gdc with MIT License
def __init__(self,
                 dataset: InMemoryDataset,
                 hidden: List[int] = [64],
                 dropout: float = 0.5):
        super(GCN, self).__init__()

        num_features = [dataset.data.x.shape[1]] + hidden + [dataset.num_classes]
        layers = []
        for in_features, out_features in zip(num_features[:-1], num_features[1:]):
            layers.append(GCNConv(in_features, out_features))
        self.layers = ModuleList(layers)

        self.reg_params = list(layers[0].parameters())
        self.non_reg_params = [p for l in layers[1:] for p in l.parameters()]

        self.dropout = Dropout(p=dropout)
        self.act_fn = ReLU() 
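The snippet above only shows construction; the forward pass is not included. A minimal sketch of how these pieces plausibly fit together, assuming the usual dropout-then-convolve pattern and a data object carrying x and edge_index (illustrative, not the gdc source):

def forward(self, data):
    x, edge_index = data.x, data.edge_index
    for i, layer in enumerate(self.layers):
        x = self.dropout(x)
        x = layer(x, edge_index)
        if i < len(self.layers) - 1:  # no nonlinearity after the output layer
            x = self.act_fn(x)
    return x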
Example #6
Source File: models_pyg.py    From gnn-model-explainer with Apache License 2.0
def __init__(self, input_dim, hidden_dim, label_dim, num_layers,
            pred_hidden_dims=[], concat=True, bn=True, dropout=0.0, add_self=False, args=None):
        super(GCNNet, self).__init__()
        self.input_dim = input_dim
        print('GCNNet input_dim:', self.input_dim)
        self.hidden_dim = hidden_dim
        print('GCNNet hidden_dim:', self.hidden_dim)
        self.label_dim = label_dim
        print('GCNNet label_dim:', self.label_dim)
        self.num_layers = num_layers
        print('GCNNet num_layers:', self.num_layers)

        # self.concat = concat
        # self.bn = bn
        # self.add_self = add_self
        self.args = args
        self.dropout = dropout
        self.act = F.relu

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(self.input_dim, self.hidden_dim))
        for layer in range(self.num_layers - 2):
            self.convs.append(GCNConv(self.hidden_dim, self.hidden_dim))
        self.convs.append(GCNConv(self.hidden_dim, self.label_dim))
        print('len(self.convs):', len(self.convs)) 
Example #7
Source File: gtn.py    From cogdl with MIT License
def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
        super(GTN, self).__init__()
        self.num_edge = num_edge
        self.num_channels = num_channels
        self.num_nodes = num_nodes
        self.w_in = w_in
        self.w_out = w_out
        self.num_class = num_class
        self.num_layers = num_layers
        layers = []
        for i in range(num_layers):
            if i == 0:
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=True))
            else:
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=False))
        self.layers = nn.ModuleList(layers)
        self.cross_entropy_loss = nn.CrossEntropyLoss()
        self.gcn = GCNConv(in_channels=self.w_in, out_channels=w_out)
        self.linear1 = nn.Linear(self.w_out*self.num_channels, self.w_out)
        self.linear2 = nn.Linear(self.w_out, self.num_class) 
Example #8
Source File: pyg_infomax.py    From cogdl with MIT License
def __init__(self, in_channels, hidden_channels):
        super(Encoder, self).__init__()
        self.conv = GCNConv(in_channels, hidden_channels, cached=True)
        self.prelu = nn.PReLU(hidden_channels) 
Example #9
Source File: test_gcn_conv.py    From pytorch_geometric with MIT License
def test_gcn_conv_with_sparse_input_feature():
    x = torch.sparse_coo_tensor(indices=torch.tensor([[0, 0], [0, 1]]),
                                values=torch.tensor([1., 1.]),
                                size=torch.Size([4, 16]))
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

    conv = GCNConv(16, 32)
    assert conv(x, edge_index).size() == (4, 32) 
Example #10
Source File: test_gcn_conv.py    From pytorch_geometric with MIT License
def test_static_gcn_conv():
    x = torch.randn(3, 4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

    conv = GCNConv(16, 32)
    out = conv(x, edge_index)
    assert out.size() == (3, 4, 32) 
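This works because GCNConv supports "static" graph inputs: extra leading dimensions in x are treated as independent feature batches that share the same edge_index, so an input of shape [3, 4, 16] yields an output of shape [3, 4, 32].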
Example #11
Source File: layers.py    From ClusterGCN with GNU General Public License v3.0
def setup_layers(self):
        """
        Creating the layers based on the args.
        """
        self.layers = []
        self.args.layers = [self.input_channels] + self.args.layers + [self.output_channels]
        for i, _ in enumerate(self.args.layers[:-1]):
            self.layers.append(GCNConv(self.args.layers[i], self.args.layers[i+1]))
        self.layers = ListModule(*self.layers) 
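ListModule here is the repository's own container that registers the layers as submodules (so their parameters are visible to the optimizer), playing the same role as torch.nn.ModuleList; the same pattern appears in Example #22 below.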
Example #12
Source File: gcn.py    From Alchemy with MIT License
def __init__(self,
                 node_input_dim=15,
                 output_dim=12,
                 node_hidden_dim=64,
                 num_step_prop=6,
                 num_step_set2set=6):
        super(GCN, self).__init__()
        self.num_step_prop = num_step_prop
        self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
        self.conv = GCNConv(node_hidden_dim, node_hidden_dim, cached=False)        
        self.set2set = Set2Set(node_hidden_dim, processing_steps=num_step_set2set)
        self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
        self.lin2 = nn.Linear(node_hidden_dim, output_dim) 
Example #13
Source File: simgnn.py    From SimGNN with GNU General Public License v3.0
def setup_layers(self):
        """
        Creating the layers.
        """
        self.calculate_bottleneck_features()
        self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)
        self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)
        self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)
        self.attention = AttentionModule(self.args)
        self.tensor_network = TenorNetworkModule(self.args)
        self.fully_connected_first = torch.nn.Linear(self.feature_count,
                                                     self.args.bottle_neck_neurons)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1) 
Example #14
Source File: MTGCN.py    From MultiTurnDialogZoo with MIT License
def __init__(self, inpt_size, output_size, user_embed_size, 
                 posemb_size, dropout=0.5, threshold=2):
        # inpt_size: utter_hidden_size + user_embed_size
        super(OGCNContext, self).__init__()
        # utter + user_embed + pos_embed
        size = inpt_size + user_embed_size + posemb_size
        self.threshold = threshold
        
        # GatedGCN
        # self.kernel_rnn = nn.GRUCell(size, size)
        # self.conv1 = My_GatedGCN(size, inpt_size, self.kernel_rnn)
        # self.conv2 = My_GatedGCN(size, inpt_size, self.kernel_rnn)
        # self.conv3 = My_GatedGCN(size, inpt_size, self.kernel_rnn)
        self.conv1 = GCNConv(size, inpt_size)
        self.conv2 = GCNConv(size, inpt_size)
        self.conv3 = GCNConv(size, inpt_size)
        # self.bn1 = nn.BatchNorm1d(num_features=inpt_size)
        # self.bn2 = nn.BatchNorm1d(num_features=inpt_size)
        # self.bn3 = nn.BatchNorm1d(num_features=inpt_size)

        # rnn for background
        self.rnn = nn.GRU(inpt_size + user_embed_size, inpt_size, bidirectional=True)

        self.linear1 = nn.Linear(inpt_size * 2, inpt_size)
        self.linear2 = nn.Linear(inpt_size * 2, output_size)
        self.drop = nn.Dropout(p=dropout)
        
        # 100 is far larger than the maximum number of turns in the Cornell and DailyDialog datasets
        self.posemb = nn.Embedding(100, posemb_size)
        self.init_weight() 
Example #15
Source File: test_gnn_explainer.py    From pytorch_geometric with MIT License
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(3, 16)
        self.conv2 = GCNConv(16, 7) 
Example #16
Source File: asap_pool.py    From ASAP with Apache License 2.0
def __init__(self, in_channels, ratio, dropout_att=0, negative_slope=0.2):
        super(ASAP_Pooling, self).__init__()

        self.in_channels = in_channels
        self.ratio = ratio
        self.negative_slope = negative_slope
        self.dropout_att = dropout_att
        self.lin_q = Linear(in_channels, in_channels)
        self.gat_att = Linear(2*in_channels, 1)
        self.gnn_score = LEConv(self.in_channels, 1) # gnn_score: uses LEConv to find cluster fitness scores
        self.gnn_intra_cluster = GCNConv(self.in_channels, self.in_channels) # gnn_intra_cluster: uses GCN to account for intra cluster properties, e.g., edge-weights
        self.reset_parameters() 
Example #17
Source File: asap_pool_model.py    From ASAP with Apache License 2.0
def __init__(self, dataset, num_layers, hidden, ratio=0.8, **kwargs):
        super(ASAP_Pool, self).__init__()
        if not isinstance(ratio, list):
            ratio = [ratio for i in range(num_layers)]
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.pool1 = ASAP_Pooling(in_channels=hidden, ratio=ratio[0], **kwargs)
        self.convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
            self.pools.append(ASAP_Pooling(in_channels=hidden, ratio=ratio[i], **kwargs))
        self.lin1 = Linear(2*hidden, hidden) # 2*hidden due to readout layer
        self.lin2 = Linear(hidden, dataset.num_classes)
        self.reset_parameters() 
Example #18
Source File: graph_saint.py    From ogb with MIT License
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(GCN, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(GCNConv(hidden_channels, hidden_channels))
        self.convs.append(GCNConv(hidden_channels, out_channels))

        self.dropout = dropout 
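Examples #18 and #19 share this constructor and likewise omit the forward pass. A sketch of the standard pattern, assuming torch.nn.functional is imported as F and that ReLU plus dropout sit between layers (illustrative, not the ogb source):

def forward(self, x, edge_index):
    for conv in self.convs[:-1]:
        x = F.relu(conv(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
    return self.convs[-1](x, edge_index)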
Example #19
Source File: cluster_gcn.py    From ogb with MIT License
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(GCN, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(GCNConv(hidden_channels, hidden_channels))
        self.convs.append(GCNConv(hidden_channels, out_channels))

        self.dropout = dropout 
Example #20
Source File: layers.py    From SEAL-CI with GNU General Public License v3.0
def _setup(self):
        """
        Setting up upstream and pooling layers.
        """
        self.graph_convolution_1 = GCNConv(self.number_of_features,
                                           self.args.first_gcn_dimensions)

        self.graph_convolution_2 = GCNConv(self.args.first_gcn_dimensions,
                                           self.args.second_gcn_dimensions)

        self.fully_connected_1 = torch.nn.Linear(self.args.second_gcn_dimensions,
                                                 self.args.first_dense_neurons)

        self.fully_connected_2 = torch.nn.Linear(self.args.first_dense_neurons,
                                                 self.args.second_dense_neurons) 
Example #21
Source File: layers.py    From SEAL-CI with GNU General Public License v3.0
def _setup(self):
        """
        We define two GCN layers; the downstream one does classification.
        """
        self.graph_convolution_1 = GCNConv(self.number_of_features, self.args.macro_gcn_dimensions)
        self.graph_convolution_2 = GCNConv(self.args.macro_gcn_dimensions, self.number_of_labels) 
Example #22
Source File: capsgnn.py    From CapsGNN with GNU General Public License v3.0
def _setup_base_layers(self):
        """
        Creating GCN layers.
        """
        self.base_layers = [GCNConv(self.number_of_features, self.args.gcn_filters)]
        for _ in range(self.args.gcn_layers-1):
            self.base_layers.append(GCNConv(self.args.gcn_filters, self.args.gcn_filters))
        self.base_layers = ListModule(*self.base_layers) 
Example #23
Source File: autoencoder.py    From pytorch_geometric with MIT License
def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()
        self.conv1 = GCNConv(in_channels, 2 * out_channels, cached=True)
        if args.model in ['GAE']:
            self.conv2 = GCNConv(2 * out_channels, out_channels, cached=True)
        elif args.model in ['VGAE']:
            self.conv_mu = GCNConv(2 * out_channels, out_channels, cached=True)
            self.conv_logstd = GCNConv(2 * out_channels, out_channels,
                                       cached=True) 
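For context, this Encoder is designed to be wrapped by torch_geometric.nn.GAE or VGAE. A brief usage sketch for the GAE case (variable names are illustrative):

from torch_geometric.nn import GAE

model = GAE(Encoder(dataset.num_features, out_channels=16))
z = model.encode(data.x, data.train_pos_edge_index)
loss = model.recon_loss(z, data.train_pos_edge_index)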
Example #24
Source File: CNN_GNN2018.py    From Pytorch-Networks with MIT License
def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        self.gnn_layers = nn.ModuleList([GatedGraphConv(2116, 2) for l in range(Gnn_layers)])
        #self.gnn_layers = nn.ModuleList([GCNConv(2116,2116) for l in range(Gnn_layers)])
        #self.gnn_actfs = nn.ModuleList([nn.LeakyReLU() for l in range(Gnn_layers)])
        self.use_gpu = use_gpu 
Example #25
Source File: models.py    From IGMC with MIT License
def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1], 
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(GNN, self).__init__()
        self.regression = regression
        self.adj_dropout = adj_dropout 
        self.force_undirected = force_undirected
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0]))
        for i in range(0, len(latent_dim)-1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i+1]))
        self.lin1 = Linear(sum(latent_dim), 128)
        if self.regression:
            self.lin2 = Linear(128, 1)
        else:
            self.lin2 = Linear(128, dataset.num_classes) 
Example #26
Source File: graph_unet.py    From pytorch_geometric with MIT License
def __init__(self, in_channels, hidden_channels, out_channels, depth,
                 pool_ratios=0.5, sum_res=True, act=F.relu):
        super(GraphUNet, self).__init__()
        assert depth >= 1
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.depth = depth
        self.pool_ratios = repeat(pool_ratios, depth)
        self.act = act
        self.sum_res = sum_res

        channels = hidden_channels

        self.down_convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.down_convs.append(GCNConv(in_channels, channels, improved=True))
        for i in range(depth):
            self.pools.append(TopKPooling(channels, self.pool_ratios[i]))
            self.down_convs.append(GCNConv(channels, channels, improved=True))

        in_channels = channels if sum_res else 2 * channels

        self.up_convs = torch.nn.ModuleList()
        for i in range(depth - 1):
            self.up_convs.append(GCNConv(in_channels, channels, improved=True))
        self.up_convs.append(GCNConv(in_channels, out_channels, improved=True))

        self.reset_parameters() 
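A brief usage sketch (argument values are illustrative): thanks to the repeat() call above, pool_ratios may be a single float applied at every depth or a per-depth list.

net = GraphUNet(in_channels=dataset.num_features, hidden_channels=64,
                out_channels=dataset.num_classes, depth=3)
out = net(data.x, data.edge_index)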
Example #27
Source File: gcn.py    From pytorch_geometric with MIT License
def __init__(self, dataset, num_layers, hidden):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes) 
Example #28
Source File: gcn.py    From pytorch_geometric with MIT License
def __init__(self, dataset, num_layers, hidden, mode='cat'):
        super(GCNWithJK, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin1 = Linear(num_layers * hidden, hidden)
        else:
            self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes) 
Example #29
Source File: gcn.py    From pytorch_geometric with MIT License
def __init__(self, in_channels, out_channels):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(in_channels, 16, cached=True)
        self.conv2 = GCNConv(16, out_channels, cached=True) 
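The cached=True flag makes each layer store its normalized, self-loop-augmented adjacency after the first forward pass, which is only safe in transductive, full-batch settings where edge_index never changes between calls.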
Example #30
Source File: gcn.py    From pytorch_geometric with MIT License
def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, args.hidden)
        self.conv2 = GCNConv(args.hidden, dataset.num_classes)