Python networkx.is_directed() Examples

The following are 22 code examples of networkx.is_directed(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module networkx, or try the search function.
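Before the project code, here is a minimal, self-contained illustration of what nx.is_directed() reports for the two basic graph classes (nothing project-specific is assumed):

import networkx as nx

G = nx.Graph([(1, 2), (2, 3)])      # undirected
DG = nx.DiGraph([(1, 2), (2, 3)])   # directed

print(nx.is_directed(G))    # False
print(nx.is_directed(DG))   # True
print(DG.is_directed())     # True; the method form is equivalent to the function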
Example #1
Source File: split_train_test.py    From EvalNE with MIT License
def _sanity_check(G):
    r"""
    Helper function that checks whether the input graph contains a single connected component. Raises an error if not.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    Raises
    ------
    ValueError
        If the graph has more than one (weakly) connected component.
    """
    # Compute the number of connected components
    if G.is_directed():
        num_ccs = nx.number_weakly_connected_components(G)
    else:
        num_ccs = nx.number_connected_components(G)

    # Raise an error if more than one CC exists
    if num_ccs != 1:
        raise ValueError("Input graph should contain one (weakly) connected component. "
                         "This graph contains: " + str(num_ccs)) 
Example #2
Source File: node2vec.py    From CogDL-TensorFlow with MIT License
def train(self, G):
        self.G = G
        is_directed = nx.is_directed(self.G)
        for i, j in G.edges():
            G[i][j]["weight"] = G[i][j].get("weight", 1.0)
            if not is_directed:
                G[j][i]["weight"] = G[j][i].get("weight", 1.0)
        self._preprocess_transition_probs()
        walks = self._simulate_walks(self.walk_num, self.walk_length)
        walks = [[str(node) for node in walk] for walk in walks]
        model = Word2Vec(
            walks,
            size=self.dimension,
            window=self.window_size,
            min_count=0,
            sg=1,
            workers=self.worker,
            iter=self.iteration,
        )
        id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
        self.embeddings = np.asarray(
            [model[str(id2node[i])] for i in range(len(id2node))]
        )
        return self.embeddings 
Example #3
Source File: node2vec.py    From cogdl with MIT License
def train(self, G):
        self.G = G
        is_directed = nx.is_directed(self.G)
        for i, j in G.edges():
            G[i][j]["weight"] = G[i][j].get("weight", 1.0)
            if not is_directed:
                G[j][i]["weight"] = G[j][i].get("weight", 1.0)
        self._preprocess_transition_probs()
        walks = self._simulate_walks(self.walk_num, self.walk_length)
        walks = [[str(node) for node in walk] for walk in walks]
        model = Word2Vec(
            walks,
            size=self.dimension,
            window=self.window_size,
            min_count=0,
            sg=1,
            workers=self.worker,
            iter=self.iteration,
        )
        id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
        self.embeddings = np.asarray(
            [model.wv[str(id2node[i])] for i in range(len(id2node))]
        )
        return self.embeddings 
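Examples #2 and #3 are nearly identical; the second indexes embeddings through model.wv, which is the access path gensim recommends (plain model[...] indexing was deprecated and later removed), and both use the pre-4.0 gensim keyword names size and iter (renamed vector_size and epochs in gensim 4). The only networkx-specific step is the weight-defaulting loop, shown here in isolation as a minimal sketch:

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b")                # no weight attribute yet
G.add_edge("b", "c", weight=2.0)

# Give every edge an explicit weight, defaulting to 1.0, as the train() loops above do.
for i, j in G.edges():
    G[i][j]["weight"] = G[i][j].get("weight", 1.0)

print(G["a"]["b"]["weight"])        # 1.0 (filled in)
print(G["c"]["b"]["weight"])        # 2.0 (kept; for nx.Graph both orientations share one attribute dict)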
Example #4
Source File: netsmf.py    From cogdl with MIT License
def _random_walk_matrix(self, pid):
        # construct matrix based on random walk
        np.random.seed(pid)
        matrix = sp.lil_matrix((self.num_node, self.num_node))
        t0 = time.time()
        for round in range(int(self.num_round / self.worker)):
            if round % 10 == 0 and pid == 0:
                print(
                    "round %d / %d, time: %lf"
                    % (round * self.worker, self.num_round, time.time() - t0)
                )
            for i in range(self.num_edge):
                u, v = self.edges[i]
                if not self.is_directed and np.random.rand() > 0.5:
                    v, u = self.edges[i]
                for r in range(1, self.window_size + 1):
                    u_, v_, zp = self._path_sampling(u, v, r)
                    matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp
        return matrix 
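The snippet accumulates sampled contributions into a scipy.sparse.lil_matrix, the sparse format suited to incremental writes. A generic sketch of that pattern, with a made-up uniform sampler standing in for the project's _path_sampling:

import numpy as np
import scipy.sparse as sp

n, rounds = 5, 100
matrix = sp.lil_matrix((n, n))         # LIL: cheap element-wise writes
rng = np.random.default_rng(0)
for _ in range(rounds):
    u, v = rng.integers(0, n, size=2)  # placeholder for a sampled node pair
    matrix[u, v] += 1.0 / rounds       # accumulate a small contribution
matrix = matrix.tocsr()                # convert once writing is finished
print(matrix.sum())                    # close to 1.0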
Example #5
Source File: graph.py    From netrd with MIT License
def ensure_undirected(G):
    """Ensure the graph G is undirected.

    If it is not, coerce it to undirected and warn the user.

    Parameters
    ----------
    G (networkx graph)
        The graph to be checked

    Returns
    -------

    G (nx.Graph)
        Undirected version of the input graph

    """
    if nx.is_directed(G):
        G = G.to_undirected(as_view=False)
        warnings.warn("Coercing directed graph to undirected.", RuntimeWarning)
    return G 
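A quick usage sketch, assuming ensure_undirected and its imports (networkx as nx, warnings) are in scope:

import networkx as nx

DG = nx.DiGraph([(0, 1), (1, 2)])
G = ensure_undirected(DG)        # emits the RuntimeWarning and returns an undirected copy
print(nx.is_directed(G))         # False
print(G.number_of_edges())       # 2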
Example #6
Source File: utils.py    From CS-GNN with MIT License
def rm_useless(G, feats, class_map, unlabeled_nodes, num_layers):
    # find useless nodes
    print('start to check and remove {} unlabeled nodes'.format(len(unlabeled_nodes)))
    unlabeled_nodes = set(unlabeled_nodes)
    rm_nodes = []
    for n_id in tqdm(unlabeled_nodes):
        neighbors_set = set()
        neighbors_set.add(n_id)
        for _ in range(num_layers):
            for node in neighbors_set:
                if nx.is_directed(G):
                    neighbors_set = neighbors_set | set(G.neighbors(node)) | set(G.predecessors(node))
                else:
                    neighbors_set = neighbors_set | set(G.neighbors(node))
        if check_rm(neighbors_set, unlabeled_nodes):
            rm_nodes.append(n_id)
    # rm nodes
    G_new = G  # fall back to the unchanged graph if no nodes are removed
    if len(rm_nodes):
        for node in rm_nodes:
            G.remove_node(node)
        G_new = nx.relabel.convert_node_labels_to_integers(G, ordering='sorted')
        feats = np.delete(feats, rm_nodes, 0)
        class_map = np.delete(class_map, rm_nodes, 0)
        print('remove {} '.format(len(rm_nodes)), 'useless unlabeled nodes')
    return G_new, feats, class_map 
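The inner loops above grow a node's neighborhood layer by layer, pulling in predecessors as well when the graph is directed. A standalone sketch of that k-hop idea (a hypothetical helper written for illustration, not part of CS-GNN):

import networkx as nx

def k_hop_neighborhood(G, n_id, k):
    """Return n_id plus every node reachable within k hops, ignoring edge direction."""
    frontier, seen = {n_id}, {n_id}
    for _ in range(k):
        nxt = set()
        for node in frontier:
            nxt |= set(G.neighbors(node))
            if nx.is_directed(G):
                nxt |= set(G.predecessors(node))
        frontier = nxt - seen
        seen |= nxt
    return seen

print(k_hop_neighborhood(nx.path_graph(5), 0, 2))    # {0, 1, 2}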
Example #7
Source File: smoothness.py    From CS-GNN with MIT License
def compute_feature_smoothness(path, times=0):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    feats = np.load(path+'-feats.npy')
    # smooth
    for i in range(times):
        feats = feature_broadcast(feats, G_org)
    np.save(path+'-feats_'+str(times)+'.npy', feats)

    min_max_scaler = preprocessing.MinMaxScaler()
    feats = min_max_scaler.fit_transform(feats)
    smoothness = np.zeros(feats.shape[1])
    for src, dst in G_org.edges():
        smoothness += (feats[src]-feats[dst])*(feats[src]-feats[dst])
    smoothness = np.linalg.norm(smoothness,ord=1)
    print('The smoothness is: ', 2*smoothness/edge_num/feats.shape[1]) 
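A toy check of the accumulation above, using integer node ids that index rows of feats (as the CS-GNN data format does); the final score uses the same 2 * ||.||_1 / (edges * dims) normalisation as the snippet:

import numpy as np
import networkx as nx

G = nx.path_graph(3)                      # edges (0, 1) and (1, 2)
feats = np.array([[0.0, 1.0],
                  [1.0, 1.0],
                  [1.0, 0.0]])
smoothness = np.zeros(feats.shape[1])
for src, dst in G.edges():
    diff = feats[src] - feats[dst]
    smoothness += diff * diff
score = 2 * np.linalg.norm(smoothness, ord=1) / G.number_of_edges() / feats.shape[1]
print(score)                              # 1.0 for this toy input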
Example #8
Source File: smoothness.py    From CS-GNN with MIT License
def compute_label_smoothness(path, rate=0.):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    class_map = json.load(open(path+'-class_map.json'))
    for k, v in class_map.items():
        if type(v) != list:
            class_map = convert_list(class_map)
        break
    labels = convert_ndarray(class_map)
    labels = np.squeeze(label_to_vector(labels))

    # smooth
    G_org = label_broadcast(G_org, labels, rate)
    with open(path+'-G_'+str(rate)+'.json', 'w') as f:
        f.write(json.dumps(json_graph.node_link_data(G_org)))

    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    smoothness = 0
    for src, dst in G_org.edges():
        if labels[src] != labels[dst]:
            smoothness += 1
    print('The smoothness is: ', 2*smoothness/edge_num) 
Example #9
Source File: test_function.py    From aws-kube-codesuite with Apache License 2.0
def test_is_directed(self):
        assert_equal(self.G.is_directed(), nx.is_directed(self.G))
        assert_equal(self.DG.is_directed(), nx.is_directed(self.DG)) 
Example #10
Source File: test_function.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def test_is_directed(self):
        assert_equal(self.G.is_directed(), nx.is_directed(self.G))
        assert_equal(self.DG.is_directed(), nx.is_directed(self.DG)) 
Example #11
Source File: test_function.py    From qgisSpaceSyntaxToolkit with GNU General Public License v3.0
def test_is_directed(self):
        assert_equal(self.G.is_directed(),nx.is_directed(self.G))
        assert_equal(self.DG.is_directed(),nx.is_directed(self.DG)) 
Example #12
Source File: line.py    From cogdl with MIT License
def _train_line(self, order):
        # train Line model with order
        self.alpha = self.init_alpha
        batch_size = self.batch_size
        t0 = time.time()
        num_batch = int(self.num_sampling_edge / batch_size)
        epoch_iter = tqdm(range(num_batch))
        for b in epoch_iter:
            if b % 100 == 0:
                epoch_iter.set_description(
                    f"Progress: {b *1.0/num_batch * 100:.4f}%, alpha: {self.alpha:.6f}, time: {time.time() - t0:.4f}"
                )
                self.alpha = self.init_alpha * max((1 - b * 1.0 / num_batch), 0.0001)
            u, v = [0] * batch_size, [0] * batch_size
            for i in range(batch_size):
                edge_id = alias_draw(self.edges_table, self.edges_prob)
                u[i], v[i] = self.edges[edge_id]
                if not self.is_directed and np.random.rand() > 0.5:
                    v[i], u[i] = self.edges[edge_id]

            vec_error = np.zeros((batch_size, self.dimension))
            label, target = np.asarray([1 for i in range(batch_size)]), np.asarray(v)
            for j in range(1 + self.negative):
                if j != 0:
                    label = np.asarray([0 for i in range(batch_size)])
                    for i in range(batch_size):
                        target[i] = alias_draw(self.node_table, self.node_prob)
                if order == 1:
                    self._update(
                        self.emb_vertex[u], self.emb_vertex[target], vec_error, label
                    )
                else:
                    self._update(
                        self.emb_vertex[u], self.emb_context[target], vec_error, label
                    )
            self.emb_vertex[u] += vec_error 
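alias_setup and alias_draw are CogDL helpers implementing Walker's alias method, which allows each edge or noise node to be drawn in O(1) after linear preprocessing. A minimal self-contained version matching those call signatures (my own sketch, not the CogDL source):

import numpy as np

def alias_setup(probs):
    """Build the alias tables (J, q) for a discrete distribution `probs`."""
    K = len(probs)
    q = np.zeros(K)               # scaled probabilities
    J = np.zeros(K, dtype=int)    # alias indices
    smaller, larger = [], []
    for i, prob in enumerate(probs):
        q[i] = K * prob
        (smaller if q[i] < 1.0 else larger).append(i)
    while smaller and larger:
        small, large = smaller.pop(), larger.pop()
        J[small] = large
        q[large] = q[large] - (1.0 - q[small])
        (smaller if q[large] < 1.0 else larger).append(large)
    return J, q

def alias_draw(J, q):
    """Draw one index in O(1) from the distribution encoded by (J, q)."""
    i = int(np.floor(np.random.rand() * len(J)))
    return i if np.random.rand() < q[i] else J[i]

table, prob = alias_setup([0.5, 0.3, 0.2])
samples = [alias_draw(table, prob) for _ in range(10000)]
print([samples.count(k) / 10000 for k in range(3)])   # roughly [0.5, 0.3, 0.2]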
Example #13
Source File: node2vec.py    From cogdl with MIT License
def _preprocess_transition_probs(self):
        # Preprocessing of transition probabilities for guiding the random walks.
        G = self.G
        is_directed = nx.is_directed(self.G)

        print(len(list(G.nodes())))
        print(len(list(G.edges())))

        s = time.time()
        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr]["weight"] for nbr in G.neighbors(node)]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [
                float(u_prob) / norm_const for u_prob in unnormalized_probs
            ]
            alias_nodes[node] = alias_setup(normalized_probs)

        t = time.time()
        print("alias_nodes", t - s)

        alias_edges = {}
        s = time.time()

        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
        else:
            for edge in G.edges():
                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])

        t = time.time()
        print("alias_edges", t - s)

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges

        return 
Example #14
Source File: line.py    From CogDL-TensorFlow with MIT License
def _train_line(self, order):
        # train Line model with order
        self.alpha = self.init_alpha
        batch_size = self.batch_size
        t0 = time.time()
        num_batch = int(self.num_sampling_edge / batch_size)
        epoch_iter = tqdm(range(num_batch))
        for b in epoch_iter:
            if b % 100 == 0:
                epoch_iter.set_description(
                    f"Progress: {b *1.0/num_batch * 100:.4f}%, alpha: {self.alpha:.6f}, time: {time.time() - t0:.4f}"
                )
                self.alpha = self.init_alpha * max((1 - b * 1.0 / num_batch), 0.0001)
            u, v = [0] * batch_size, [0] * batch_size
            for i in range(batch_size):
                edge_id = alias_draw(self.edges_table, self.edges_prob)
                u[i], v[i] = self.edges[edge_id]
                if not self.is_directed and np.random.rand() > 0.5:
                    v[i], u[i] = self.edges[edge_id]

            vec_error = np.zeros((batch_size, self.dimension))
            label, target = np.asarray([1 for i in range(batch_size)]), np.asarray(v)
            for j in range(self.negative):
                if j != 0:
                    label = np.asarray([0 for i in range(batch_size)])
                    for i in range(batch_size):
                        target[i] = alias_draw(self.node_table, self.node_prob)
                if order == 1:
                    self._update(
                        self.emb_vertex[u], self.emb_vertex[target], vec_error, label
                    )
                else:
                    self._update(
                        self.emb_vertex[u], self.emb_context[target], vec_error, label
                    )
            self.emb_vertex[u] += vec_error 
Example #15
Source File: node2vec.py    From CogDL-TensorFlow with MIT License
def _preprocess_transition_probs(self):
        # Preprocessing of transition probabilities for guiding the random walks.
        G = self.G
        is_directed = nx.is_directed(self.G)

        print(len(list(G.nodes())))
        print(len(list(G.edges())))

        s = time.time()
        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr]["weight"] for nbr in G.neighbors(node)]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [
                float(u_prob) / norm_const for u_prob in unnormalized_probs
            ]
            alias_nodes[node] = alias_setup(normalized_probs)

        t = time.time()
        print("alias_nodes", t - s)

        alias_edges = {}
        s = time.time()

        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
        else:
            for edge in G.edges():
                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])

        t = time.time()
        print("alias_edges", t - s)

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges

        return 
Example #16
Source File: test_utils.py    From numpy-ml with GNU General Public License v3.0
def to_networkx(G):
    """Convert my graph representation to a networkx graph"""
    G_nx = nx.DiGraph() if G.is_directed else nx.Graph()
    V = list(G._V2I.keys())
    G_nx.add_nodes_from(V)

    for v in V:
        fr_i = G._V2I[v]
        edges = G._G[fr_i]

        for edge in edges:
            G_nx.add_edge(edge.fr, edge.to, weight=edge._w)
    return G_nx 
Example #17
Source File: test_utils.py    From numpy-ml with GNU General Public License v3.0
def from_networkx(G_nx):
    """Convert a networkx graph to my graph representation"""
    V = list(G_nx.nodes)
    edges = list(G_nx.edges)
    is_weighted = "weight" in G_nx[edges[0][0]][edges[0][1]]

    E = []
    for e in edges:
        if is_weighted:
            E.append(Edge(e[0], e[1], G_nx[e[0]][e[1]]["weight"]))
        else:
            E.append(Edge(e[0], e[1]))

    return DiGraph(V, E) if nx.is_directed(G_nx) else UndirectedGraph(V, E) 
Example #18
Source File: estimator.py    From karateclub with GNU General Public License v3.0
def _check_directedness(self, graph):
        """Checking the undirected nature of a single graph."""
        directed = nx.is_directed(graph)
        assert directed == False, "Graph is directed." 
Example #19
Source File: convert.py    From pytorch_geometric with MIT License
def from_networkx(G):
    r"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a
    :class:`torch_geometric.data.Data` instance.

    Args:
        G (networkx.Graph or networkx.DiGraph): A networkx graph.
    """

    G = nx.convert_node_labels_to_integers(G)
    G = G.to_directed() if not nx.is_directed(G) else G
    edge_index = torch.tensor(list(G.edges)).t().contiguous()

    data = {}

    for i, (_, feat_dict) in enumerate(G.nodes(data=True)):
        for key, value in feat_dict.items():
            data[key] = [value] if i == 0 else data[key] + [value]

    for i, (_, _, feat_dict) in enumerate(G.edges(data=True)):
        for key, value in feat_dict.items():
            data[key] = [value] if i == 0 else data[key] + [value]

    for key, item in data.items():
        try:
            data[key] = torch.tensor(item)
        except ValueError:
            pass

    data['edge_index'] = edge_index.view(2, -1)
    data = torch_geometric.data.Data.from_dict(data)
    data.num_nodes = G.number_of_nodes()

    return data 
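A small usage sketch of the converter above, assuming torch and torch_geometric are installed; the node attribute name x is just an illustrative choice:

import networkx as nx

G = nx.path_graph(3)
nx.set_node_attributes(G, {0: 0.1, 1: 0.2, 2: 0.3}, name="x")

data = from_networkx(G)
print(data.edge_index)    # shape [2, 4]: both directions of each undirected edge
print(data.x)             # tensor([0.1000, 0.2000, 0.3000])
print(data.num_nodes)     # 3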
Example #20
Source File: line.py    From CogDL-TensorFlow with MIT License
def train(self, G):
        # run the LINE algorithm: order 1, order 2, or 3 (order 1 + order 2 concatenated)
        self.G = G
        self.is_directed = nx.is_directed(self.G)
        self.num_node = G.number_of_nodes()
        self.num_edge = G.number_of_edges()
        self.num_sampling_edge = self.walk_length * self.walk_num * self.num_node

        node2id = dict([(node, vid) for vid, node in enumerate(G.nodes())])
        self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]
        self.edges_prob = np.asarray([G[u][v].get("weight", 1.0) for u, v in G.edges()])
        self.edges_prob /= np.sum(self.edges_prob)
        self.edges_table, self.edges_prob = alias_setup(self.edges_prob)

        degree_weight = np.zeros(self.num_node)  # float accumulator; an int array would truncate fractional weights
        for u, v in G.edges():
            degree_weight[node2id[u]] += G[u][v].get("weight", 1.0)
            if not self.is_directed:
                degree_weight[node2id[v]] += G[u][v].get("weight", 1.0)
        self.node_prob = np.power(degree_weight, 0.75)
        self.node_prob /= np.sum(self.node_prob)
        self.node_table, self.node_prob = alias_setup(self.node_prob)

        if self.order == 3:
            self.dimension = int(self.dimension / 2)
        if self.order == 1 or self.order == 3:
            print("train line with 1-order")
            print(type(self.dimension))
            self.emb_vertex = (
                np.random.random((self.num_node, self.dimension)) - 0.5
            ) / self.dimension
            self._train_line(order=1)
            embedding1 = preprocessing.normalize(self.emb_vertex, "l2")

        if self.order == 2 or self.order == 3:
            print("train line with 2-order")
            self.emb_vertex = (
                np.random.random((self.num_node, self.dimension)) - 0.5
            ) / self.dimension
            self.emb_context = self.emb_vertex
            self._train_line(order=2)
            embedding2 = preprocessing.normalize(self.emb_vertex, "l2")

        if self.order == 1:
            self.embeddings = embedding1
        elif self.order == 2:
            self.embeddings = embedding2
        else:
            print("concatenate two embedding...")
            self.embeddings = np.hstack((embedding1, embedding2))
        return self.embeddings 
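The noise distribution built above raises each node's (weighted) degree to the power 0.75 and normalises, as in word2vec-style negative sampling. A toy version on integer-labelled nodes (no node2id remapping needed here):

import numpy as np
import networkx as nx

G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2)])
degree_weight = np.zeros(G.number_of_nodes())
for u, v in G.edges():
    w = G[u][v].get("weight", 1.0)
    degree_weight[u] += w
    degree_weight[v] += w              # undirected: credit both endpoints
node_prob = np.power(degree_weight, 0.75)
node_prob /= node_prob.sum()
print(node_prob)                       # hub node 0 gets the largest sampling probability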
Example #21
Source File: line.py    From cogdl with MIT License
def train(self, G):
        # run the LINE algorithm: order 1, order 2, or 3 (order 1 + order 2 concatenated)
        self.G = G
        self.is_directed = nx.is_directed(self.G)
        self.num_node = G.number_of_nodes()
        self.num_edge = G.number_of_edges()
        self.num_sampling_edge = self.walk_length * self.walk_num * self.num_node

        node2id = dict([(node, vid) for vid, node in enumerate(G.nodes())])
        self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]
        self.edges_prob = np.asarray([G[u][v].get("weight", 1.0) for u, v in G.edges()])
        self.edges_prob /= np.sum(self.edges_prob)
        self.edges_table, self.edges_prob = alias_setup(self.edges_prob)

        degree_weight = np.zeros(self.num_node)  # float accumulator; an int array would truncate fractional weights
        for u, v in G.edges():
            degree_weight[node2id[u]] += G[u][v].get("weight", 1.0)
            if not self.is_directed:
                degree_weight[node2id[v]] += G[u][v].get("weight", 1.0)
        self.node_prob = np.power(degree_weight, 0.75)
        self.node_prob /= np.sum(self.node_prob)
        self.node_table, self.node_prob = alias_setup(self.node_prob)

        if self.order == 3:
            self.dimension = int(self.dimension / 2)
        if self.order == 1 or self.order == 3:
            print("train line with 1-order")
            print(type(self.dimension))
            self.emb_vertex = (
                np.random.random((self.num_node, self.dimension)) - 0.5
            ) / self.dimension
            self._train_line(order=1)
            embedding1 = preprocessing.normalize(self.emb_vertex, "l2")

        if self.order == 2 or self.order == 3:
            print("train line with 2-order")
            self.emb_vertex = (
                np.random.random((self.num_node, self.dimension)) - 0.5
            ) / self.dimension
            self.emb_context = self.emb_vertex
            self._train_line(order=2)
            embedding2 = preprocessing.normalize(self.emb_vertex, "l2")

        if self.order == 1:
            self.embeddings = embedding1
        elif self.order == 2:
            self.embeddings = embedding2
        else:
            print("concatenate two embedding...")
            self.embeddings = np.hstack((embedding1, embedding2))
        return self.embeddings 
Example #22
Source File: split_train_test.py    From EvalNE with MIT License
def broder_alg(G, E):
    r"""
    Runs Andrei Broder's algorithm to select, uniformly at random, a spanning tree of the input
    graph. The direction of the edges included in train_E is taken from E, which respects the
    edge directions in the original graph; thus, the results are still valid for directed graphs.
    For pairs of nodes in the original digraph which have edges in both directions, we randomly
    select the direction of the edge included in the ST.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    E : set
       A set of directed or undirected edges constituting the graph G.

    Returns
    -------
    train_E : set
       A set of edges of G describing the random spanning tree

    References
    ----------
    .. [1] A. Broder, "Generating Random Spanning Trees", Proc. of the 30th Annual Symposium
           on Foundations of Computer Science, pp. 442--447, 1989.
    """
    # Create two partitions, S and T. Initially store all nodes in S.
    S = set(G.nodes)
    T = set()

    # Pick a random node as the "current node" and mark it as visited.
    current_node = random.sample(S, 1).pop()
    S.remove(current_node)
    T.add(current_node)

    # Perform random walk on the graph
    train_E = set()
    while S:
        if G.is_directed():
            neighbour_node = random.sample(list(G.successors(current_node)) + list(G.predecessors(current_node)), 1).pop()
        else:
            neighbour_node = random.sample(list(G.neighbors(current_node)), 1).pop()
        if neighbour_node not in T:
            S.remove(neighbour_node)
            T.add(neighbour_node)
            if random.random() < 0.5:
                if (current_node, neighbour_node) in E:
                    train_E.add((current_node, neighbour_node))
                else:
                    train_E.add((neighbour_node, current_node))
            else:
                if (neighbour_node, current_node) in E:
                    train_E.add((neighbour_node, current_node))
                else:
                    train_E.add((current_node, neighbour_node))
        current_node = neighbour_node

    # Return the set of edges constituting the spanning tree
    return train_E
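A usage sketch for broder_alg, assuming the function and its imports are in scope and a Python version where random.sample still accepts a set (the function samples from sets directly, which CPython removed in 3.11). The resulting spanning tree should have n-1 edges and connect every node:

import networkx as nx

G = nx.karate_club_graph()
E = set(G.edges())

st_edges = broder_alg(G, E)
T = nx.Graph(list(st_edges))
print(len(st_edges) == G.number_of_nodes() - 1)    # True: a spanning tree has n-1 edges
print(nx.is_connected(T))                          # True: it spans the whole graph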