Python networkx.info() Examples
The following are 27 code examples of networkx.info(), drawn from open-source projects. You can go to the original project or source file via the reference above each example, or browse the other available functions and classes of the networkx module.
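For orientation, here is a minimal sketch of what nx.info() reports (assuming a NetworkX 2.x environment matching these examples; the function was removed in NetworkX 3.0, and the node-specific form nx.info(G, n=...) used in several of the tests below belongs to that older API):

import networkx as nx

G = nx.path_graph(5)    # simple path graph: 0-1-2-3-4
print(nx.info(G))       # multi-line summary: graph type, node and edge counts, average degree
print(nx.info(G, n=1))  # node-level summary: degree and neighbors of node 1 (older 2.x API)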
Example #1
Source File: test_function.py From aws-kube-codesuite with Apache License 2.0 | 6 votes |
def test_info_digraph(self):
    G = nx.DiGraph(name='path_graph(5)')
    nx.add_path(G, [0, 1, 2, 3, 4])
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: DiGraph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average in degree: 0.8000',
                                     'Average out degree: 0.8000'])
    assert_equal(info, expected_graph_info)

    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 2'])
    assert_equal(info, expected_node_info)

    assert_raises(nx.NetworkXError, nx.info, G, n=-1)
Example #2
Source File: graphityOut.py From r2graphity with MIT License | 6 votes |
def printGraph(graphity):

    # TODO add more info to print, alias and stuff, sample info
    # print dangling APIs
    # print dangling strings

    for item in graphity.nodes(data=True):
        print item[0]
        if 'alias' in item[1]:
            print "Node alias: " + item[1]['alias']

        # mix up API calls and strings and sort by offset
        callStringMerge = item[1]['calls'] + item[1]['strings']
        callStringMerge.sort(key=lambda x: x[0])

        for cx in callStringMerge:
            print cx


# Printing all the meta info to cmdline
Example #3
Source File: test_function.py From aws-kube-codesuite with Apache License 2.0 | 6 votes |
def test_info(self):
    G = nx.path_graph(5)
    G.name = "path_graph(5)"
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: Graph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average degree: 1.6000'])
    assert_equal(info, expected_graph_info)

    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 0 2'])
    assert_equal(info, expected_node_info)
Example #4
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 6 votes |
def parse_seq_file(path_to_seq_file):

    seq_file_dict = input_parser(path_to_seq_file)

    A_seq_label_dict = {}
    A_input_path_dict = {}
    ordered_paths_list = []
    anno_path_dict = {}

    for a_seq_file in seq_file_dict:
        logging.info(a_seq_file)

        A_seq_label_dict[a_seq_file['aln_name']] = a_seq_file['seq_name']
        A_input_path_dict[a_seq_file['seq_name']] = a_seq_file['seq_path']
        ordered_paths_list.append(a_seq_file['seq_path'])
        anno_path_dict[a_seq_file['seq_name']] = a_seq_file['annotation_path']

    return A_seq_label_dict, A_input_path_dict, ordered_paths_list, anno_path_dict
Example #5
Source File: test_function.py From Carnets with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_info_digraph(self):
    G = nx.DiGraph(name='path_graph(5)')
    nx.add_path(G, [0, 1, 2, 3, 4])
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: DiGraph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average in degree: 0.8000',
                                     'Average out degree: 0.8000'])
    assert_equal(info, expected_graph_info)

    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 2'])
    assert_equal(info, expected_node_info)

    assert_raises(nx.NetworkXError, nx.info, G, n=-1)
Example #6
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 6 votes |
def realign_all_nodes(inGraph, input_dict):
    logging.info('Running realign_all_nodes')

    realign_node_list = []

    iso_list = inGraph.graph['isolates'].split(',')

    # Load genomes into memory

    # Only need to realign nodes with more than one isolate in them
    for node, data in inGraph.nodes(data=True):
        # print(data)
        if len(data['ids'].split(',')) > 1:
            realign_node_list.append(node)

    # Realign the nodes. This is where multiprocessing will come in.
    for a_node in realign_node_list:
        inGraph = local_node_realign_new(inGraph, a_node, input_dict[1])

    nx.write_graphml(inGraph, 'intermediate_split_unlinked.xml')

    return inGraph
Example #7
Source File: test_function.py From Carnets with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_info(self):
    G = nx.path_graph(5)
    G.name = "path_graph(5)"
    info = nx.info(G)
    expected_graph_info = '\n'.join(['Name: path_graph(5)',
                                     'Type: Graph',
                                     'Number of nodes: 5',
                                     'Number of edges: 4',
                                     'Average degree: 1.6000'])
    assert_equal(info, expected_graph_info)

    info = nx.info(G, n=1)
    expected_node_info = '\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 0 2'])
    assert_equal(info, expected_node_info)
Example #8
Source File: test_function.py From qgisSpaceSyntaxToolkit with GNU General Public License v3.0 | 6 votes |
def test_info_digraph(self):
    G=nx.DiGraph(name='path_graph(5)')
    G.add_path([0,1,2,3,4])
    info=nx.info(G)
    expected_graph_info='\n'.join(['Name: path_graph(5)',
                                   'Type: DiGraph',
                                   'Number of nodes: 5',
                                   'Number of edges: 4',
                                   'Average in degree: 0.8000',
                                   'Average out degree: 0.8000'])
    assert_equal(info,expected_graph_info)

    info=nx.info(G,n=1)
    expected_node_info='\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 2'])
    assert_equal(info,expected_node_info)

    assert_raises(nx.NetworkXError,nx.info,G,n=-1)
Example #9
Source File: test_function.py From qgisSpaceSyntaxToolkit with GNU General Public License v3.0 | 6 votes |
def test_info(self):
    G=nx.path_graph(5)
    info=nx.info(G)
    expected_graph_info='\n'.join(['Name: path_graph(5)',
                                   'Type: Graph',
                                   'Number of nodes: 5',
                                   'Number of edges: 4',
                                   'Average degree: 1.6000'])
    assert_equal(info,expected_graph_info)

    info=nx.info(G,n=1)
    expected_node_info='\n'.join(
        ['Node 1 has the following properties:',
         'Degree: 2',
         'Neighbors: 0 2'])
    assert_equal(info,expected_node_info)
Example #10
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 5 votes |
def progressiveMauve_alignment(path_to_progressiveMauve, fasta_path_list, out_aln_name):
    """
    A wrapper for progressiveMauve for use in GenGraph for the identification of co-linear blocks
    :param path_to_progressiveMauve: Absolute path to progressiveMauve executable
    :param fasta_path_list: List of paths to fasta files
    :param out_aln_name: Name for alignment file, added to mauve output
    :return:
    """
    # Maybe add --skip-gapped-alignment flag?

    logging.info(path_to_progressiveMauve)

    progressiveMauve_call = [path_to_progressiveMauve,
                             '--output=globalAlignment_' + out_aln_name,
                             '--scratch-path-1=./mauveTemp',
                             '--scratch-path-2=./mauveTemp'] + fasta_path_list

    try:
        return call(progressiveMauve_call, stdout=open(os.devnull, 'wb'))

        # Check if file was created successfully
        bbone_file = open('globalAlignment_' + out_aln_name + '.backbone')

        number_of_lines = 3
        for i in range(number_of_lines):
            line = bbone_file.readline()
            print(len(line.split('\t')))
            if len(line.split('\t')) <= 1:
                logging.error('progressiveMauve_call error: output of progressiveMauve empty')
                print('Error: progressiveMauve_call output appears empty.')
                quit()

    except OSError:
        logging.error('progressiveMauve_call error')
        return 'progressiveMauve_call error'


# ---------------------------------------------------- Utility functions
Example #11
Source File: test_kcutsets.py From aws-kube-codesuite with Apache License 2.0 | 5 votes |
def test_non_repeated_cuts():
    # The algorithm was repeating the cut {0, 1} for the giant biconnected
    # component of the Karate club graph.
    K = nx.karate_club_graph()
    G = max(list(nx.biconnected_component_subgraphs(K)), key=len)
    solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}]
    cuts = list(nx.all_node_cuts(G))
    if len(solution) != len(cuts):
        print(nx.info(G))
        print("Solution: {}".format(solution))
        print("Result: {}".format(cuts))
    assert_true(len(solution) == len(cuts))
    for cut in cuts:
        assert_true(cut in solution)
Example #12
Source File: test_kcutsets.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_non_repeated_cuts():
    # The algorithm was repeating the cut {0, 1} for the giant biconnected
    # component of the Karate club graph.
    K = nx.karate_club_graph()
    G = max(list(nx.biconnected_component_subgraphs(K)), key=len)
    solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}]
    cuts = list(nx.all_node_cuts(G))
    if len(solution) != len(cuts):
        print(nx.info(G))
        print("Solution: {}".format(solution))
        print("Result: {}".format(cuts))
    assert_true(len(solution) == len(cuts))
    for cut in cuts:
        assert_true(cut in solution)
Example #13
Source File: test_kcutsets.py From qgisSpaceSyntaxToolkit with GNU General Public License v3.0 | 5 votes |
def test_non_repeated_cuts():
    # The algorithm was repeating the cut {0, 1} for the giant biconnected
    # component of the Karate club graph.
    K = nx.karate_club_graph()
    G = max(list(nx.biconnected_component_subgraphs(K)), key=len)
    solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}]
    cuts = list(nx.all_node_cuts(G))
    if len(solution) != len(cuts):
        print(nx.info(G))
        print("Solution: {}".format(solution))
        print("Result: {}".format(cuts))
    assert_true(len(solution) == len(cuts))
    for cut in cuts:
        assert_true(cut in solution)
Example #14
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 5 votes |
def extract_gene(seq_locus_id, seq_isolate_origin, graph_obj, annotation_path_dict):

    iso_anno_obj = input_parser(annotation_path_dict[3][seq_isolate_origin])

    tar_gene_anno = 'Not found'

    for entry in iso_anno_obj:
        if entry[2] == 'gene':
            if entry[8]['locus_tag'] == seq_locus_id:
                tar_gene_anno = entry
            if 'old_locus_tag' in entry[8].keys():
                if entry[8]['old_locus_tag'] == seq_locus_id:
                    tar_gene_anno = entry

    if tar_gene_anno != 'Not found':
        logging.info(tar_gene_anno[3], tar_gene_anno[4])
        logging.info(int(tar_gene_anno[4]) - int(tar_gene_anno[3]))
        logging.info(tar_gene_anno[6])

        out_seq = extract_original_seq_region(graph_obj, tar_gene_anno[3], tar_gene_anno[4], seq_isolate_origin)

        if tar_gene_anno[6] == '-':
            out_seq = reverse_compliment(out_seq)

        return out_seq

    else:
        return tar_gene_anno

    logging.info('in function')


# ----------------------------------------------------
# Testing functions
Example #15
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 5 votes |
def add_graph_data(graph_obj):

    count_dict = {}

    # Add start nodes
    for node, data in graph_obj.nodes(data=True):
        logging.info(node)
        logging.info(data)
        for an_isolate in data['ids'].split(','):
            if abs(int(data[an_isolate + '_leftend'])) == 1:
                graph_obj.graph[an_isolate + '_startnode'] = node
                if node not in count_dict.keys():
                    count_dict[node] = 1
                else:
                    count_dict[node] = count_dict[node] + 1

    logging.info(count_dict)

    most_start_node = ''
    most_start_node_number = 0

    for a_node in count_dict.keys():
        if count_dict[a_node] > most_start_node_number:
            most_start_node = a_node
            most_start_node_number = count_dict[a_node]

    graph_obj.graph['start_node'] = most_start_node


# ---------------------------------------------------- Alignment functions
Example #16
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 5 votes |
def seq_recreate_check(graph_obj, input_dict):
    for isolate in input_dict[1].keys():
        extracted_seq = extract_original_seq(graph_obj, isolate)
        original_seq_from_fasta = input_parser(input_dict[1][isolate])

        count = 0

        while count < len(extracted_seq):
            if extracted_seq[count] != original_seq_from_fasta[0]['DNA_seq'][count]:
                logging.warning(count)
                logging.warning(extracted_seq[count])
                logging.warning(original_seq_from_fasta[0]['DNA_seq'][count])
                logging.warning(extracted_seq[count-10:count + 10])
                logging.warning(original_seq_from_fasta[0]['DNA_seq'][count-10:count + 10])
            count += 1

        if extracted_seq.upper() == original_seq_from_fasta[0]['DNA_seq'].upper():
            logging.info('Sequence recreate pass')
            print('Sequence recreate pass')
            recreate_check_result = 'Pass'

        else:
            logging.error('Sequence recreate fail')
            logging.error(len(extracted_seq))
            logging.error(len(original_seq_from_fasta[0]['DNA_seq']))
            logging.error(extracted_seq[-10:])
            logging.error(original_seq_from_fasta[0]['DNA_seq'][-10:])
            logging.error(extracted_seq[:10])
            logging.error(original_seq_from_fasta[0]['DNA_seq'][:10])
            recreate_check_result = 'Fail'
Example #17
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 5 votes |
def link_all_nodes(graph_obj):
    print('Linking nodes')
    logging.info('Running link_all_nodes')

    for isolate in graph_obj.graph['isolates'].split(','):
        logging.info(isolate)
        graph_obj = link_nodes(graph_obj, isolate)

    return graph_obj
Example #18
Source File: resolve.py From atap with Apache License 2.0 | 5 votes |
def info(G):
    """
    Wrapper for nx.info with some other helpers.
    """
    pairwise = len(list(pairwise_comparisons(G)))
    edge_blocked = len(list(edge_blocked_comparisons(G)))
    fuzz_blocked = len(list(fuzzy_blocked_comparisons(G)))

    output = [""]
    output.append("Number of Pairwise Comparisons: {}".format(pairwise))
    output.append("Number of Edge Blocked Comparisons: {}".format(edge_blocked))
    output.append("Number of Fuzzy Blocked Comparisons: {}".format(fuzz_blocked))

    return nx.info(G) + "\n".join(output)
Example #19
Source File: metrics.py From GEM-Benchmark with BSD 3-Clause "New" or "Revised" License | 5 votes |
def computeMAP(predicted_edge_list, true_digraph, max_k=-1):
    """This function computes the Mean Average Precision.

    Args:
        predicted_edge_list (Array): Consists of predicted edge list for each node.
        true_digraph (object): True network graph object consists of the original nodes and edges.
        max_k (Int): Maximum number of edges to be considered for computing the precision.

    Returns:
        Array: MAP values.
    """
    true_digraph = true_digraph.to_directed()
    node_num = true_digraph.number_of_nodes()
    node_edges = []
    for i in range(node_num):
        node_edges.append([])
    for (st, ed, w) in predicted_edge_list:
        node_edges[st].append((st, ed, w))
    node_AP = [0.0] * node_num
    count = 0
    ###debug
    ### change undirected into direct when needed
    print(nx.info(true_digraph))
    for i in range(node_num):
        if true_digraph.out_degree(i) == 0:
            continue
        count += 1
        precision_scores, delta_factors = computePrecisionCurve(node_edges[i], true_digraph, max_k)
        precision_rectified = [p * d for p, d in zip(precision_scores, delta_factors)]
        if (sum(delta_factors) == 0):
            node_AP[i] = 0
        else:
            node_AP[i] = float(sum(precision_rectified) / sum(delta_factors))
    try:
        map_val = sum(node_AP) / count
    except ZeroDivisionError:
        map_val = 0
    return map_val
Example #20
Source File: app.py From dataiku-contrib with Apache License 2.0 | 4 votes |
def draw_graph():
    # get data
    project_key = dataiku.default_project_key()
    similarity = float(request.args.get('similarity'))
    node_source = request.args.get('node_source')
    node_target = request.args.get('node_target')
    interactions = request.args.get('interactions')
    dataset = request.args.get('dataset')
    name = project_key + '.' + dataset
    print name
    df = dataiku.Dataset(name).get_dataframe()
    df = df[df[interactions] > similarity]
    df = df[[node_source, node_target, interactions]]
    df.columns = ['source', 'target', 'weight']
    print "%d rows" % df.shape[0]

    G = nx.Graph()
    G.add_edges_from(zip(df.source, df.target))
    print nx.info(G)

    # degree
    for node, val in dict(nx.degree(G)).iteritems():
        G.node[node]['degree'] = val
    # pagerank
    for node, val in dict(nx.pagerank(G)).iteritems():
        G.node[node]['pagerank'] = val
    # connected components
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    for component, nodes in enumerate(components):
        for node in nodes:
            G.node[node]['cc'] = component
    # community
    partition = best_partition(G)
    for node, cluster in dict(partition).iteritems():
        G.node[node]['community'] = cluster

    # convert to JSON
    data = json_graph.node_link_data(G)

    # fix for networkx>=2.0 change of API
    if nx.__version__ > 2:
        dict_name_id = {data["nodes"][i]["id"]: i for i in xrange(len(data["nodes"]))}
        for link in data["links"]:
            link["source"] = dict_name_id[link["source"]]
            link["target"] = dict_name_id[link["target"]]

    return json.dumps({"status": "ok", "graph": data})
Example #21
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 4 votes |
def generate_graph_report(in_graph, out_file_name):
    nx_summary = nx.info(in_graph)

    report_file = open(out_file_name + '_report.txt', 'w')

    for line in nx_summary:
        report_file.write(line)

    report_file.write("\nIsolates: " + str(in_graph.graph['isolates']))

    # Length of sequence in the graph
    len_dict = {}
    for an_iso in in_graph.graph['isolates'].split(','):
        len_dict[an_iso] = 0

    comp_len = 0
    for n, d in in_graph.nodes(data=True):
        if 'sequence' in d.keys():
            comp_len = comp_len + len(d['sequence'])
            for node_iso in d['ids'].split(','):
                len_dict[node_iso] = len_dict[node_iso] + len(d['sequence'])

    #print comp_len
    report_file.write("\nCompressed sequence length: " + str(comp_len))

    #print len_dict
    for iso_name in len_dict.keys():
        report_file.write('\n' + iso_name + " length: " + str(len_dict[iso_name]))

    #print "Density: " + str(nx.density(in_graph))
    report_file.write("\nDensity: " + str(nx.density(in_graph)))

    return nx_summary


# ----------------------------------------------------
# read alignment
# Functions to align reads to the GenGraph genome graph.

# ------------------ Traditional approaches.

# Break down into k-mers to either create a hash table, or to create de-bruijn graphs.
Example #22
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 4 votes |
def add_sequences_to_graph_fastaObj(graph_obj, imported_fasta_object):

    logging.info('Adding sequences')

    seqObj = reshape_fastaObj(imported_fasta_object)

    for node, data in graph_obj.nodes(data=True):
        seq_source = data['ids'].split(',')[0]
        is_reversed = False
        is_comp = False

        if len(seq_source) < 1:
            logging.error('No ids current node')
            logging.error(node)

        else:
            ref_seq = seqObj[seq_source]

            # Check orientation
            if int(data[seq_source + '_leftend']) < 0:
                is_reversed = True

            seq_start = abs(int(data[seq_source + '_leftend']))
            seq_end = abs(int(data[seq_source + '_rightend']))

            if seq_start > seq_end:
                new_seq_start = seq_end
                new_seq_end = seq_start
                seq_end = new_seq_end
                seq_start = new_seq_start

            if is_reversed is True:
                seq_start = seq_start - 1

            if is_reversed:
                seq_start = seq_start
                seq_end = seq_end + 1

            logging.info(str(seq_start) + ' ' + str(seq_end))
            logging.info(ref_seq[seq_start:seq_end].upper())

            node_seq = ref_seq[seq_start:seq_end].upper()

            graph_obj.nodes[node]['sequence'] = node_seq

    return graph_obj
Example #23
Source File: convert_matrix.py From Carnets with BSD 3-Clause "New" or "Revised" License | 4 votes |
def from_pandas_adjacency(df, create_using=None):
    r"""Returns a graph from Pandas DataFrame.

    The Pandas DataFrame is interpreted as an adjacency matrix for the graph.

    Parameters
    ----------
    df : Pandas DataFrame
      An adjacency matrix representation of a graph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
       Graph type to create. If graph instance, then cleared before populated.

    Notes
    -----
    If the numpy matrix has a single data type for each matrix entry it
    will be converted to an appropriate Python data type.

    If the numpy matrix has a user-specified compound data type the names
    of the data fields will be used as attribute keys in the resulting
    NetworkX graph.

    See Also
    --------
    to_pandas_adjacency

    Examples
    --------
    Simple integer weights on edges:

    >>> import pandas as pd
    >>> pd.options.display.max_columns = 20
    >>> df = pd.DataFrame([[1, 1], [2, 1]])
    >>> df
       0  1
    0  1  1
    1  2  1
    >>> G = nx.from_pandas_adjacency(df)
    >>> G.name = 'Graph from pandas adjacency matrix'
    >>> print(nx.info(G))
    Name: Graph from pandas adjacency matrix
    Type: Graph
    Number of nodes: 2
    Number of edges: 3
    Average degree: 3.0000

    """

    try:
        df = df[df.index]
    except:
        msg = "%s not in columns"
        missing = list(set(df.index).difference(set(df.columns)))
        raise nx.NetworkXError("Columns must match Indices.", msg % missing)

    A = df.values
    G = from_numpy_matrix(A, create_using=create_using)

    nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
    return G
Example #24
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 4 votes |
def node_check(a_graph):
    from operator import itemgetter
    logging.info('checking nodes')

    doespass = True

    iso_list = a_graph.graph['isolates'].split(',')
    for isolate in iso_list:

        isolate_node_list = []
        for node, data in a_graph.nodes(data=True):
            if isolate in data['ids'].split(','):
                isolate_node_list.append(node)

        #print isolate_node_list

        presorted_list = []
        for a_node in isolate_node_list:
            presorted_list.append((a_node, abs(int(a_graph.nodes[a_node][isolate + '_leftend'])), abs(int(a_graph.nodes[a_node][isolate + '_rightend']))))

            if abs(int(a_graph.nodes[a_node][isolate + '_leftend'])) > abs(int(a_graph.nodes[a_node][isolate + '_rightend'])):
                logging.warning('problem node', a_node)
                logging.warning(a_graph.nodes[a_node][isolate + '_leftend'])

        sorted_list = sorted(presorted_list, key=itemgetter(1))

        count = 0
        while count < len(sorted_list) - 1:

            if sorted_list[count][2] != sorted_list[count + 1][1] - 1:
                logging.error('error 1: gaps in graph')
                logging.error(isolate)
                logging.error(sorted_list[count])
                logging.error(sorted_list[count + 1])
                logging.error('Gap length:', sorted_list[count + 1][1] - sorted_list[count][2] - 1)
                doespass = False

            if sorted_list[count][2] >= sorted_list[count + 1][1]:
                logging.error('error 2')
                logging.error(isolate)
                logging.error(sorted_list[count])
                logging.error(sorted_list[count + 1])
                doespass = False

            if sorted_list[count][1] == sorted_list[count - 1][2] - 1:
                logging.error('error 3: last node end close to node start. If this is not a SNP, there is an error')
                logging.error(isolate)
                logging.error(sorted_list[count])
                logging.error(sorted_list[count - 1])
                doespass = False

            if sorted_list[count][1] > sorted_list[count][2]:
                logging.error('error 4: start greater than stop')
                logging.error(isolate)
                logging.error(sorted_list[count])
                logging.error(a_graph.nodes[sorted_list[count][0]])
                doespass = False

            count += 1

    return doespass
Example #25
Source File: gengraph.py From GenGraph with GNU General Public License v3.0 | 4 votes |
def add_missing_nodes(a_graph, input_dict):

    from operator import itemgetter

    logging.info(input_dict[1].keys())

    iso_list = a_graph.graph['isolates'].split(',')

    for isolate in iso_list:

        isolate_Seq = input_parser(input_dict[1][isolate])
        isolate_Seq = isolate_Seq[0]['DNA_seq']

        isolate_node_list = []
        for node, data in a_graph.nodes(data=True):
            if isolate in data['ids'].split(','):
                isolate_node_list.append(node)

        presorted_list = []
        for a_node in isolate_node_list:
            presorted_list.append((a_node, abs(a_graph.nodes[a_node][isolate + '_leftend']), abs(a_graph.nodes[a_node][isolate + '_rightend'])))

            if abs(a_graph.nodes[a_node][isolate + '_leftend']) > abs(a_graph.nodes[a_node][isolate + '_rightend']):
                logging.warning('problem node' + str(a_node))
                logging.warning(a_graph.nodes[a_node][isolate + '_leftend'])

        sorted_list = sorted(presorted_list, key=itemgetter(1))

        count = 0
        while count < len(sorted_list) - 1:

            if sorted_list[count][2] != sorted_list[count + 1][1] - 1:
                new_node_dict = {isolate + '_leftend': sorted_list[count][2] + 1, isolate + '_rightend': sorted_list[count + 1][1] - 1, 'ids': isolate}

                new_node_dict['sequence'] = isolate_Seq[sorted_list[count][2]:sorted_list[count + 1][1] - 1]

                node_ID = isolate + "_" + str(count)

                att_node_dict = {node_ID: new_node_dict}

                a_graph.add_node(node_ID)
                nx.set_node_attributes(a_graph, att_node_dict)

            count += 1
Example #26
Source File: convert_matrix.py From aws-kube-codesuite with Apache License 2.0 | 4 votes |
def from_pandas_adjacency(df, create_using=None):
    r"""Return a graph from Pandas DataFrame.

    The Pandas DataFrame is interpreted as an adjacency matrix for the graph.

    Parameters
    ----------
    df : Pandas DataFrame
      An adjacency matrix representation of a graph

    create_using : NetworkX graph
       Use specified graph for result.  The default is Graph()

    Notes
    -----
    If the numpy matrix has a single data type for each matrix entry it
    will be converted to an appropriate Python data type.

    If the numpy matrix has a user-specified compound data type the names
    of the data fields will be used as attribute keys in the resulting
    NetworkX graph.

    See Also
    --------
    to_pandas_adjacency

    Examples
    --------
    Simple integer weights on edges:

    >>> import pandas as pd
    >>> df = pd.DataFrame([[1, 1], [2, 1]])
    >>> df
       0  1
    0  1  1
    1  2  1
    >>> G = nx.from_pandas_adjacency(df)
    >>> G.name = 'Graph from pandas adjacency matrix'
    >>> print(nx.info(G))
    Name: Graph from pandas adjacency matrix
    Type: Graph
    Number of nodes: 2
    Number of edges: 3
    Average degree: 3.0000

    """

    A = df.values
    G = from_numpy_matrix(A, create_using)
    try:
        df = df[df.index]
    except:
        raise nx.NetworkXError("Columns must match Indices.",
                               "%s not in columns" % list(set(df.index).difference(set(df.columns))))

    nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
    return G
Example #27
Source File: graphityOut.py From r2graphity with MIT License | 4 votes |
def plotSeGraph(graphity):

    pydotMe = nx.drawing.nx_pydot.to_pydot(graphity)
    for node in pydotMe.get_nodes():

        finalString = ''
        if node.get('calls') != '[]' or node.get('strings') != '[]':

            # TODO THE single ugliest piece of code I ever wrote. Now I'll promise to fix this in the future, priority -1... duh
            finalList = []
            for item in node.get('calls').split('[\''):
                if item.startswith('0x'):
                    stuff = item.split('\'')
                    finalList.append(str(stuff[0]) + ": [C] " + str(stuff[2]))

            try:
                for otherItem in node.get('strings').split('[\''):
                    if otherItem.startswith('0x'):
                        stuff = otherItem.split('\'')
                        finalList.append(str(stuff[0]) + ": [S] " + str(stuff[2]))
            except:
                print "Trouble with string " + str(stuff)

            finalList.sort()
            finalString = '\n'.join(finalList)

        if node.get('type') == 'Export':
            label = "Export " + node.get('alias')
            label = label + "\n" + finalString
            node.set_fillcolor('skyblue')
            node.set_style('filled,setlinewidth(3.0)')
            node.set_label(label)

        elif node.get('type') == 'Callback':
            label = "Callback " + "\n" + finalString
            node.set_fillcolor('darkolivegreen1')
            node.set_style('filled,setlinewidth(3.0)')
            node.set_label(label)

        elif finalString != '':
            # TODO add address of node as title
            # finalString = str(node) + '\n' + finalString
            node.set_fillcolor('lightpink1')
            node.set_style('filled,setlinewidth(3.0)')
            node.set_label(finalString)

    # TODO add info about sample to graph

    graphname = os.path.basename(sys.argv[1]) + ".png"
    try:
        # TODO pydotplus throws an error sometimes (Error: /tmp/tmp6XgKth: syntax error in line 92 near '[') look into pdp code to see why
        pydotMe.write_png(os.path.join(os.path.abspath(os.path.dirname(__file__)), graphname))
    except Exception as e:
        print "ERROR drawing graph"
        print str(e)