def toplogical_sort(self, g: AssemblyGraph, source: Node):
    """Depth-first topological ordering of the nodes reachable from *source*.

    Returns a deque of (node, position) pairs, where position counts down
    from number_of_nodes(g) - 1 in post-order (classic DFS topo-sort).
    """
    remaining = networkx.number_of_nodes(g) - 1
    seen = set()
    result = deque()

    def _visit(node):
        # Only `remaining` is rebound; g/seen/result are merely referenced.
        nonlocal remaining
        seen.add(node)
        for succ in g.neighbors_iter(node):
            if succ in seen:
                continue
            _visit(succ)
        # Post-order: a node is placed before all of its descendants.
        result.appendleft((node, remaining))
        remaining -= 1

    _visit(source)
    return result
# Example source code for the Python ``number_of_nodes()`` function (collected snippets).
def skeleton_to_swc(skeleton, offset, resolution):
    """Convert a skeletopyze skeleton into a list of SWC-style rows.

    Each returned row is ``[node_id, 0, x, y, z, radius, parent_id]`` with
    ``parent_id == -1`` for the root.  Node locations are shifted by *offset*
    and scaled element-wise by *resolution*.

    NOTE(review): this relies on the NetworkX 1.x API (``nodes_iter``,
    ``adjacency_iter``, ``g.nodes()[0]`` list indexing, ``t.node`` attribute
    dict) and will not run on NetworkX 2.x+.
    """
    import networkx as nx
    g = nx.Graph()
    g.add_nodes_from(skeleton.nodes())
    g.add_edges_from((e.u, e.v) for e in skeleton.edges())
    # Find a directed tree for mapping to a skeleton.
    if nx.number_of_nodes(g) > 1:
        # This discards cyclic edges in the graph.
        t = nx.bfs_tree(nx.minimum_spanning_tree(g), g.nodes()[0])
    else:
        # Single-node skeleton: a tree with that one node and no edges.
        t = nx.DiGraph()
        t.add_nodes_from(g)
    # Copy node attributes
    for n in t.nodes_iter():
        loc = skeleton.locations(n)
        # skeletopyze is z, y, x (as it should be).
        # NOTE(review): given the comment above, loc[0] (z) is stored under
        # the 'x' key below -- confirm whether this axis mapping is intended.
        loc = np.array(loc)
        loc = np.multiply(loc + offset, resolution)
        t.node[n].update({'x': loc[0],
                          'y': loc[1],
                          'z': loc[2],
                          'radius': skeleton.diameters(n) / 2.0})
    # Set parent node ID
    for n, nbrs in t.adjacency_iter():
        for nbr in nbrs:
            t.node[nbr]['parent_id'] = n
            # Fallback radius for nodes that never received one above.
            if 'radius' not in t.node[nbr]:
                t.node[nbr]['radius'] = -1
    return [[
        node_id,
        0,  # SWC structure identifier; 0 = undefined
        n['x'], n['y'], n['z'],
        n['radius'],
        n.get('parent_id', -1)] for node_id, n in t.nodes(data=True)]
def create_graph(self):
    """Build an undirected topic co-occurrence graph from the crawled CSV.

    Each CSV row is one raw line: a repository name followed by its topics,
    comma-separated.  Every pair of topics that co-occur in a row becomes
    an edge.  Populates ``self.edges`` and ``self.G`` and prints the node
    and edge counts (Python 2 print statements).
    """
    path = ""
    # NOTE(review): the 'worldoss:ocean' path segment looks garbled by the
    # scrape -- verify against the actual repository layout.
    # delimiter='\n' overrides sep, so pandas reads one whole line per row;
    # fields are split manually below.
    data = pd.read_csv(path + '../../worldoss:ocean/Web_Crawler/generated_repo_topic_data.csv', error_bad_lines=False, header=None,
                       sep=",", delimiter='\n')
    # Creating node list
    node = []
    for i in data.values:
        # i[0] is the raw line; skip the first field (repository name).
        for j in i[0].split(',')[1:]:
            node.append(j)
    node = list(set(node))  # de-duplicate topic names
    # Creating edge list
    self.edges = []
    for i in data.values:
        l = i[0].split(',')[1:]
        # All unordered topic pairs within one repository row.
        for j in range(len(l)):
            for k in range(j + 1, len(l)):
                self.edges.append((l[j], l[k]))
    self.G = nx.Graph()
    self.G.add_nodes_from(node)
    self.G.add_edges_from(self.edges)
    print nx.number_of_nodes(self.G)
    print nx.number_of_edges(self.G)
def centrality(self):
    """Compute per-community degree centrality and append it to a CSV.

    Reads community memberships from ``community_test.csv`` (one row per
    community: id followed by member nodes), builds the subgraph induced by
    ``self.edges`` for each community, and appends node/edge counts plus
    per-node centralities to ``centrality_test.csv``.
    (Python 2 print statements; assumes ``self.edges`` was populated by
    ``create_graph``.)
    """
    with open('community_test.csv','rU') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            cluster = row[1:]  # row[0] is the community id
            edges = []
            print cluster
            # Keep only edges with BOTH endpoints inside this cluster,
            # checking each edge in both orientations.  NOTE(review): an
            # edge whose endpoints both appear in the cluster may be
            # appended twice (once per orientation) -- confirm intended.
            for i in self.edges:
                for j in cluster:
                    if i[0] == j:
                        for k in cluster:
                            if i[1] == k:
                                edges.append(i)
                    if i[1] == j:
                        for k in cluster:
                            if i[0] == k:
                                edges.append(i)
            C = nx.Graph()
            C.add_nodes_from(cluster)
            C.add_edges_from(edges)
            node_count=nx.number_of_nodes(C)
            edge_count=nx.number_of_edges(C)
            print node_count, edge_count
            cent = self.degree_centrality_custom(C)
            print cent
            # Append this community's summary plus one row per node.
            # (Re-uses the name `csvfile`, shadowing the input file handle.)
            with open('centrality_test.csv','a') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['Community '+row[0],'Node: '+str(node_count),'Edge: '+str(edge_count)])
                for i,j in cent.items():
                    writer.writerow([i,j])
            print 'Finished Community '+row[0]
def Attributes_of_Graph(G):
print "*Statistic attributes of graphs:"
print "N", nx.number_of_nodes(G)
print "M", nx.number_of_edges(G)
print "C", nx.average_clustering(G)
#print "<d>", nx.average_shortest_path_length(G)
print "r", nx.degree_assortativity_coefficient(G)
degree_list = list(G.degree_iter())
max_degree = 0
min_degree = 0
avg_degree_1 = 0.0
avg_degree_2 = 0.0
for node in degree_list:
avg_degree_1 = avg_degree_1 + node[1]
avg_degree_2 = avg_degree_2 + node[1]*node[1]
if node[1] > max_degree:
max_degree = node[1]
if node[1] < min_degree:
min_degree = node[1]
#end for
avg_degree = avg_degree_1/len(degree_list)
avg_degree_square = (avg_degree_2/len(degree_list)) / (avg_degree*avg_degree)
print "<k>", avg_degree
print "k_max", max_degree
print "H", avg_degree_square
print "DH", float(max_degree-min_degree)/G.number_of_nodes()
#************************************************************************
def Attributes_of_Graph(G):
print "*Statistic attributes of graphs:"
print "N", nx.number_of_nodes(G)
print "M", nx.number_of_edges(G)
print "C", nx.average_clustering(G)
#print "<d>", nx.average_shortest_path_length(G)
print "r", nx.degree_assortativity_coefficient(G)
degree_list = list(G.degree_iter())
max_degree = 0
min_degree = 0
avg_degree_1 = 0.0
avg_degree_2 = 0.0
for node in degree_list:
avg_degree_1 = avg_degree_1 + node[1]
avg_degree_2 = avg_degree_2 + node[1]*node[1]
if node[1] > max_degree:
max_degree = node[1]
if node[1] < min_degree:
min_degree = node[1]
#end for
avg_degree = avg_degree_1/len(degree_list)
avg_degree_square = (avg_degree_2/len(degree_list)) / (avg_degree*avg_degree)
print "<k>", avg_degree
print "k_max", max_degree
print "H (degree heterogeneity)", avg_degree_square
print "S (average span of degree distribution)", float(max_degree-min_degree)/G.number_of_nodes()
#*******************************************************************
def Degree_distribution(G):
    """Plot the degree distribution P(k) of graph G with matplotlib.

    Builds a degree -> frequency histogram by run-length counting the
    sorted degree sequence, normalises by the node count, and shows the
    plot.  Raises IndexError on an empty graph (unchanged behaviour).
    """
    Nodes = G.nodes()
    Degree_List = []
    Degree_Dic = {}
    for i in Nodes:
        Degree_List.append(G.degree(i))
    Degree_List = sorted(Degree_List, reverse = False)
    # Run-length count over the sorted degrees.
    Flag = Degree_List[0]
    Count = 1
    for i in Degree_List[1:]:
        if i !=Flag:
            Degree_Dic[Flag] = Count
            Count = 1
            Flag = i
        else:
            Count = Count + 1
    #end for
    # BUG FIX: the final run was never stored, so the largest degree value
    # (and, for a single-degree graph, the whole distribution) was missing.
    Degree_Dic[Flag] = Count
    n = G.number_of_nodes()
    plt.figure(1)
    ax1 = plt.subplot(111)
    plt.sca(ax1)
    #x = list([(i+1) for i in range(0,len(Degree_List))])
    x = sorted(Degree_Dic.keys(), reverse = False)
    y = []
    for i in x:
        # Normalise counts to probabilities.
        y.append(float(Degree_Dic[i])/n)
    #end for
    plt.plot(x, y, "rs-")
    plt.ylabel("Probability")
    plt.xlabel("Degree K")
    plt.title("Degree distribution of networks")
    plt.show()
#************************************************************************
def main():
    """Draw the DNS resolution relation graph for a domain and save it to PNG."""
    domain_name = 'taobao.com'
    domain_pkts = get_data(domain_name)
    # One edge per (queried name, answer data) pair in the first capture.
    link_list = [
        (answer['domain_name'], answer['dm_data'])
        for record in domain_pkts[0]['details']
        for answer in record['answers']
    ]
    plt.figure(1, figsize=(10, 8))
    graph = nx.Graph()
    graph.add_edges_from(link_list)
    pos = graphviz_layout(graph, prog="fdp")  # neato fdp
    # Colour every connected component with a single random value.
    for component in nx.connected_component_subgraphs(graph):
        colour = [random.random()] * nx.number_of_nodes(component)
        nx.draw(component,
                pos,
                node_size=90,
                node_color=colour,
                vmin=0.0,
                vmax=1.0,
                with_labels=False
                )
    plt.savefig('./graph/' + domain_name + "_relation.png", dpi=75)
    plt.show()
def merge_node(path,new_path):
    """Collapse linear chains in a GML-encoded directed graph.

    Reads a graph from *path*; every node with exactly one outgoing and one
    incoming edge whose sole successor also has exactly one incoming edge is
    merged into that successor (their "text" attributes concatenated with a
    comma, after stripping the trailing jump from the first).  Writes the
    result to *new_path* and returns the remaining node count.

    NOTE(review): relies on the NetworkX 1.x API (``out_degree()`` returning
    a dict, ``successors()`` returning a list).
    """
    g = nx.read_gml(path)
    # Candidates: nodes with exactly one outgoing edge.
    nodes = [n for n,d in g.out_degree().items() if d==1]
    for node in nodes:
        # The node may already have been removed by an earlier merge.
        if not node in g.nodes():
            continue
        if g.in_degree(node) != 1:
            continue
        p = g.successors(node)  # single-element list (out-degree is 1)
        #print p
        #dict = g.in_degree(p)
        #print dict[p]
        #print g.in_degree(p)[p[0]]
        # Merge only if the successor has exactly one predecessor (= node).
        if g.in_degree(p)[p[0]] == 1:
            text1 = g.node[node]["text"]
            text1 = remove_last_jump(text1)  # drop the now-redundant jump
            text2 = g.node[p[0]]["text"]
            #print text1
            #print text2
            new_text = text1 + ',' + text2
            #print new_text
            nns = g.successors(p[0])
            g.node[node]["text"] = new_text
            # Rewire: node inherits the successor's out-edges, then the
            # successor is deleted.
            for n in nns:
                g.add_edge(node, n)
            g.remove_node(p[0])
    nx.write_gml(g, new_path)
    return nx.number_of_nodes(g)
def num_nodes(self):
    """Return the number of nodes in this graph."""
    node_count = nx.number_of_nodes(self)
    return node_count
def get_paths(self):
    """Enumerate all simple paths from the function start to each exit block.

    Side effects: sets ``self.shortest_path`` and ``self.longest_path``.
    Returns the list of all paths, ``[[start]]`` for a single-node graph,
    or None when there are no exit blocks (unchanged behaviour).
    """
    # If there's only one node
    if nx.number_of_nodes(self.graph) == 1:
        self.shortest_path = self.longest_path = [self.function_start]
        return [[self.function_start]]
    # If there aren't any obvious exit blocks
    if len(self.exit_blocks) == 0:
        return
    # We need to go through all the possible paths from
    # function start to each of exit blocks
    all_paths = []
    longest_path_len = 0
    shortest_path_len = None
    for ret in self.exit_blocks:
        # BUG FIX: nx.all_simple_paths returns a generator; the original
        # exhausted it in the for-loop below and then extended all_paths
        # with the spent generator, so the result was always empty.
        # Materialise it once so it can be both scanned and collected.
        paths = list(nx.all_simple_paths(self.graph, source = self.function_start, target = ret))
        for path in paths:
            if len(path) > longest_path_len:
                longest_path_len = len(path)
                self.longest_path = path
            if not shortest_path_len or len(path) < shortest_path_len:
                shortest_path_len = len(path)
                self.shortest_path = path
        all_paths.extend(paths)
    return all_paths
def motif_m7(g):
    """Accumulate motif-7 occurrences into an n x n weight matrix.

    For every node triple u < v < w matching one of three edge patterns
    below, adds 1 symmetrically to W for each of the three node pairs.
    Returns W as a numpy matrix.

    NOTE(review): assumes nodes are labelled consecutively 1..n (indices
    into W are label-1), and that g is directed (has_edge(u, v) and
    has_edge(v, u) are checked separately).  The exact motif shape encoded
    by the three branches should be confirmed against the motif catalogue
    used by this module.
    """
    n = nx.number_of_nodes(g)
    W = np.zeros((n, n), dtype=float)
    W = np.mat(W)  # NOTE(review): np.mat is deprecated in modern numpy
    # nei = set(nx.all_neighbors(g, 2))
    # print('all_neighbors: ', nei)
    for u in range(1, n+1):
        u_neighbors = set(nx.all_neighbors(g, u))
        # sorted(u_neighbors)
        # print(u, u_neighbors)
        for v in u_neighbors:
            # Enforce u < v (< w below) so each triple is visited once.
            if v < u:
                continue
            v_neighbors = set(nx.all_neighbors(g, v))
            # sorted(v_neighbors)
            for w in v_neighbors:
                if w < u or w < v:
                    continue
                # Each branch: one orientation of the motif; the `continue`
                # ensures at most one branch fires per triple.
                if g.has_edge(u, v) and g.has_edge(v, u) and g.has_edge(u, w) and g.has_edge(v, w):
                    W[u - 1, w - 1] = W[u - 1, w - 1] + 1
                    W[w - 1, u - 1] = W[w - 1, u - 1] + 1
                    W[u - 1, v - 1] = W[u - 1, v - 1] + 1
                    W[v - 1, u - 1] = W[v - 1, u - 1] + 1
                    W[v - 1, w - 1] = W[v - 1, w - 1] + 1
                    W[w - 1, v - 1] = W[w - 1, v - 1] + 1
                    continue
                if g.has_edge(u, w) and g.has_edge(w, u) and g.has_edge(u, v) and g.has_edge(w, v):
                    W[u - 1, w - 1] = W[u - 1, w - 1] + 1
                    W[w - 1, u - 1] = W[w - 1, u - 1] + 1
                    W[u - 1, v - 1] = W[u - 1, v - 1] + 1
                    W[v - 1, u - 1] = W[v - 1, u - 1] + 1
                    W[v - 1, w - 1] = W[v - 1, w - 1] + 1
                    W[w - 1, v - 1] = W[w - 1, v - 1] + 1
                    continue
                if g.has_edge(v, w) and g.has_edge(w, v) and g.has_edge(w, u) and g.has_edge(v, u):
                    W[u - 1, w - 1] = W[u - 1, w - 1] + 1
                    W[w - 1, u - 1] = W[w - 1, u - 1] + 1
                    W[u - 1, v - 1] = W[u - 1, v - 1] + 1
                    W[v - 1, u - 1] = W[v - 1, u - 1] + 1
                    W[v - 1, w - 1] = W[v - 1, w - 1] + 1
                    W[w - 1, v - 1] = W[w - 1, v - 1] + 1
                    continue
    # print(W)
    # print(type(W))
    return W
def count_motif(g):
    """Count occurrences of 13 three-node motifs in graph g.

    For every node pair (u, v) with u < v, gathers third nodes w adjacent
    to u or v with w > v, classifies each triple with the count_m1..13
    predicates (defined elsewhere in this module), and prints the final
    13-element count list.

    NOTE(review): assumes node labels are integers in 1..n.  The elif
    chain counts only the FIRST matching motif per triple -- presumably
    the predicates are mutually exclusive; confirm.
    """
    count_number = list([0])*13  # one counter per motif type m1..m13
    n = nx.number_of_nodes(g)
    node_set = set()
    for u in range(1, n+1):
        # Labels may be sparse: skip missing ids.
        if not g.has_node(u):
            continue
        for v in range(u+1, n+1):
            if not g.has_node(v):
                continue
            u_neighbors = list(set(nx.all_neighbors(g, u)))
            v_neighbors = list(set(nx.all_neighbors(g, v)))
            # Candidate third nodes: neighbours of u or v strictly greater
            # than both, so each triple is considered exactly once.
            for i in u_neighbors:
                if i > v and i > u:
                    node_set.add(i)
            for i in v_neighbors:
                if i > v and i > u:
                    node_set.add(i)
            if len(node_set) <= 0:
                continue
            print(u, v, node_set)
            for w in node_set:
                if count_m1(g, u, v, w):
                    count_number[0] += 1
                elif count_m2(g, u, v, w):
                    count_number[1] += 1
                elif count_m3(g, u, v, w):
                    count_number[2] += 1
                elif count_m4(g, u, v, w):
                    count_number[3] += 1
                elif count_m5(g, u, v, w):
                    count_number[4] += 1
                elif count_m6(g, u, v, w):
                    count_number[5] += 1
                elif count_m7(g, u, v, w):
                    count_number[6] += 1
                elif count_m8(g, u, v, w):
                    count_number[7] += 1
                elif count_m9(g, u, v, w):
                    count_number[8] += 1
                elif count_m10(g, u, v, w):
                    count_number[9] += 1
                elif count_m11(g, u, v, w):
                    count_number[10] += 1
                elif count_m12(g, u, v, w):
                    count_number[11] += 1
                elif count_m13(g, u, v, w):
                    count_number[12] += 1
            # Reset candidates for the next (u, v) pair.
            node_set.clear()
    print(count_number)
def bfs_eff_diam(G, NTestNodes, P):
    """Approximate the effective diameter of G by BFS from random sources.

    Runs BFS from up to *NTestNodes* randomly chosen nodes, builds the
    cumulative distribution of hop counts over the sampled pairs, and
    returns the (linearly interpolated) number of hops within which a
    fraction *P* of the sampled pairs is reachable.

    NOTE(review): depends on a module-level ``hops`` helper and the
    NetworkX 1.x API (``nx.nodes`` returning a shuffleable list,
    ``bfs_successors`` returning a dict).
    """
    # (Removed unused placeholder locals EffDiam/FullDiam/AvgSPL.)
    DistToCntH = {}  # hop distance -> number of sampled pairs at that distance
    NodeIdV = nx.nodes(G)
    random.shuffle(NodeIdV)
    for tries in range(0, min(NTestNodes, nx.number_of_nodes(G))):
        NId = NodeIdV[tries]
        b = nx.bfs_successors(G, NId)
        for l, h in hops(b, NId):
            # BUG FIX: was `h is 0` -- identity comparison with an int only
            # works by accident via CPython small-int caching; use equality.
            if h == 0:
                continue
            if l + 1 not in DistToCntH:
                DistToCntH[l + 1] = h
            else:
                DistToCntH[l + 1] += h
    DistNbrsPdfV = {}
    SumPathL = 0.0
    PathCnt = 0.0
    for i in DistToCntH.keys():
        DistNbrsPdfV[i] = DistToCntH[i]
        SumPathL += i * DistToCntH[i]
        PathCnt += DistToCntH[i]
    # Convert the pdf (ordered by distance) into a cdf, filling gaps with 0.
    oDistNbrsPdfV = collections.OrderedDict(sorted(DistNbrsPdfV.items()))
    CdfV = oDistNbrsPdfV
    for i in range(1, len(CdfV)):
        if i + 1 not in CdfV:
            CdfV[i + 1] = 0
        CdfV[i + 1] = CdfV[i] + CdfV[i + 1]
    EffPairs = P * CdfV[next(reversed(CdfV))]
    # Smallest hop count covering more than EffPairs pairs.
    for ValN in CdfV.keys():
        if CdfV[ValN] > EffPairs:
            break
    if ValN >= len(CdfV):
        return next(reversed(CdfV))
    # BUG FIX: `ValN is 0` / `DeltaNbrs is 0` replaced with == for the same
    # identity-vs-equality reason as above.
    if ValN == 0:
        return 1
    # Linearly interpolate between distances ValN-1 and ValN.
    DeltaNbrs = CdfV[ValN] - CdfV[ValN - 1]
    if DeltaNbrs == 0:
        return ValN
    return ValN - 1 + (EffPairs - CdfV[ValN - 1]) / DeltaNbrs