def test_characteristic_path_length():
    """
    Pandit, Arka, and John C. Crittenden. "Index of network resilience
    (INR) for urban water distribution systems." Nature (2012).
    """
    raise SkipTest  # test is currently skipped; the body below is kept for reference
    inp_file = join(datadir, 'Anytown.inp')
    # Create a water network model for results object
    wn = wntr.network.WaterNetworkModel(inp_file)
    G = wn.get_graph_deep_copy()
    udG = G.to_undirected()
    CPL = nx.average_shortest_path_length(udG)
    print('CPL = ', CPL)
    print('expected CPL = ', 1.24)
    error = abs(1.24 - CPL)
    assert_less(error, 0.01)
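For quick reference, a minimal sketch (independent of the WNTR test fixture above) of what `nx.average_shortest_path_length` computes as the characteristic path length:

```python
import networkx as nx

# Characteristic path length (CPL): the mean shortest-path distance
# over all node pairs of a connected graph.
G = nx.petersen_graph()
print('CPL =', nx.average_shortest_path_length(G))  # 5/3 for the Petersen graph
```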
def Attributes_of_Graph(G):
    """Print basic statistical attributes of graph G."""
    print("*Statistical attributes of the graph:")
    print("N", nx.number_of_nodes(G))
    print("M", nx.number_of_edges(G))
    print("C", nx.average_clustering(G))
    #print("<d>", nx.average_shortest_path_length(G))
    print("r", nx.degree_assortativity_coefficient(G))
    degree_list = [d for _, d in G.degree()]
    max_degree = max(degree_list)
    min_degree = min(degree_list)
    avg_degree = sum(degree_list) / len(degree_list)
    avg_degree_square = sum(k * k for k in degree_list) / len(degree_list)
    # Degree heterogeneity H = <k^2> / <k>^2
    heterogeneity = avg_degree_square / (avg_degree * avg_degree)
    print("<k>", avg_degree)
    print("k_max", max_degree)
    print("H (degree heterogeneity)", heterogeneity)
    print("S (average span of degree distribution)", float(max_degree - min_degree) / G.number_of_nodes())
#************************************************************************
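A hypothetical usage sketch for `Attributes_of_Graph`; the Erdos-Renyi generator and seed are illustrative choices, not from the original project:

```python
import networkx as nx

G = nx.erdos_renyi_graph(100, 0.05, seed=42)
Attributes_of_Graph(G)
```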
def statistics(self):
    """Return some topological information about the experiment."""
    stat = {}
    stat["net diameter"] = nx.diameter(self.network)
    stat["net radius"] = nx.radius(self.network)
    stat["net asp"] = nx.average_shortest_path_length(self.network)
    stat["input asp"] = net.inputASL(self.network, self.inputc)
    # Closeness centrality does not depend on the loop variable, so compute it once.
    centrs = nx.closeness_centrality(self.network)
    for m in self.measures.values():
        distr = net.distances_to_roi(self.network, self.inputc, m.roi)
        stat["stim to roi distances, mean", m.name] = np.mean(distr)
        stat["stim to roi distances, var", m.name] = np.var(distr)
        stat["roi centralities", m.name] = [centrs[tuple(node)]
                                            for node in np.transpose(m.roi.nonzero())]
    return stat
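Note that `nx.average_shortest_path_length` raises `NetworkXError` when the graph is disconnected, which is why the snippets in this collection either assume or check connectivity first. A minimal guard (a sketch, not from the original project) that falls back to the largest connected component:

```python
import networkx as nx

def safe_asl(G):
    # Average shortest path length, restricted to the largest
    # connected component when G is disconnected.
    if nx.is_connected(G):
        return nx.average_shortest_path_length(G)
    largest = max(nx.connected_components(G), key=len)
    return nx.average_shortest_path_length(G.subgraph(largest))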
def main(filename, graph_type, constructed_graph=-1):
    # 1. original graph
    original_graph_path = os.path.join("data", filename, "")
    original_graph = generate_graph(original_graph_path, filename, -1)
    plt.figure("original graph degree distribution")
    draw_degree(original_graph)
    print('original edge number: ', len(original_graph.edges()))
    # 2. reconstructed graph
    if constructed_graph == -1:
        reconstruct_graph_path = os.path.join("reconstruction", filename, graph_type, "")
        with open(glob.glob(reconstruct_graph_path + "*.adj")[0], 'rb') as f:
            reconstruct_graph_adj = pickle.load(f)
    else:
        reconstruct_graph_adj = constructed_graph
    reconstruct_graph = adj2Graph(reconstruct_graph_adj, edgesNumber=len(original_graph.edges()))
    print('edge number: ', len(reconstruct_graph.edges()))
    plt.figure("reconstruct graph degree distribution")
    draw_degree(reconstruct_graph)
    print("Clustering: ", nx.average_clustering(original_graph), ' ', nx.average_clustering(reconstruct_graph))
    # print("ASL: ", nx.average_shortest_path_length(original_graph), ' ', nx.average_shortest_path_length(reconstruct_graph))
    # print("degree centrality: ", nx.degree_centrality(original_graph), ' ', nx.degree_centrality(reconstruct_graph))
    # print("closeness centrality: ", nx.closeness_centrality(original_graph), ' ', nx.closeness_centrality(reconstruct_graph))
    plt.show()
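`draw_degree` is not defined in this snippet; a minimal stand-in (hypothetical, the original project's version may differ) that plots the degree histogram into the current figure:

```python
import collections
import matplotlib.pyplot as plt

def draw_degree(G):
    # Histogram of node degrees, drawn into the current matplotlib figure.
    counts = collections.Counter(d for _, d in G.degree())
    ks = sorted(counts)
    plt.bar(ks, [counts[k] for k in ks])
    plt.xlabel('degree k')
    plt.ylabel('number of nodes')
```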
def run(self):
    ip_addresses = ['192.168.1.%s' % x for x in range(1, self._number_clients)]
    ports = [x for x in range(1, 2)]
    clients = []
    progress = 0
    for ip_addr in ip_addresses:
        print_progress(progress, self._number_clients, suffix="Running simulation")
        for port in ports:
            progress += 1
            client = Client(ip_addr, port, clients[0] if len(clients) > 0 else None,
                            max_chache_size=self._number_connections_per_client)
            clients.append(client)
            connection = Connection(client, clients[0])
            connection.initiate()
            bootstrapper_connections = clients[0].get_connections()
            for conn in bootstrapper_connections:
                connection = Connection(client, conn.second_client)
                connection.initiate()
    graph = networkx.Graph()
    for client in clients:
        logging.error(client.get_ident())
        logging.error(client.get_connection_idents())
        for node in client.get_connections():
            graph.add_edge(node.first_client.get_ident(), node.second_client.get_ident())
    networkx.draw(graph, with_labels=False)
    plt.savefig("path_graph.pdf")
    print("Network is connected: %s" % networkx.is_connected(graph))
    print("Average shortest path length: %s" % networkx.average_shortest_path_length(graph))
    print("Average clustering coefficient %s" % networkx.average_clustering(graph))
    print("Clustering coefficient %s" % networkx.clustering(graph))
    print("degree_assortativity_coefficient %s" % networkx.degree_assortativity_coefficient(graph))
# Source: screenplay_network_viz.py, from project sceneTransitionNetMovieClassification (author: daltonsi)
def graph_info(g):
    result = {}
    components = [g.subgraph(c).copy() for c in nx.strongly_connected_components(g)]
    in_degrees = dict(g.in_degree())
    out_degrees = dict(g.out_degree())
    highest_in_degree_node = max(in_degrees, key=in_degrees.get)
    highest_out_degree_node = max(out_degrees, key=out_degrees.get)
    result['highest in_degree node'] = highest_in_degree_node
    result['highest out_degree_node'] = highest_out_degree_node
    result['number of components'] = len(components)
    result['number of nodes'] = g.number_of_nodes()
    result['number of edges'] = g.number_of_edges()
    # Degree centrality
    in_degree_centrality = nx.in_degree_centrality(g)
    out_degree_centrality = nx.out_degree_centrality(g)
    result['sorted in_degree centrality'] = sorted(in_degree_centrality.items(), key=lambda x: x[1], reverse=True)
    result['sorted out_degree centrality'] = sorted(out_degree_centrality.items(), key=lambda x: x[1], reverse=True)
    # Compute closeness and betweenness once each; both are expensive.
    closeness = nx.closeness_centrality(g)
    betweenness = nx.betweenness_centrality(g)
    result['closeness_centrality'] = sorted(closeness.items(), key=lambda x: x[1], reverse=True)
    result['highest in_degree node closeness'] = closeness[highest_in_degree_node]
    result['highest out_degree node closeness'] = closeness[highest_out_degree_node]
    result['betweenness centrality'] = sorted(betweenness.items(), key=lambda x: x[1], reverse=True)
    result['highest in_degree node betweenness'] = betweenness[highest_in_degree_node]
    result['highest out_degree node betweenness'] = betweenness[highest_out_degree_node]
    largest_component = max(components, key=lambda x: x.number_of_nodes())
    result['largest strongly component percent'] = largest_component.number_of_nodes() / float(g.number_of_nodes())
    result['largest strongly component diameter'] = nx.diameter(largest_component)
    result['largest strongly component average path length'] = nx.average_shortest_path_length(largest_component)
    result['average_degree (undirected)'] = sum(dict(g.degree()).values()) / float(g.number_of_nodes())
    result['avg_cluster_coefficient (transitivity)'] = nx.transitivity(g)
    return result
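A hypothetical usage sketch for `graph_info`; converting a connected undirected graph to a `DiGraph` yields a single strongly connected component, so the diameter and average-path-length calls succeed:

```python
import networkx as nx

g = nx.DiGraph(nx.karate_club_graph())  # symmetric digraph, strongly connected
info = graph_info(g)
print(info['number of nodes'], info['number of edges'])
print(info['largest strongly component average path length'])
```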
def structure_dependent_index(G, ebunch=None):
    if ebunch is None:
        ebunch = nx.non_edges(G)
    #C = nx.average_clustering(G)
    #d = nx.average_shortest_path_length(G)
    path_range = max(2, math.ceil(nx.average_shortest_path_length(G)))
    #print(path_range)
    def predict(u, v):
        #NeighborSet = nx.all_neighbors(G, u)
        #len(sorted(nx.common_neighbors(G, u, v)))
        SD_Index = {}
        # Generate all simple paths in G from source to target with length <= cutoff.
        paths = list(nx.all_simple_paths(G, source=u, target=v, cutoff=path_range))
        print(paths)
        # Count the paths of each length (len(path) counts nodes, so a
        # direct edge corresponds to key 2).
        for path in paths:
            if len(path) in SD_Index:
                SD_Index[len(path)] = SD_Index[len(path)] + 1.0
            else:
                SD_Index[len(path)] = 1.0
        #end for
        print(SD_Index)
        # Sum up the number of paths of each length, discounted by length.
        Coefficient = 0.6
        SD_Value = 0.0
        key_Sequence = list(sorted(SD_Index.keys()))
        for key in key_Sequence:
            if key != 2:
                SD_Value = SD_Value + math.pow(Coefficient, key - 2.0) * SD_Index[key]
        #end for
        return SD_Value  #Coefficient = 0.6
    Rank_List = []
    for u, v in ebunch:
        Rank_List.append((u, v, predict(u, v)))
    return Rank_List  #((u, v, predict(u, v)) for u, v in ebunch)
##======================================================================##
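A hypothetical usage sketch for `structure_dependent_index` on a small path graph; note the debug `print` calls inside `predict` fire for every candidate pair:

```python
import math
import networkx as nx

G = nx.path_graph(5)
ranked = sorted(structure_dependent_index(G), key=lambda t: t[2], reverse=True)
print(ranked)  # non-edges scored by discounted simple-path counts
```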
def Prediction_LinkScores_Ratio(G, Predictor, Proportion, Toleration, Predict_Gap):
    print("Prediction_LinkScores_Ratio!")
    Rank_List_Set = {}
    OK_Value = float(G.number_of_edges()) / Proportion
    if nx.is_connected(G):
        Edge_Set = G.edges(data=True)
        Total = 0
        Error = 0
        # Note: average_shortest_path_length dominates the running time here!
        Rank_List_Set[0] = [Link_Predictors.Wighted_Link_Prediction(Predictor, G),
                            nx.average_clustering(G),
                            nx.average_shortest_path_length(G)]
        '''
        while 1:
            #print(i, len(Edge_Set))
            Tep_Edge = []
            Del = random.randint(0, len(Edge_Set) - 1)
            Tep_Edge.append(Edge_Set[Del])
            #print("random range:", len(Edge_Set) - 1)
            #print(Del)
            # Prediction with a different training set
            G.remove_edge(Edge_Set[Del][0], Edge_Set[Del][1])
            if not nx.is_connected(G):
                G.add_edges_from(Tep_Edge)
                Error = Error + 1
                #print("Error:", Error)
            else:
                #print(Edge_Set[Del])
                Error = 0
                Total = Total + 1
                #print("Total:", Total)
                if Total % Predict_Gap == 0:
                    V1 = Link_Predictors.Wighted_Link_Prediction(Predictor, G)
                    V2 = nx.average_clustering(G)
                    V3 = nx.average_shortest_path_length(G)
                    #V4 = Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links)
                    Rank_List_Set[Total] = [V1, V2, V3]
                    Edge_Set = G.edges(data=True)
                #end if
            if Total > OK_Value or Error == Toleration:
                #print("complete with Total, Error:", Total, Error)
                return Rank_List_Set
        #end while
        '''
        return Rank_List_Set
    #end if
    #return Rank_List_Set
##==========================================================================================
#Native_Prediction_Experiment(G, 'WSD', Probe_Set, Top_L, 3) #Top_K, Deleted_Ratio