import networkx as nx


def Prediction_Experiment(G, Predictor, Probe_Set, Top_L, Deleted_Ratio):
    print("Prediction_Experiment!")
    #Get the evaluation link set--------
    #Top_L = G.number_of_edges() // Top_k  #the top proportion 1/Top_k of edges is considered
    #Probe_Set = Probe_Set_Correspond_Training(G, Top_L, fpname)  #get the probe set for evaluation
    #Get ranking lists under different deleted-links ratios----------
    Edge_Num = float(G.number_of_edges())
    #AUC = Performance_Evaluation_AUC(Predictor, G, Probe_Set)  #superseded three-argument call
    #Non-existing links: all unobserved node pairs except the held-out probe links
    Unobserved_links = nx.non_edges(G)
    Non_existing_links = list(set(Unobserved_links).difference(set(Probe_Set)))
    AUC = Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links)
    #Prediction_LinkScores_Ratio(G, Predictor, Proportion, Toleration, Predict_Gap)
    Rank_List_Set = Prediction_LinkScores_Ratio(G, Predictor, Deleted_Ratio, 50, 30)
    #----Performance evaluation with precision under different training-data ratios----
    Precision_Set = []
    X_Set = []
    Coefficient_Set = []
    Avg_PathLen_Set = []
    for key in sorted(Rank_List_Set.keys()):
        #Sort candidate links by predicted score (descending) and keep the top L
        Rank_List_Sorted = sorted(Rank_List_Set[key][0], key=lambda edge: edge[2], reverse=True)
        Top_L_Rank_List = Rank_List_Sorted[0:Top_L]
        Coefficient_Set.append(Rank_List_Set[key][1])
        Avg_PathLen_Set.append(Rank_List_Set[key][2])
        #AUC_Set.append(Rank_List_Set[key][3])
        X_Set.append(float(key) / Edge_Num)
        Precision_Set.append(Performance_Evaluation_Precision(Top_L_Rank_List, Probe_Set))
        #Optional: draw a curve of the sorted scores every 100 deleted links (disabled)
        #if key % 100 == 0:
        #    data = [edge[2] for edge in Rank_List_Sorted]
        #    matploit(data)
    #end for
    print("*Different deleted links ratio:", X_Set)
    print("*Precision_Set with different deleted links ratio:", Precision_Set)
    print("*Coefficient_Set:", Coefficient_Set)
    print("*Avg_PathLen_Set:", Avg_PathLen_Set)
    print("*AUC Value:", AUC)
    return 1
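#The two evaluation helpers called above are defined elsewhere in the author's module.
#The sketch below is only a hypothetical reconstruction of the standard link-prediction
#metrics behind those calls, assuming Predictor(G, u, v) returns a similarity score and
#that Probe_Set / Non_existing_links are lists of node pairs; it is not the original
#implementation.
import random

def Performance_Evaluation_Precision(Top_L_Rank_List, Probe_Set):
    #Precision: fraction of the top-L ranked links that are real probe links
    probe = set(Probe_Set)
    hits = sum(1 for (u, v, score) in Top_L_Rank_List
               if (u, v) in probe or (v, u) in probe)
    return hits / float(len(Top_L_Rank_List)) if Top_L_Rank_List else 0.0

def Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links, n_samples=10000):
    #AUC by sampling: compare the score of a random probe link with the score of a
    #random non-existing link; AUC = (n_higher + 0.5 * n_equal) / n_samples
    higher = equal = 0
    for _ in range(n_samples):
        u1, v1 = random.choice(Probe_Set)
        u2, v2 = random.choice(Non_existing_links)
        s_probe, s_non = Predictor(G, u1, v1), Predictor(G, u2, v2)
        if s_probe > s_non:
            higher += 1
        elif s_probe == s_non:
            equal += 1
    return (higher + 0.5 * equal) / float(n_samples)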
#def Native_Prediction_Experiment(G, Predictor, Probe_Set, Top_L, Deleted_Ratio):