import math

def kullback_leibler_divergence(prob_dist1, prob_dist2, base=math.e):
    # Kullback-Leibler divergence D(P || Q) = sum_i p_i * log(p_i / q_i)
    kl_divergence = 0.0
    # Small pseudo-count added to both terms to avoid log(0) and division by zero
    pseudo_count = 0.000001
    for index in range(len(prob_dist1)):
        kl_divergence += prob_dist1[index] * math.log(
            (float(prob_dist1[index]) + pseudo_count) /
            (float(prob_dist2[index]) + pseudo_count), base)
    return kl_divergence
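
A quick usage sketch (the distributions p and q below are illustrative values, not from the source):

# Example: KL divergence between two three-outcome distributions,
# first in nats (base e), then in bits (base 2).
p = [0.5, 0.25, 0.25]
q = [0.4, 0.3, 0.3]
print(kullback_leibler_divergence(p, q))          # nats
print(kullback_leibler_divergence(p, q, base=2))  # bits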
Source file: utils_jensen_shannon.py (Python)
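
Since the file name points to a Jensen-Shannon utility, the KL helper above is presumably combined into the Jensen-Shannon divergence. A minimal sketch of that construction, assuming the standard definition JSD(P, Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M) with M = (P + Q) / 2 (the function name jensen_shannon_divergence is an assumption, not from the source):

def jensen_shannon_divergence(prob_dist1, prob_dist2, base=math.e):
    # Mixture distribution M = (P + Q) / 2
    mixture = [(p + q) / 2.0 for p, q in zip(prob_dist1, prob_dist2)]
    # JSD is symmetric, unlike KL, and bounded by log(2) in the chosen base
    return 0.5 * (kullback_leibler_divergence(prob_dist1, mixture, base) +
                  kullback_leibler_divergence(prob_dist2, mixture, base))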