import math

def jensen_shannon_divergence(prob_dist1, prob_dist2, base=math.e):
    # Midpoint distribution M = (prob_dist1 + prob_dist2) / 2
    m = [0.5 * (p + q) for p, q in zip(prob_dist1, prob_dist2)]
    # Jensen-Shannon divergence: JSD(P || Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M)
    jsd = 0.5 * (kullback_leibler_divergence(prob_dist1, m, base) +
                 kullback_leibler_divergence(prob_dist2, m, base))
    return jsd
Source file: utils_jensen_shannon.py
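jensen_shannon_divergence calls a kullback_leibler_divergence helper that is not shown in this file. Below is a minimal sketch compatible with the call signature used above, assuming list-like probability distributions of equal length and the convention that terms with P(i) = 0 contribute nothing; the actual helper in the utils module may handle edge cases differently.

import math

def kullback_leibler_divergence(prob_dist1, prob_dist2, base=math.e):
    # KL(P || Q) = sum_i P(i) * log(P(i) / Q(i)), skipping terms where P(i) == 0.
    # Hypothetical sketch -- the original utils module may implement this differently.
    return sum(p * math.log(p / q, base)
               for p, q in zip(prob_dist1, prob_dist2) if p > 0)

# Example usage: with base=2 the Jensen-Shannon divergence lies in [0, 1].
p = [0.5, 0.5, 0.0]
q = [0.25, 0.25, 0.5]
print(jensen_shannon_divergence(p, q, base=2))  # ~0.31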