import chainer.functions as F


def listnet(x, t):
    """
    The Top-1 approximated ListNet loss as in Cao et al. (2007), Learning to
    Rank: From Pairwise Approach to Listwise Approach.
    :param x: The activation of the previous layer (the predicted document scores)
    :param t: The target relevance labels
    :return: The loss
    """
    # ListNet top-1 reduces to a softmax and simple cross entropy
    st = F.softmax(t, axis=0)  # target top-1 probability distribution over documents
    sx = F.softmax(x, axis=0)  # predicted top-1 probability distribution over documents
    return -F.mean(st * F.log(sx))
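
A minimal usage sketch (the shapes and sample values below are assumptions, not part of the original snippet): with Chainer, x and t can be passed as (n_docs, 1) float32 arrays for a single query's result list, so the softmax over axis 0 normalises across the documents of that list.

import numpy as np

scores = np.array([[2.0], [1.0], [0.5]], dtype=np.float32)  # predicted scores, one row per document
labels = np.array([[1.0], [0.0], [0.0]], dtype=np.float32)  # graded relevance labels
loss = listnet(scores, labels)  # scalar chainer.Variable holding the loss
print(loss)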