def calc_log_loss(self, Pairwise, Question, Answer, Review, TermtoTermR, TermtoTermP, Question_I, Answer_I, Review_I):
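    """Build the (negative) log-likelihood of the review-based mixture-of-experts model.

    Shapes implied by the graph below (names follow the code; the exact semantics of
    each input are an assumption inferred from how it is used):
      Pairwise    : [nq, nr, PairwiseDim] handcrafted question-review features
      TermtoTermR : sparse, flattened to [nq*nr, V] question-review term overlap
      TermtoTermP : sparse, flattened to [nq*na*nr, V] answer-review term overlap
      Question_I / Answer_I / Review_I : sparse bag-of-words over a vocabulary of size V

    Returns the negative regularized log-likelihood, the per-answer mixture
    probabilities (MoE), and the per-review relevance weights.
    """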
    #print 'Doing for item %d'%(i)
    shape1 = tf.shape(Pairwise)
    shape2 = tf.shape(Answer)
    nq = shape1[0]   # number of questions
    nr = shape1[1]   # number of reviews
    na = shape2[1]   # number of answer candidates per question
    # Pairwise feature score for every (question, review) pair: theta^T * features
    pairwise = tf.reshape(Pairwise, [-1, self.PairwiseDim])
    pairwise = tf.reshape(tf.matmul(pairwise, self.theta), [nq, nr])
    # Term-to-term (bag-of-words overlap) relevance between questions and reviews
    termTotermR = tf.sparse_reshape(TermtoTermR, [-1, self.V])
    termTotermR = tf.reshape(tf.sparse_tensor_dense_matmul(termTotermR, self.RelvPar), [nq, nr])
    # Bilinear relevance: project questions (A) and reviews (B), then take inner products
    QProj = tf.sparse_tensor_dense_matmul(Question_I, self.A)
    RProjR = tf.sparse_tensor_dense_matmul(Review_I, self.B)
    BilinearR = tf.matmul(QProj, tf.transpose(RProjR))
    # Mixture weights: softmax over reviews of the combined relevance scores
    Relevance = tf.nn.softmax(pairwise + termTotermR + BilinearR)
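    # Relevance (mixture weight) of review r for question q, as built above:
    #   Relevance[q, r] = softmax_r( theta^T phi(q, r) + RelvPar^T t(q, r) + (q A) . (r B) )
    # where phi are the pairwise features, t the question-review term overlap, and
    # (q A), (r B) the A/B projections of the question and review bags-of-words.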
    # Term-to-term prediction score between answers and reviews
    termTotermP = tf.sparse_reshape(TermtoTermP, [-1, self.V])
    termTotermP = tf.reshape(tf.sparse_tensor_dense_matmul(termTotermP, self.PredPar), [nq, na, nr])
    # Bilinear prediction: project answers (X) and reviews (Y), then take inner products
    AProj = tf.sparse_tensor_dense_matmul(tf.sparse_reshape(Answer_I, [-1, self.V]), self.X)
    RProjP = tf.sparse_tensor_dense_matmul(Review_I, self.Y)
    BilinearP = tf.reshape(tf.matmul(AProj, tf.transpose(RProjP)), [nq, na, nr])
    Prediction = BilinearP + termTotermP
    # Margin of the first answer over every other candidate, per review
    Prediction = tf.expand_dims(Prediction[:,0,:], 1) - Prediction
    Prediction = Prediction[:,1:,:]          # drop the self-comparison row
    Prediction = tf.sigmoid(Prediction)      # per-review preference probabilities
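    # Prediction[q, a, r] is now sigmoid(s(q, answer_0, r) - s(q, answer_{a+1}, r)):
    # the probability, according to review r alone, that the first answer
    # (presumably the observed one) is preferred over candidate a+1.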
    # Mixture of experts: marginalize the per-review predictions with the relevance weights
    MoE = tf.reduce_sum(tf.multiply(Prediction, tf.expand_dims(Relevance, axis = 1)), axis = 2)
    accuracy_count = tf.cast(tf.shape(tf.where(MoE > 0.5))[0], tf.float64)
    count = nq * na
    log_likelihood = tf.reduce_sum(tf.log(MoE))
    # L2 regularization on the relevance (A, B) and prediction (X, Y) projections
    R1 = tf.reduce_sum(tf.square(self.A)) + tf.reduce_sum(tf.square(self.B))
    R2 = tf.reduce_sum(tf.square(self.X)) + tf.reduce_sum(tf.square(self.Y))
    log_likelihood -= self.Lambda * (R1 + R2)
    return -1 * log_likelihood, MoE, Relevance
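
# --- Illustrative sketch (not from the original source) ---
# A minimal, self-contained NumPy example of the mixture-of-experts
# marginalization computed above: each review acts as an expert, Relevance
# supplies the mixture weights (softmax over reviews), and the marginal
# probability of preferring the first answer over an alternative is the
# relevance-weighted average of the per-review sigmoid margins. All names,
# sizes, and values below are made up for illustration only.
def _moe_sketch():
    import numpy as np
    nq, na, nr = 2, 3, 4                                # toy sizes: questions, answers, reviews
    rng = np.random.default_rng(0)

    scores = rng.normal(size=(nq, nr))                  # stands in for pairwise + termTotermR + BilinearR
    relevance = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)   # softmax over reviews

    pred = rng.normal(size=(nq, na, nr))                # stands in for BilinearP + termTotermP
    margin = pred[:, :1, :] - pred                      # first answer vs. every candidate
    prob = 1.0 / (1.0 + np.exp(-margin[:, 1:, :]))      # sigmoid, drop the self-comparison

    moe = (prob * relevance[:, None, :]).sum(axis=2)    # [nq, na-1] marginal probabilities
    return -np.log(moe).sum()                           # unregularized negative log-likelihood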