def softmax_topK_loss(self, sentence, image, K=50, margin=0.2,
                      sparse_k=20, diag_fill=8.0):
    """Bidirectional top-K hinge ranking loss plus a sparsity penalty.

    Builds the pairwise L1 distance matrix between sentence and image
    embeddings, selects each anchor's K hardest negatives (smallest
    distances after masking the ground-truth diagonal), and applies a
    margin hinge against the matching pair's distance — in both the
    sentence->image and image->sentence directions.  Also computes a
    sparsity penalty pushing the sum of each embedding's top-`sparse_k`
    activations toward 1 per modality (2 combined).

    Args:
        sentence: 2-D tensor of sentence embeddings.  NOTE(review):
            assumed shape (batch_size, dim) with row i matching image
            row i — confirm against callers.
        image: 2-D tensor of image embeddings, same layout as `sentence`.
        K: number of hardest negatives kept per anchor.
        margin: hinge margin between positive and negative distances.
        sparse_k: number of top activations used by the sparsity penalty
            (previously hard-coded to 20).
        diag_fill: large constant written on the diagonal so the positive
            pair is never selected as a negative (previously hard-coded
            to 8; must exceed any realistic pairwise distance).

    Returns:
        Tuple of (summed sentence-anchored hinge loss, summed
        image-anchored hinge loss, sparsity loss), all scalar tensors.

    Side effects:
        Sets self.sparse_loss, self.d_neg, self.d_pos and fills several
        'debug/*' entries in self.endpoint.
    """
    # Sparsity penalty: 2 - (top-k activation mass of the sentence +
    # top-k activation mass of the image), summed over the batch.
    sent_topk = tf.reduce_sum(
        tf.nn.top_k(sentence, k=sparse_k, sorted=False)[0], axis=1)
    img_topk = tf.reduce_sum(
        tf.nn.top_k(image, k=sparse_k, sorted=False)[0], axis=1)
    self.sparse_loss = tf.reduce_sum(2 - (sent_topk + img_topk))

    # Pairwise L1 distance matrix, built row-by-row on the CPU to keep
    # the (batch x batch) intermediate off the GPU.
    # d[j, i] = ||sentence_j - image_i||_1
    sim_matrix = []
    with tf.device('cpu:0'):
        for i in range(self.batch_size):
            sim_matrix.append(
                tf.reduce_sum(tf.abs(sentence - image[i, :]), axis=1))
        d = tf.stack(sim_matrix, axis=1)

    # Ground-truth pair distances, tiled K times so they broadcast
    # against the K negatives selected below.  Shape (batch, K).
    positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)

    # Overwrite the diagonal with a large constant so the matching pair
    # cannot be chosen as one of the K nearest negatives.
    length = tf.shape(d)[-1]
    d = tf.matrix_set_diag(d, diag_fill * tf.ones([length]))

    # K smallest distances per sentence (rows) and per image (columns),
    # obtained as top_k of the negated matrix — so the values returned
    # here are NEGATIVE distances.
    sen_loss_K, _ = tf.nn.top_k(-1.0 * d, K, sorted=False)
    im_loss_K, _ = tf.nn.top_k(tf.transpose(-1.0 * d), K, sorted=False)

    # Hinge per negative: relu(d_pos - d_neg + margin); the subtraction
    # is an addition because sen_loss_K / im_loss_K are already negated.
    sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
    image_center_loss = tf.nn.relu(positive + im_loss_K + margin)

    # Monitoring scalars: mean hardest-negative distance (sign restored)
    # and mean positive distance.
    self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K) / -2.0)
    self.d_pos = tf.reduce_mean(positive)

    # Debug endpoints for inspection / summaries.
    self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
    self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K
    self.endpoint['debug/d_Matrix'] = d
    self.endpoint['debug/positive'] = positive
    self.endpoint['debug/s_center_loss'] = sentence_center_loss
    self.endpoint['debug/i_center_loss'] = image_center_loss
    self.endpoint['debug/S'] = sim_matrix

    return (tf.reduce_sum(sentence_center_loss),
            tf.reduce_sum(image_center_loss),
            self.sparse_loss)
Source file: Bidirectionnet_GMM_softmaxloss.py
Language: python
Views: 30
Favorites: 0
Likes: 0
Comments: 0
Comment list
Table of contents