import numpy as np
import mxnet as mx

# forward() of the focal-loss CustomOp: it only outputs the softmax probabilities
def forward(self, is_train, req, in_data, out_data, aux):
cls_score = in_data[0].asnumpy()
labels = in_data[1].asnumpy()
    self._labels = labels  # keep the labels for the backward pass
    # numerically stable softmax over the class scores
    pro_ = np.exp(cls_score - cls_score.max(axis=1).reshape((cls_score.shape[0], 1)))
    pro_ /= pro_.sum(axis=1).reshape((cls_score.shape[0], 1))
# pro_ = mx.nd.SoftmaxActivation(cls_score) + 1e-14
# pro_ = pro_.asnumpy()
self.pro_ = pro_
    # keep pt (the predicted probability of the true class) for the backward pass
    self._pt = pro_[np.arange(pro_.shape[0], dtype='int'), labels.astype('int')]
    # NOTE: the focal loss value is not used here; this layer only forwards the
    # class probabilities. The loss value itself should be computed in metric.py
    # (the method is described in the README; see also the sketch after this snippet).
    # focal loss, shape (batch_size, num_class); guard against log(0)
    loss_ = -1 * np.power(1 - pro_, self._gamma) * np.log(np.maximum(pro_, 1e-14))
    # debug output: probability vector and label of one sample
    print("---------------")
    print('pro:', pro_[1], labels[1])
    self.assign(out_data[0], req[0], mx.nd.array(pro_))
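
# --- Sketch: computing the focal loss value in metric.py ---------------------
# As noted above, this layer only forwards the class probabilities; the loss
# value itself is meant to be computed in metric.py. The class below is a
# minimal sketch, assuming the softmax output of this layer is passed as the
# prediction. The class name FocalLossMetric, the default gamma, and the
# 1e-14 guard are illustrative assumptions, not the repository's actual code.
class FocalLossMetric(mx.metric.EvalMetric):
    def __init__(self, gamma=2.0):
        super(FocalLossMetric, self).__init__('FocalLoss')
        self.gamma = gamma

    def update(self, labels, preds):
        pro = preds[0].asnumpy()                      # (batch_size, num_class) softmax output
        label = labels[0].asnumpy().astype('int')
        pt = pro[np.arange(pro.shape[0]), label]      # probability of the true class
        loss = -np.power(1 - pt, self.gamma) * np.log(np.maximum(pt, 1e-14))
        self.sum_metric += loss.sum()
        self.num_inst += label.shape[0]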
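
# --- Sketch: the matching backward pass ---------------------------------------
# pt and the labels are kept in forward() because the gradient of the focal
# loss w.r.t. the logits needs them. The body below is a sketch of that
# gradient, assuming the standard derivative
#   d(FL)/d(x_j) = (gamma * pt * (1-pt)^(gamma-1) * log(pt) - (1-pt)^gamma) * (1{j==y} - p_j),
# which reduces to the usual softmax cross-entropy gradient when gamma = 0.
# The batch averaging at the end is an assumption, not the original code.
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
    labels = self._labels.astype('int')
    pt = self._pt.reshape((-1, 1))                    # (batch_size, 1)
    gamma = self._gamma
    coef = gamma * pt * np.power(1 - pt, gamma - 1) * np.log(np.maximum(pt, 1e-14)) \
           - np.power(1 - pt, gamma)
    one_hot = np.zeros_like(self.pro_)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    grad = coef * (one_hot - self.pro_)               # (batch_size, num_class)
    grad /= labels.shape[0]                           # average over the batch
    self.assign(in_grad[0], req[0], mx.nd.array(grad))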