def _semantic_regularization(self, xSemPr, xSemLb, semReg):
    # Assumes the module-level imports used by the rest of this file:
    # import caffe; import numpy as np; from caffe import layers as L, params as P
    ns = self.netspec
    if self.semantics == ATTRIBUTES:
        # Attributes are independent binary states, so a single sigmoid
        # cross-entropy loss supervises all attribute scores jointly.
        name = 'SCoRe/semLoss'
        ns[name] = L.SigmoidCrossEntropyLoss(
            xSemPr, xSemLb, name=name,
            loss_weight=semReg / (len(self.constrains) * np.sqrt(2.)) * 10.,
            include=dict(phase=caffe.TRAIN))
    else:
        # Each remaining constraint is a multi-state softmax problem, so
        # build per-constraint loss, score, and label blob names.
        c_keys = list(self.constrains.keys())
        losses = ['SCoRe/semLoss/%s' % key for key in c_keys]
        scores = ['SCoRe/semLoss/%s/scores' % key for key in c_keys]
        labels = ['SCoRe/semLoss/%s/labels' % key for key in c_keys]
        # Slice the semantic score blob into one chunk per constraint.
        # .items() replaces the Python 2-only iteritems(); explicit bottom/top
        # plus in_place=True keeps the hand-picked top names.
        xSemPr_name = [k for k, v in ns.tops.items() if v == xSemPr][0]
        slice_scores = L.Slice(
            name='SCoRe/semLoss/slice_scores', bottom=[xSemPr_name],
            ntop=len(scores), top=scores, in_place=True,
            slice_point=np.cumsum(self.num_states)[:-1].tolist(),
            include=dict(phase=caffe.TRAIN))
        # Slice the semantic label blob: one label column per constraint.
        xSemLb_name = [k for k, v in ns.tops.items() if v == xSemLb][0]
        slice_labels = L.Slice(
            name='SCoRe/semLoss/slice_labels', bottom=[xSemLb_name],
            ntop=len(labels), top=labels, in_place=True,
            slice_point=list(range(1, len(self.constrains))),
            include=dict(phase=caffe.TRAIN))
        # Supervise each slice with its own softmax loss; the weights are
        # chosen so the per-constraint losses together contribute semReg.
        for i, xLoss in enumerate(losses):
            ns[xLoss] = L.SoftmaxWithLoss(
                slice_scores[i], slice_labels[i], name=xLoss,
                loss_weight=semReg / len(self.constrains),
                include=dict(phase=caffe.TRAIN))
        # Sum the individual losses into one scalar for display only; the
        # Eltwise top carries no loss_weight, so it adds no extra gradient.
        ns['SCoRe/semLoss'] = L.Eltwise(
            *[ns[l] for l in losses], name='SCoRe/semLoss',
            operation=P.Eltwise.SUM,
            coeff=[semReg / len(self.constrains)] * len(losses),
            include=dict(phase=caffe.TRAIN))
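
To see the slice-and-supervise pattern in isolation, here is a minimal, self-contained sketch. It assumes only pycaffe and numpy; `toy_semantic_losses`, the blob names, and the `num_states = [3, 4, 5]` example are illustrative stand-ins, not part of SCoRe.

import numpy as np
import caffe
from caffe import layers as L, params as P

def toy_semantic_losses(num_states, sem_reg=1.0):
    # Hypothetical toy net: 8 samples, one concatenated score blob and one
    # label column per semantic constraint (all zeros, just for shape).
    ns = caffe.NetSpec()
    ns.scores, ns.labels = L.DummyData(
        shape=[dict(dim=[8, int(np.sum(num_states))]),
               dict(dim=[8, len(num_states)])],
        ntop=2)
    # Split scores at cumulative state counts, labels one column at a time.
    score_slices = L.Slice(ns.scores, ntop=len(num_states),
                           slice_point=np.cumsum(num_states)[:-1].tolist())
    label_slices = L.Slice(ns.labels, ntop=len(num_states),
                           slice_point=list(range(1, len(num_states))))
    # One softmax loss per constraint, each weighted by sem_reg / #constraints.
    losses = []
    for i in range(len(num_states)):
        name = 'semLoss/%d' % i
        ns[name] = L.SoftmaxWithLoss(score_slices[i], label_slices[i],
                                     loss_weight=sem_reg / len(num_states))
        losses.append(ns[name])
    # Display-only sum of the per-constraint losses.
    ns['semLoss'] = L.Eltwise(*losses, operation=P.Eltwise.SUM,
                              coeff=[sem_reg / len(num_states)] * len(losses))
    return ns

print(toy_semantic_losses([3, 4, 5]).to_proto())

Printing the generated prototxt should show one Slice layer per blob, one SoftmaxWithLoss per constraint, and the Eltwise sum, which is the same TRAIN-phase graph the method above wires up for non-attribute semantics.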