import tensorflow as tf

import config  # project hyper-parameter module: EPSILON, ANCHORS, LOSS_COEF_*


def losses(input_mask, labels, ious, box_delta_input, pred_class_probs, pred_conf, pred_box_delta):
    batch_size = tf.shape(input_mask)[0]
    # number of ground-truth objects in the batch (anchors with mask == 1)
    num_objects = tf.reduce_sum(input_mask, name='num_objects')

    with tf.variable_scope('class_regression'):
        # cross-entropy: q * -log(p) + (1-q) * -log(1-p)
        # add a small value into log to prevent blowing up
        class_loss = tf.truediv(
            tf.reduce_sum(
                (labels * (-tf.log(pred_class_probs + config.EPSILON))
                 + (1 - labels) * (-tf.log(1 - pred_class_probs + config.EPSILON)))
                * input_mask * config.LOSS_COEF_CLASS),
            num_objects,
            name='class_loss'
        )
        tf.losses.add_loss(class_loss)

    with tf.variable_scope('confidence_score_regression'):
        # squared error between the predicted confidence and the anchor/ground-truth IOU,
        # with positive and negative anchors weighted separately
        input_mask_ = tf.reshape(input_mask, [batch_size, config.ANCHORS])
        conf_loss = tf.reduce_mean(
            tf.reduce_sum(
                tf.square(ious - pred_conf)
                * (input_mask_ * config.LOSS_COEF_CONF_POS / num_objects
                   + (1 - input_mask_) * config.LOSS_COEF_CONF_NEG / (config.ANCHORS - num_objects)),
                axis=[1]
            ),
            name='confidence_loss'
        )
        tf.losses.add_loss(conf_loss)

    with tf.variable_scope('bounding_box_regression'):
        # squared error on the box deltas, computed only for anchors assigned to an object
        bbox_loss = tf.truediv(
            tf.reduce_sum(
                config.LOSS_COEF_BBOX * tf.square(
                    input_mask * (pred_box_delta - box_delta_input))),
            num_objects,
            name='bbox_loss'
        )
        tf.losses.add_loss(bbox_loss)

    # add above losses as well as weight decay losses to form the total loss
    loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
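
A minimal sketch of how this loss might be wired into a TF 1.x graph. The placeholder shapes follow the usual one-entry-per-anchor convention, and config.CLASSES plus the placeholder/optimizer names below are illustrative assumptions, not part of the original code:

import tensorflow as tf
import config  # assumed to define ANCHORS, CLASSES, EPSILON, LOSS_COEF_*

# illustrative ground-truth placeholders; input_mask marks anchors assigned to an object
input_mask      = tf.placeholder(tf.float32, [None, config.ANCHORS, 1])
labels          = tf.placeholder(tf.float32, [None, config.ANCHORS, config.CLASSES])
ious            = tf.placeholder(tf.float32, [None, config.ANCHORS])
box_delta_input = tf.placeholder(tf.float32, [None, config.ANCHORS, 4])

# in practice the pred_* tensors come from the detection network's output head
pred_class_probs = tf.placeholder(tf.float32, [None, config.ANCHORS, config.CLASSES])
pred_conf        = tf.placeholder(tf.float32, [None, config.ANCHORS])
pred_box_delta   = tf.placeholder(tf.float32, [None, config.ANCHORS, 4])

total_loss = losses(input_mask, labels, ious, box_delta_input,
                    pred_class_probs, pred_conf, pred_box_delta)
train_op = tf.train.MomentumOptimizer(0.01, 0.9).minimize(total_loss)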