def cosine_distance(dim, weights=1.0, name='CosineDistance', scope=None, collect=True):
    """Adds a cosine-distance loss to the training procedure.

    The returned loss assumes that the two tensors it receives (`y_true`,
    `y_pred`) are already unit-normalized along `dim`.

    WARNING: `weights` also supports dimensions of 1, but the broadcasting does
    not work as advertised, you'll wind up with weighted sum instead of weighted
    mean for any but the last dimension. This will be cleaned up soon, so please
    do not rely on the current behavior for anything but the shapes documented for
    `weights` below.

    Args:
        dim: The dimension along which the cosine distance is computed.
        weights: Coefficients for the loss, a `scalar`.
        name: name of the op.
        scope: The scope for the operations performed in computing the loss.
        collect: add to losses collection.

    Returns:
        A scalar `Tensor` representing the loss value.

    Raises:
        ValueError: If `predictions` shape doesn't match `labels` shape, or
            `weights` is `None`.
    """
    def _cosine_loss(y_true, y_pred):
        # cosine distance = 1 - <y_true, y_pred> reduced over `dim`;
        # valid because both inputs are assumed unit-normalized.
        elementwise = math_ops.multiply(y_pred, y_true)
        similarity = math_ops.reduce_sum(elementwise, axis=(dim,), keep_dims=True)
        return 1 - similarity

    return built_loss(_cosine_loss, weights, name, scope, collect)
# NOTE(review): the two lines below ("comment list" / "article table of contents")
# are page-navigation artifacts from a web scrape, not code; commented out so the
# module remains importable without a NameError.
# 评论列表
# 文章目录