def log_loss(
predictions, labels=None, weights=_WEIGHT_SENTINEL, epsilon=1e-7,
scope=None, targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a Log Loss term to the training procedure.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector. If the shape of
`weight` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weight`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weight` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.mul(
labels,
math_ops.log(predictions + epsilon)) - math_ops.mul(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights)
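
# A minimal NumPy sketch (not part of the original TF source) illustrating
# the elementwise formula computed above: for prediction p and label y,
#   loss = -y * log(p + eps) - (1 - y) * log(1 - p + eps),
# where `eps` guards against log(0). The function name is illustrative only.
import numpy as np

def log_loss_reference(predictions, labels, epsilon=1e-7):
  p = np.asarray(predictions, dtype=np.float64)
  y = np.asarray(labels, dtype=np.float64)
  # Elementwise binary cross-entropy, before any weighting or reduction.
  return -y * np.log(p + epsilon) - (1 - y) * np.log(1 - p + epsilon)

# A confident correct prediction incurs a small loss, a confident wrong one
# a large loss: log_loss_reference([0.9, 0.1], [1, 1]) ~ [0.105, 2.303].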
def unregularized_loss(self, examples):
"""Add operations to compute the loss (without the regularization loss).
Args:
examples: Examples to compute unregularized loss on.
Returns:
An Operation that computes mean (unregularized) loss for given set of
examples.
Raises:
ValueError: if examples are not well defined.
"""
self._assertSpecified(['example_labels', 'example_weights',
'sparse_features', 'dense_features'], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
with name_scope('sdca/unregularized_loss'):
predictions = math_ops.cast(
self._linear_predictions(examples), dtypes.float64)
labels = math_ops.cast(
convert_to_tensor(examples['example_labels']), dtypes.float64)
weights = math_ops.cast(
convert_to_tensor(examples['example_weights']), dtypes.float64)
if self._options['loss_type'] == 'logistic_loss':
return math_ops.reduce_sum(math_ops.mul(
sigmoid_cross_entropy_with_logits(predictions, labels),
weights)) / math_ops.reduce_sum(weights)
if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
# hinge_loss = max(0, 1 - y_i * w*x) where y_i \in {-1, 1}, so we first
# need to convert the 0/1 labels into -1/1 labels.
all_ones = array_ops.ones_like(predictions)
adjusted_labels = math_ops.sub(2 * labels, all_ones)
# Tensor that contains (unweighted) error (hinge loss) per
# example.
error = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(adjusted_labels,
predictions)))
weighted_error = math_ops.mul(error, weights)
return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
weights)
# squared loss
err = math_ops.sub(labels, predictions)
weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
# SDCA squared loss function is sum(err^2) / (2*sum(weights))
return (math_ops.reduce_sum(weighted_squared_err) /
(2.0 * math_ops.reduce_sum(weights)))
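
# A minimal NumPy sketch (not part of the original TF source) of the
# hinge-loss branch above: 0/1 labels are mapped to -1/+1 via 2*y - 1, and
# the per-example error is max(0, 1 - adjusted_label * prediction). The
# function name is illustrative only.
import numpy as np

def hinge_error_reference(predictions, labels):
  p = np.asarray(predictions, dtype=np.float64)
  y = np.asarray(labels, dtype=np.float64)
  adjusted = 2.0 * y - 1.0  # {0, 1} -> {-1, +1}
  # Zero loss once the signed margin reaches 1, linear penalty below that.
  return np.maximum(0.0, 1.0 - adjusted * p)

# hinge_error_reference([2.0, 0.5, -1.0], [1, 1, 0]) -> [0.0, 0.5, 0.0]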
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
"""Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.
Args:
dense_tensor: A `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
absent from the return `SparseTensor`. If `None`, default value of
dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).
Returns:
A `SparseTensor` with the same shape as `dense_tensor`.
Raises:
ValueError: when `dense_tensor`'s rank is `None`.
"""
with ops.name_scope("DenseToSparseTensor"):
dense_t = ops.convert_to_tensor(dense_tensor)
if dense_t.get_shape().ndims is None:
# TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
raise ValueError("dense_tensor.get_shape() should be defined, got None.")
if ignore_value is None:
if dense_t.dtype == dtypes.string:
# Special case: TF strings are converted to numpy objects by default.
ignore_value = ""
else:
ignore_value = dense_t.dtype.as_numpy_dtype()
dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
indices = array_ops.where(
math_ops.not_equal(dense_t, math_ops.cast(ignore_value, dense_t.dtype)))
index_dims = len(dense_t.get_shape())
# Flattens the tensor and indices for use with gather.
flat_tensor = array_ops.reshape(dense_t, [-1])
flat_indices = indices[:, index_dims - 1]
# Computes the correct flattened indices for 2d (or higher) tensors.
if index_dims > 1:
higher_dims = indices[:, :index_dims - 1]
shape_multipliers = array_ops.pack(
_multiplier_helper(array_ops.unpack(dense_shape)[1:]))
offsets = math_ops.reduce_sum(
math_ops.mul(higher_dims, shape_multipliers), reduction_indices=[1])
flat_indices = math_ops.add(flat_indices, offsets)
values = array_ops.gather(flat_tensor, flat_indices)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
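
# A minimal NumPy sketch (not part of the original TF source) of the index
# flattening used above: in a row-major tensor of shape (d_0, ..., d_k), the
# multiplier for axis i is the product of the trailing dimension sizes
# d_{i+1} * ... * d_k, and the flat offset is the dot product of the index
# with these multipliers. The function name is illustrative only.
import numpy as np

def flatten_indices_reference(indices, dense_shape):
  indices = np.asarray(indices, dtype=np.int64)        # shape [n, rank]
  dense_shape = np.asarray(dense_shape, dtype=np.int64)
  # Row-major strides: product of trailing dims per axis, 1 for the last.
  multipliers = np.append(np.cumprod(dense_shape[::-1])[::-1][1:], 1)
  return indices @ multipliers

# Index (1, 2) in a (3, 4) matrix flattens to 1 * 4 + 2 = 6, matching
# np.ravel_multi_index((1, 2), (3, 4)):
# flatten_indices_reference([[1, 2]], [3, 4]) -> array([6])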