def mean_squared_error(
predictions, labels=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a Sum-of-Squares loss to the training procedure.
`weight` acts as a coefficient for the loss. If a scalar is provided, then the
loss is simply scaled by the given value. If `weight` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weight` vector. If the shape of
`weight` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weight`.
Args:
predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as `predictions`.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.sub(predictions, labels))
return compute_weighted_loss(losses, weights)
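# Illustrative only: a minimal NumPy sketch (hypothetical helper, not library
# code) of the three weighting modes described in the docstring above. The
# real `compute_weighted_loss` normalizes differently (by the number of
# present, i.e. non-zero-weight, elements); a plain mean is used here for
# brevity.
def _example_mean_squared_error_weighting():
  import numpy as np
  predictions = np.array([[1.0, 2.0], [3.0, 4.0]])
  labels = np.array([[1.5, 2.0], [2.0, 4.0]])
  losses = np.square(predictions - labels)  # per-element squared error
  scalar_loss = 0.5 * losses.mean()  # scalar weight scales the whole loss
  sample_w = np.array([1.0, 0.0])  # [batch_size] weights mask whole samples
  sample_loss = (losses * sample_w[:, None]).mean()
  elem_w = np.array([[1.0, 0.0], [0.0, 1.0]])  # per-element weights
  elem_loss = (losses * elem_w).mean()
  return scalar_loss, sample_loss, elem_loss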
def cosine_distance(
predictions, labels=None, dim=None, weights=_WEIGHT_SENTINEL, scope=None,
targets=None, weight=_WEIGHT_SENTINEL):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches `predictions`.
dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
targets: Deprecated alias for `labels`.
weight: Deprecated alias for `weights`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
labels = _labels(labels, targets)
weights = _weights(weights, weight)
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
radial_diffs = math_ops.mul(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim])
return compute_weighted_loss(losses, weights)
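# Illustrative only: a NumPy sketch (hypothetical helper, not library code) of
# the cosine-distance computation above. Rows must already be unit-normalized;
# identical rows give a loss of 0 and orthogonal rows give 1.
def _example_cosine_distance():
  import numpy as np
  predictions = np.array([[0.6, 0.8], [1.0, 0.0]])
  labels = np.array([[0.6, 0.8], [0.0, 1.0]])
  radial_diffs = predictions * labels      # elementwise product
  losses = 1.0 - radial_diffs.sum(axis=1)  # [0.0, 1.0]; dim=1 here
  return losses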
def _weight_tensor(features, weight_column_name):
if not weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[weight_column_name]),
shape=(-1,))
def _mean_squared_loss(logits, labels):
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=[1])
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=[1])
logits.get_shape().assert_is_compatible_with(labels.get_shape())
return math_ops.square(logits - math_ops.to_float(labels))
def _log_loss_with_two_classes(logits, labels):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=[1])
loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
math_ops.to_float(labels))
return loss_vec
def _sigmoid_cross_entropy_loss(logits, labels):
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
return nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(labels))
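# Illustrative only: the numerically stable form that
# `sigmoid_cross_entropy_with_logits` is documented to compute,
# max(x, 0) - x * z + log(1 + exp(-|x|)), sketched in NumPy as a hypothetical
# reference for the two loss helpers above.
def _example_sigmoid_cross_entropy(logits, labels):
  import numpy as np
  logits = np.asarray(logits, dtype=np.float64)
  labels = np.asarray(labels, dtype=np.float64)
  return (np.maximum(logits, 0) - logits * labels
          + np.log1p(np.exp(-np.abs(logits))))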
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
def _weighted_average_loss_metric_spec(loss_fn, prediction_key,
                                       label_key, weight_key):
def _streaming_weighted_average_loss(predictions, labels, weights=None):
loss_unweighted = loss_fn(predictions, labels)
if weights is not None:
weights = math_ops.to_float(weights)
_, weighted_average_loss = _loss(loss_unweighted,
weights,
name="eval_loss")
return metrics_lib.streaming_mean(weighted_average_loss)
  return metric_spec.MetricSpec(_streaming_weighted_average_loss,
                                prediction_key, label_key, weight_key)
def _make_streaming_with_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels):
return streaming_metrics_fn(predictions=math_ops.to_float(
math_ops.greater_equal(predictions, threshold)),
labels=labels)
return _streaming_metrics
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]),
shape=(-1,))
def loss(self, logits, target, features):
"""Returns loss tensor for this head.
The loss returned is the weighted average.
L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
Returns:
Loss tensor.
"""
target = target[self.name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
def _mean_squared_loss(logits, target):
# To prevent broadcasting inside "-".
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.square(logits - math_ops.to_float(target))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
labels=labels,
weights=weights)
return _accuracy_metric
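# Illustrative only: a NumPy sketch (hypothetical helper) of the thresholding
# done by `_accuracy_at_threshold` and `_make_streaming_with_threshold` above:
# predictions are binarized at the threshold before accuracy is computed.
def _example_accuracy_at_threshold(threshold=0.5):
  import numpy as np
  predictions = np.array([0.1, 0.7, 0.4, 0.9])
  labels = np.array([0.0, 1.0, 0.0, 1.0])
  thresholded = (predictions >= threshold).astype(np.float32)
  return (thresholded == labels).mean()  # 1.0 for this example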
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))
# pylint: disable=unused-argument
def _r2(probabilities, targets, weights=None):
if targets.get_shape().ndims == 1:
targets = array_ops.expand_dims(targets, -1)
targets = math_ops.to_float(targets)
y_mean = math_ops.reduce_mean(targets, 0)
squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
squares_residuals = math_ops.reduce_sum(math_ops.square(
targets - probabilities), 0)
score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
return metric_ops.streaming_mean(score, weights=weights)
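# Illustrative only: the R^2 (coefficient of determination) computed by `_r2`
# above, sketched in NumPy (hypothetical helper).
def _example_r2():
  import numpy as np
  targets = np.array([1.0, 2.0, 3.0])
  predictions = np.array([1.1, 1.9, 3.2])
  squares_total = np.square(targets - targets.mean()).sum()   # 2.0
  squares_residuals = np.square(targets - predictions).sum()  # 0.06
  return 1.0 - squares_residuals / squares_total              # 0.97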
def _count_condition(values, weights=None, metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A tensor representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
ops.add_to_collections(metrics_collections, value_tensor)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
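# Illustrative only: the accumulation performed by `_count_condition` above,
# sketched in NumPy (hypothetical helper). True values contribute their
# weight; a weight of 0 masks a value out.
def _example_count_condition():
  import numpy as np
  values = np.array([True, False, True, True])
  weights = np.array([1.0, 1.0, 0.0, 2.0])
  return (values.astype(np.float32) * weights).sum()  # 1.0 + 0.0 + 2.0 = 3.0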