def var(x, reduce_instance_dims=True, name=None):
"""Computes the variance of the values of a `Tensor` over the whole dataset.
Uses the biased variance (0 delta degrees of freedom), as given by
(x - mean(x))**2 / length(x).
Args:
x: A `Tensor`.
reduce_instance_dims: By default collapses the batch and instance dimensions
to arrive at a single scalar output. If False, only collapses the batch
dimension and outputs a vector of the same shape as the input.
name: (Optional) A name for this operation.
Returns:
A `Tensor` containing the variance. If `x` is floating point, the variance
will have the same type as `x`. If `x` is integral, the output is cast to
float32 for int8 and int16 and float64 for int32 and int64 (similar to the
behavior of tf.truediv).
"""
  with tf.name_scope(name, 'var'):
    # Note: Calling `mean`, `sum`, and `size` as defined in this module, not the
    # builtins.
    x_mean = mean(x, reduce_instance_dims)
    # x_mean will be float32 or float64, depending on the type of x.
    squared_deviations = tf.square(tf.cast(x, x_mean.dtype) - x_mean)
    return mean(squared_deviations, reduce_instance_dims)
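
As a usage sketch (not part of the original source): assuming this `var` and the companion `mean` are the analyzers exposed by `tensorflow_transform` (imported as `tft` below), they can be combined inside a preprocessing function to standardize a feature. The function name `preprocessing_fn` and the feature key 'x' are illustrative only.

import tensorflow as tf
import tensorflow_transform as tft  # assumed packaging of the analyzers above


def preprocessing_fn(inputs):
  # 'x' is a hypothetical numeric feature; mean/var reduce over the whole dataset.
  x = inputs['x']
  x_mean = tft.mean(x)   # dataset mean, a scalar by default
  x_var = tft.var(x)     # biased dataset variance, a scalar by default
  # Standardize: subtract the dataset mean and divide by the standard deviation.
  x_standardized = (tf.cast(x, x_var.dtype) - x_mean) / tf.sqrt(x_var)
  return {'x_standardized': x_standardized}

In practice tf.Transform also ships `tft.scale_to_z_score` for this exact pattern; the sketch only shows how `var` composes with `mean`.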