Example source code for Python reduce_mean()

tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def var(x, axis=None, keepdims=False):
      """Variance of a tensor, alongside the specified axis.

      Arguments:
          x: A tensor or variable.
          axis: An integer, the axis to compute the variance.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1. If `keepdims` is `True`,
              the reduced dimension is retained with length 1.

      Returns:
          A tensor with the variance of elements of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      if x.dtype.base_dtype == dtypes_module.bool:
        x = math_ops.cast(x, floatx())
      m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
      devs_squared = math_ops.square(x - m)
      return math_ops.reduce_mean(
          devs_squared, reduction_indices=axis, keep_dims=keepdims)
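
As a cross-check (not part of the original file), a minimal NumPy sketch of the same computation: cast booleans to float, subtract the mean with dimensions kept so broadcasting works, then average the squared deviations. All values below are made up.

import numpy as np

def np_var(x, axis=None, keepdims=False):
    # Rough NumPy mirror of the Keras backend helper above.
    x = np.asarray(x)
    if x.dtype == np.bool_:
        x = x.astype(np.float32)  # same cast as the bool branch above
    m = np.mean(x, axis=axis, keepdims=True)  # keep dims so (x - m) broadcasts
    return np.mean(np.square(x - m), axis=axis, keepdims=keepdims)

print(np_var(np.array([[1., 2., 3.], [3., 4., 5.]]), axis=0))  # -> [1. 1. 1.]
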
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def mean(x, axis=None, keepdims=False):
      """Mean of a tensor, alongside the specified axis.

      Arguments:
          x: A tensor or variable.
          axis: A list of integers. Axes to compute the mean.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1 for each entry in `axis`. If `keepdims` is `True`,
              the reduced dimensions are retained with length 1.

      Returns:
          A tensor with the mean of elements of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      if x.dtype.base_dtype == dtypes_module.bool:
        x = math_ops.cast(x, floatx())
      return math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
dnn.py (project: lsdc, author: febert)
def _rescale_eval_loss(loss, weights):
  """Rescales evaluation loss according to the given weights.

  The rescaling is needed because in the training loss weights are not
  considered in the denominator, whereas for the evaluation loss we should
  divide by the sum of weights.

  The rescaling factor is:
    R = sum_{i} 1 / sum_{i} w_{i}

  Args:
    loss: the scalar weighted loss.
    weights: weight coefficients. Either a scalar, or a `Tensor` of shape
      [batch_size].

  Returns:
    The given loss multiplied by the rescaling factor.
  """
  rescaling_factor = math_ops.reduce_mean(weights)
  return math_ops.div(loss, rescaling_factor)
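
A quick numeric sanity check of that rescaling (made-up numbers, not from dnn.py): the weighted training loss sum_i w_i*l_i / B divided by mean(w) = sum_i w_i / B gives sum_i w_i*l_i / sum_i w_i, i.e. the weighted average wanted for evaluation.

import numpy as np

losses = np.array([1.0, 2.0, 4.0])    # per-example losses l_i (made up)
weights = np.array([1.0, 0.5, 0.5])   # per-example weights w_i (made up)

training_loss = np.sum(weights * losses) / len(losses)  # sum w_i*l_i / B
eval_loss = training_loss / np.mean(weights)            # divide by mean(w), as above

assert np.isclose(eval_loss, np.sum(weights * losses) / np.sum(weights))
print(training_loss, eval_loss)  # 1.333... and 2.0
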
hybrid_model.py (project: lsdc, author: febert)
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss
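
For orientation, the two branches above reduce to RMSE for regression and mean sparse softmax cross-entropy for classification. A rough NumPy sketch of both reductions with made-up inputs (not the project's graph code):

import numpy as np

# Regression branch: root-mean-squared error over the batch.
preds = np.array([1.0, 2.5, 0.0])
targets = np.array([1.0, 2.0, 1.0])
rmse = np.sqrt(np.mean((preds - targets) ** 2))

# Classification branch: mean sparse softmax cross-entropy from logits.
logits = np.array([[2.0, 0.5], [0.1, 1.5]])
labels = np.array([0, 1])
log_probs = logits - np.log(np.sum(np.exp(logits), axis=1, keepdims=True))
xent = np.mean(-log_probs[np.arange(len(labels)), labels])

print(rmse, xent)
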
hybrid_model.py (project: lsdc, author: febert)
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss
head.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _centered_bias_step(centered_bias, logits_dimension, labels, loss_fn):
  """Creates and returns training op for centered bias."""
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
    batch_size = array_ops.shape(labels)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias, (batch_size,)),
        (batch_size, logits_dimension))
    with ops.name_scope(None, "centered_bias", (labels, logits)):
      centered_bias_loss = math_ops.reduce_mean(
          loss_fn(logits, labels), name="training_loss")
  # Learn central bias by an optimizer. 0.1 is a conservative lr for a
  # single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=(centered_bias,), name=name)
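
The tile/reshape pair above just broadcasts the per-logit bias vector into a [batch_size, logits_dimension] matrix of identical rows so it can be fed to the loss function. A small NumPy illustration with made-up values:

import numpy as np

centered_bias = np.array([0.3, -0.1, 0.2])  # one bias per logit (made up)
batch_size = 4

# Same tile-then-reshape trick: every row of the fake logits equals the bias.
logits = np.reshape(np.tile(centered_bias, batch_size),
                    (batch_size, centered_bias.shape[0]))
print(logits.shape)  # (4, 3)
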
gmm_ops.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the case of
  diagonal matrix just the diagonal is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov
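
As a sanity check, the non-diagonal branch matches NumPy's sample covariance with observations in rows, and the diagonal branch is just its diagonal. A sketch with made-up data (not from gmm_ops.py):

import numpy as np

x = np.array([[1.0, 2.0], [2.0, 0.0], [4.0, 4.0]])
centered = x - x.mean(axis=0, keepdims=True)

cov = centered.T @ centered / (x.shape[0] - 1)                          # full covariance
diag = np.sum(centered ** 2, axis=0, keepdims=True) / (x.shape[0] - 1)  # diagonal only

assert np.allclose(cov, np.cov(x, rowvar=False))
assert np.allclose(np.diag(cov), diag.ravel())
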
tensor_forest.py (project: deep-learning, author: lbkchen)
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(array_ops.pack(sizes))

  # pylint: disable=unused-argument
tensor_forest.py (project: deep-learning, author: lbkchen)
def average_impurity(self):
    """Constructs a TF graph for evaluating the leaf impurity of a forest.

    Returns:
      The last op in the graph.
    """
    impurities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        impurities.append(self.trees[i].average_impurity())
    return math_ops.reduce_mean(array_ops.pack(impurities))
monte_carlo.py (project: lsdc, author: febert)
def _sample_mean(values):
  """Mean over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_mean(values, reduction_indices=[0])
target_column.py (project: lsdc, author: febert)
def training_loss(self, logits, target, features, name="training_loss"):
    """Returns training loss tensor for this head.

    Training loss is different from the loss reported on the tensorboard as we
    should respect the example weights when computing the gradient.

      L = sum_{i} w_{i} * l_{i} / B

    where B is the number of examples in the batch, and l_{i} and w_{i} are the
    individual loss and example weight for example i.

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.
      name: Op name.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name=name)
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.reduce_mean(loss_weighted, name=name)
tensor_forest.py (project: lsdc, author: febert)
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))

  # pylint: disable=unused-argument
tensor_forest.py (project: lsdc, author: febert)
def average_impurity(self):
    """Constructs a TF graph for evaluating the leaf impurity of a forest.

    Returns:
      The last op in the graph.
    """
    impurities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        impurities.append(self.trees[i].average_impurity())
    return math_ops.reduce_mean(array_ops.pack(impurities))
eval_metrics.py (project: lsdc, author: febert)
def _r2(probabilities, targets):
  if targets.get_shape().ndims == 1:
    targets = array_ops.expand_dims(targets, -1)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
  squares_residuals = math_ops.reduce_sum(math_ops.square(
      targets - probabilities), 0)
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metric_ops.streaming_mean(score)
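
The score combines the per-output-column ratios as 1 - sum_k(SS_res_k / SS_tot_k) before `streaming_mean` turns it into a running metric. A NumPy sketch of the score itself, with made-up single-output data:

import numpy as np

targets = np.array([[1.0], [2.0], [3.0], [4.0]])
predictions = np.array([[1.1], [1.9], [3.2], [3.8]])

y_mean = targets.mean(axis=0)
ss_tot = np.sum((targets - y_mean) ** 2, axis=0)
ss_res = np.sum((targets - predictions) ** 2, axis=0)
r2 = 1.0 - np.sum(ss_res / ss_tot)
print(r2)  # ~0.98, close to 1 when predictions track targets
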
hybrid_model.py (project: lsdc, author: febert)
def _do_layer_inference(self, layer, data):

    # If this is a collection of layers, return the mean of their inference
    # results.
    if isinstance(layer, collections.Iterable):
      return math_ops.reduce_mean(
          array_ops.pack([l.inference_graph(data) for l in layer]), 0)
    # If this is a single layer, return its inference result.
    else:
      return layer.inference_graph(data)
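
Averaging an ensemble of layer outputs is just a stack followed by a mean over axis 0 (`array_ops.pack` is the pre-1.0 name of `stack`). A NumPy sketch with made-up outputs from three hypothetical layers:

import numpy as np

layer_outputs = [np.array([[0.2, 0.8], [0.6, 0.4]]),
                 np.array([[0.4, 0.6], [0.5, 0.5]]),
                 np.array([[0.3, 0.7], [0.7, 0.3]])]

ensemble = np.mean(np.stack(layer_outputs), axis=0)
print(ensemble)  # element-wise mean across the three layers
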
stochastic_gradient_estimators.py (project: lsdc, author: febert)
def get_mean_baseline(ema_decay=0.99, name=None):
  """ExponentialMovingAverage baseline.

  Args:
    ema_decay: decay rate for the ExponentialMovingAverage.
    name: name for variable scope of the ExponentialMovingAverage.

  Returns:
    Callable baseline function that takes the `StochasticTensor` (unused) and
    the downstream `loss`, and returns an EMA of the loss.
  """

  def mean_baseline(_, loss):
    with vs.variable_scope(name, default_name="MeanBaseline"):
      reduced_loss = math_ops.reduce_mean(loss)

      ema = training.ExponentialMovingAverage(decay=ema_decay)
      update_op = ema.apply([reduced_loss])

      with ops.control_dependencies([update_op]):
        # Using `identity` causes an op to be added in this context, which
        # triggers the update. Removing the `identity` means nothing is updated.
        baseline = array_ops.identity(ema.average(reduced_loss))

      return baseline

  return mean_baseline
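
The baseline is simply an exponential moving average of the scalar loss; each `ema.apply` step nudges the shadow value toward the current loss. A plain-Python sketch of the update rule only (the TF version also handles variable creation and, in some versions, zero-debiasing), with made-up losses:

def ema_update(shadow, value, decay=0.99):
    # shadow <- decay * shadow + (1 - decay) * value
    return decay * shadow + (1.0 - decay) * value

baseline = 0.0
for loss in [1.0, 0.9, 1.1, 0.8]:  # made-up per-step mean losses
    baseline = ema_update(baseline, loss)
print(baseline)  # slowly tracks the recent losses
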
monte_carlo.py (project: lsdc, author: febert)
def _sample_mean(values):
  """Mean over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_mean(values, reduction_indices=[0])
head.py (project: lsdc, author: febert)
def _loss(loss_unweighted, weight, name):
  """Returns loss."""
  if weight is None:
    loss = math_ops.reduce_mean(loss_unweighted, name=name)
    return loss, loss
  else:
    loss_weighted = _weighted_loss(loss_unweighted, weight)
    weighted_average_loss = math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight)),
        name="weighted_average_loss")
    loss = math_ops.reduce_mean(loss_weighted, name=name)
    return loss, weighted_average_loss
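
Note the two return values differ whenever the weights do not average to 1: the first is mean(w * l) (what the optimizer sees), the second is sum(w * l) / sum(w) (what gets reported). A made-up numeric example:

import numpy as np

l = np.array([2.0, 1.0, 3.0])  # unweighted per-example losses (made up)
w = np.array([2.0, 1.0, 1.0])  # example weights (made up)

training_loss = np.mean(w * l)                     # sum w_i*l_i / B
weighted_average_loss = np.sum(w * l) / np.sum(w)  # sum w_i*l_i / sum w_i
print(training_loss, weighted_average_loss)        # 2.666... vs 2.0
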
head.py (project: lsdc, author: febert)
def _centered_bias_step(logits_dimension, weight_collection, labels,
                        train_loss_fn):
  """Creates and returns training op for centered bias."""
  centered_bias = ops.get_collection(weight_collection)
  batch_size = array_ops.shape(labels)[0]
  logits = array_ops.reshape(
      array_ops.tile(centered_bias[0], [batch_size]),
      [batch_size, logits_dimension])
  with ops.name_scope(None, "centered_bias", (labels, logits)):
    centered_bias_loss = math_ops.reduce_mean(
        train_loss_fn(logits, labels), name="training_loss")
  # Learn central bias by an optimizer. 0.1 is a conservative lr for a
  # single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=centered_bias)
target_column.py (project: lsdc, author: febert)
def training_loss(self, logits, target, features, name="training_loss"):
    """Returns training loss tensor for this head.

    Training loss is different from the loss reported on the tensorboard as we
    should respect the example weights when computing the gradient.

      L = sum_{i} w_{i} * l_{i} / B

    where B is the number of examples in the batch, and l_{i} and w_{i} are the
    individual loss and example weight for example i.

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.
      name: Op name.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name=name)
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.reduce_mean(loss_weighted, name=name)
target_column.py (project: lsdc, author: febert)
def loss(self, logits, target, features):
    """Returns loss tensor for this head.

    The loss returned is the weighted average.

      L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
        name="loss")
tensor_forest.py (project: lsdc, author: febert)
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))

  # pylint: disable=unused-argument
tensor_forest.py (project: lsdc, author: febert)
def average_impurity(self):
    """Constructs a TF graph for evaluating the leaf impurity of a forest.

    Returns:
      The last op in the graph.
    """
    impurities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        impurities.append(self.trees[i].average_impurity())
    return math_ops.reduce_mean(array_ops.pack(impurities))
eval_metrics.py (project: lsdc, author: febert)
def _r2(probabilities, targets, weights=None):
  if targets.get_shape().ndims == 1:
    targets = array_ops.expand_dims(targets, -1)
  targets = math_ops.to_float(targets)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
  squares_residuals = math_ops.reduce_sum(math_ops.square(
      targets - probabilities), 0)
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metric_ops.streaming_mean(score, weights=weights)
classification.py (project: lsdc, author: febert)
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
                 matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
            bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
                if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.mul(is_correct, weights)
      num_values = math_ops.mul(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
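
With weights supplied, the function returns a weighted fraction of correct predictions rather than a plain mean. A NumPy sketch with made-up values:

import numpy as np

predictions = np.array([1, 0, 1, 1])
labels = np.array([1, 1, 1, 0])
weights = np.array([1.0, 2.0, 1.0, 1.0])

is_correct = (predictions == labels).astype(np.float32)
weighted_accuracy = np.sum(is_correct * weights) / np.sum(weights)
unweighted_accuracy = np.mean(is_correct)
print(weighted_accuracy, unweighted_accuracy)  # 0.4 vs 0.5
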
stochastic_gradient_estimators.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def get_mean_baseline(ema_decay=0.99, name=None):
  """ExponentialMovingAverage baseline.

  Args:
    ema_decay: decay rate for the ExponentialMovingAverage.
    name: name for variable scope of the ExponentialMovingAverage.

  Returns:
    Callable baseline function that takes the `StochasticTensor` (unused) and
    the downstream `loss`, and returns an EMA of the loss.
  """

  def mean_baseline(_, loss):
    with vs.variable_scope(name, default_name="MeanBaseline"):
      reduced_loss = math_ops.reduce_mean(loss)

      ema = training.ExponentialMovingAverage(decay=ema_decay, zero_debias=True)
      update_op = ema.apply([reduced_loss])

      with ops.control_dependencies([update_op]):
        # Using `identity` causes an op to be added in this context, which
        # triggers the update. Removing the `identity` means nothing is updated.
        baseline = array_ops.identity(ema.average(reduced_loss))

      return baseline

  return mean_baseline
monte_carlo.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _sample_mean(values):
  """Mean over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_mean(values, reduction_indices=[0])
entropy_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def test_fitting_two_dimensional_normal_n_equals_1000(self):
    # Minimizing Renyi divergence should allow us to make one normal match
    # another one exactly.
    n = 1000
    mu_true = np.array([1.0, -1.0], dtype=np.float64)
    chol_true = np.array([[2.0, 0.0], [0.5, 1.0]], dtype=np.float64)
    with self.test_session() as sess:
      target = distributions.MultivariateNormalCholesky(mu_true, chol_true)

      # Set up q distribution by defining mean/covariance as Variables
      mu = variables.Variable(
          np.zeros(mu_true.shape), dtype=mu_true.dtype, name='mu')
      mat = variables.Variable(
          np.zeros(chol_true.shape), dtype=chol_true.dtype, name='mat')
      chol = distributions.matrix_diag_transform(mat, transform=nn_ops.softplus)
      q = distributions.MultivariateNormalCholesky(mu, chol)
      for alpha in [0.25, 0.75]:

        negative_renyi_divergence = entropy.renyi_ratio(
            log_p=target.log_prob, q=q, n=n, alpha=alpha, seed=0)
        train_op = get_train_op(
            math_ops.reduce_mean(-negative_renyi_divergence),
            optimizer='SGD',
            learning_rate=0.5,
            decay=0.1)

        variables.global_variables_initializer().run()
        renyis = []
        for step in range(1000):
          sess.run(train_op)
          if step in [1, 5, 100]:
            renyis.append(negative_renyi_divergence.eval())

        # This optimization should maximize the negative Renyi divergence.
        _assert_monotonic_increasing(renyis, atol=0)

        # Relative tolerance (rtol) chosen to be 2 times as large as the minimum
        # needed to pass.
        self.assertAllClose(target.mu.eval(), q.mu.eval(), rtol=0.06)
        self.assertAllClose(target.sigma.eval(), q.sigma.eval(), rtol=0.02)
loss_ops_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testAddExternalLoss(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    losses = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    loss_ops.add_loss(math_ops.reduce_mean(losses))
    self.assertTrue(loss_ops.get_losses())
    total_loss = loss_ops.get_total_loss()
    with self.test_session():
      self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
      self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)
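
The expected values in the assertions follow from the standard hinge loss max(0, 1 - (2*label - 1) * logit), assuming `loss_ops.hinge_loss` maps 0/1 labels to -1/+1 as usual. A quick NumPy check of the arithmetic:

import numpy as np

logits = np.array([1.2, 0.4, -1.0, -1.1])
labels = np.array([1.0, 0.0, 0.0, 1.0])

hinge = np.maximum(0.0, 1.0 - (2.0 * labels - 1.0) * logits)
print(hinge)              # [0.  1.4 0.  2.1]
print(hinge.sum() / 4.0)  # 0.875 == 3.5 / 4.0, the expected total loss
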
loss_ops_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNoneLossCollection(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    losses = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
    self.assertFalse(loss_ops.get_losses())
    with self.test_session():
      self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)

