Python Optimizer() class: example source code

Source: model_deploy.py (project: isbi2017-part3, author: learningtitans)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
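
All of the `optimize_clones` variants collected on this page follow TF-Slim's `model_deploy` module. A minimal usage sketch follows; the `model_fn`, placeholder shapes, and the `model_deploy` import path are illustrative assumptions, not part of the sources above:

import tensorflow as tf
from deployment import model_deploy  # import path assumed; adjust to your layout

# Deploy two clones (e.g. two GPUs) on a single machine.
config = model_deploy.DeploymentConfig(num_clones=2)

with tf.Graph().as_default():
  # Placeholders stand in for a real input pipeline; each clone would
  # normally dequeue its own batch.
  inputs = tf.placeholder(tf.float32, [None, 784], name='inputs')
  labels = tf.placeholder(tf.int64, [None], name='labels')

  def model_fn(inputs, labels):
    # Hypothetical model: any function that adds losses to the graph works.
    logits = tf.layers.dense(inputs, 10)
    tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  clones = model_deploy.create_clones(config, model_fn, args=[inputs, labels])

  with tf.device(config.optimizer_device()):
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)

  # total_loss sums the per-clone losses (plus regularization); the gradient
  # for each variable is summed across clones.
  total_loss, grads_and_vars = model_deploy.optimize_clones(clones, optimizer)
  train_op = optimizer.apply_gradients(grads_and_vars)
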
Source: model_deploy.py (project: terngrad, author: wenwei202)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: tfmodel_deploy.py (project: antgo, author: jianzfb)
def optimize_clones(clones,
                    optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)

  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: YOLO2TensorFlow, author: PaulChongPeng)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: tensorflow_yolo2, author: wenxichen)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: optimizer.py (project: tefla, author: openAGI)
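# Note: this method lives on a MovingAverageOptimizer-style class; it assumes
# module-level imports such as `import six`, `import tensorflow as tf`, and
# `from tensorflow.python.training import saver`.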
def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
        """Create a saver swapping moving averages and variables.

        You should use this saver during training.  It will save the moving averages
        of the trained parameters under the original parameter names.  For
        evaluations or inference you should use a regular saver and it will
        automatically use the moving averages for the trained variables.

        You must call this function after all variables have been created and after
        you have called Optimizer.minimize().

        Args:
          var_list: List of variables to save, as per `Saver()`.
                    If set to None, will save all the variables that have been
                    created before this call.
          name: The name of the saver.
          **kwargs: Keyword arguments of `Saver()`.

        Returns:
          A `tf.train.Saver` object.

        Raises:
          RuntimeError: If apply_gradients or minimize has not been called before.
        """

        if self._variable_map is None:
            raise RuntimeError('Must call apply_gradients or minimize before '
                               'creating the swapping_saver')
        if var_list is None:
            var_list = tf.global_variables()
        if not isinstance(var_list, dict):
            var_list = saver.BaseSaverBuilder.OpListToDict(var_list)
        # Now swap variables and moving averages
        swapped_var_list = {}
        for k, v in six.iteritems(var_list):
            v_swap = self._variable_map.get(v.op.name, None)
            if v_swap:
                swapped_var_list[k] = v_swap
            else:
                swapped_var_list[k] = v
        # Build the swapping saver.
        return saver.Saver(swapped_var_list, name=name, **kwargs)
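
A short usage sketch of the swapping-saver pattern (illustrated here with `tf.contrib.opt.MovingAverageOptimizer`, which exposes the same `swapping_saver` API; the checkpoint path is hypothetical):

import tensorflow as tf

# Wrap a base optimizer so that it also maintains moving averages of the
# trained variables.
base_opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf.contrib.opt.MovingAverageOptimizer(base_opt, average_decay=0.999)

x = tf.Variable(5.0, name='x')
loss = tf.square(x)
train_op = opt.minimize(loss)  # must run before swapping_saver()

# Checkpoints written by this saver store the *averaged* values under the
# original variable names, so a plain tf.train.Saver restores them for eval.
swap_saver = opt.swapping_saver()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(100):
    sess.run(train_op)
  swap_saver.save(sess, '/tmp/model.ckpt')  # hypothetical path
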
Source: model_deploy.py (project: segmentation-models, author: desimone)
def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs):
    """Compute clone losses and gradients for the given list of `Clones`.

      Note: The regularization_losses are added to the first clone losses.

      Args:
       clones: List of `Clones` created by `create_clones()`.
       optimizer: An `Optimizer` object.
       regularization_losses: Optional list of regularization losses. If None it
         will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
         exclude them.
       **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

      Returns:
       A tuple (total_loss, grads_and_vars).
         - total_loss: A Tensor containing the average of the clone losses including
           the regularization loss.
         - grads_and_vars: A List of tuples (gradient, variable) containing the sum
           of the gradients for each variable.

      """
    grads_and_vars = []
    clones_losses = []
    num_clones = len(clones)
    if regularization_losses is None:
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
    for clone in clones:
        with tf.name_scope(clone.scope):
            clone_loss, clone_grad = _optimize_clone(optimizer, clone,
                                                     num_clones,
                                                     regularization_losses,
                                                     **kwargs)
            if clone_loss is not None:
                clones_losses.append(clone_loss)
                grads_and_vars.append(clone_grad)
            # Only use regularization_losses for the first clone
            regularization_losses = None
    # Compute the total_loss summing all the clones_losses.
    total_loss = tf.add_n(clones_losses, name='total_loss')
    # Sum the gradients across clones.
    grads_and_vars = _sum_clones_gradients(grads_and_vars)
    return total_loss, grads_and_vars
Source: model_deploy.py (project: shuttleNet, author: shiyemin)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: Embarrassingly-Parallel-Image-Classification, author: Azure)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: the-neural-perspective, author: GokuMohandas)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: MobileNet, author: Zehaos)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: Deep-Fashion, author: TomPyonsuke)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: TensorFlowOnSpark, author: yahoo)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: Densenet, author: bysowhat)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: Densenet, author: bysowhat)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: hops-tensorflow, author: hopshadoop)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: tensorflow-pspnet, author: pudae)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: the-neural-perspective, author: johnsonc)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: places365-tf, author: baileyqbb)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
Source: model_deploy.py (project: tensorflow-densenet, author: pudae)
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
   clones: List of `Clones` created by `create_clones()`.
   optimizer: An `Optimizer` object.
   regularization_losses: Optional list of regularization losses. If None it
     will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
     exclude them.
   **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
   A tuple (total_loss, grads_and_vars).
     - total_loss: A Tensor containing the average of the clone losses including
       the regularization loss.
     - grads_and_vars: A List of tuples (gradient, variable) containing the sum
       of the gradients for each variable.

  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars

