Python: example source code using the Optimizer() class

model_deploy.py (project: isbi2017-part3, author: learningtitans)
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute losses and gradients for a single clone.

  Args:
    optimizer: A tf.train.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization losses
      to add to the clone losses.
    **kwargs: Keyword arguments to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone.  Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
  """
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
  return sum_loss, clone_grad
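For context, here is a minimal driving sketch in the same TF 1.x style as these snippets. `Clone` and `_gather_clone_loss` are defined elsewhere in slim's model_deploy.py, so the stubs below are simplified assumptions for illustration, not the library's exact code.

import collections
import tensorflow as tf  # assumes the TF 1.x API used throughout these snippets

# Assumption: slim's Clone namedtuple carries (outputs, scope, device).
Clone = collections.namedtuple('Clone', ['outputs', 'scope', 'device'])

def _gather_clone_loss(clone, num_clones, regularization_losses):
    """Simplified stand-in: scale the clone's losses by 1/num_clones,
    then add the regularization losses."""
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
        all_losses.append(tf.add_n(clone_losses) / num_clones)
    if regularization_losses:
        all_losses.append(tf.add_n(regularization_losses))
    return tf.add_n(all_losses) if all_losses else None

# Build a trivial one-variable model on a single clone and fetch its gradients.
x = tf.get_variable('x', shape=[], initializer=tf.ones_initializer())
tf.add_to_collection(tf.GraphKeys.LOSSES, tf.square(x))
clone = Clone(outputs=None, scope='', device='/cpu:0')
optimizer = tf.train.GradientDescentOptimizer(0.1)
loss, grads_and_vars = _optimize_clone(
    optimizer, clone, num_clones=1,
    regularization_losses=tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))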
The same _optimize_clone function appears verbatim in the following projects as well, differing only in file name or indentation: terngrad (wenwei202), antgo (jianzfb, as tfmodel_deploy.py), YOLO2TensorFlow (PaulChongPeng), tensorflow_yolo2 (wenxichen), segmentation-models (desimone), shuttleNet (shiyemin), Embarrassingly-Parallel-Image-Classification (Azure), the-neural-perspective (GokuMohandas and johnsonc), MobileNet (Zehaos), Deep-Fashion (TomPyonsuke), TensorFlowOnSpark (yahoo), Densenet (bysowhat, two copies), hops-tensorflow (hopshadoop), tensorflow-pspnet (pudae), places365-tf (baileyqbb), tensorflow-densenet (pudae), and ActionVLAD (rohitgirdhar).
optimizer.py (project: tefla, author: openAGI)
def __init__(self, opt):
        """Constructor.

        Args:
          opt: an instance of a class that implements tf.train.Optimizer.
        """
        if not isinstance(opt, optimizer.Optimizer):
            raise TypeError(
                'Supplied optimizer must be an instance of tf.train.Optimizer')
        self._opt = opt
        overridden_methods = ('_apply_dense', '_resource_apply_dense',
                              '_apply_sparse', '_resource_apply_sparse')
        for name in overridden_methods:
            fn = getattr(self._opt, name)
            wrapper = _get_wrapper(fn, self._opt)
            setattr(self._opt, name, wrapper)
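`_get_wrapper` is defined elsewhere in tefla and is not shown here. Whatever it does internally, it has to return something that behaves as a bound method of the wrapped optimizer. A hypothetical pass-through version, with the interception point marked, might look like this:

import types

def _get_wrapper(fn, opt):
    """Hypothetical stand-in. `fn` is the original bound method (e.g.
    _apply_dense); the wrapper is re-bound to `opt` so that setattr
    installs it as an instance method."""
    def wrapper(self, grad, *args, **kwargs):
        # Per-update logic (masking, clipping, or logging the gradient)
        # would run here before delegating to the original apply op.
        return fn(grad, *args, **kwargs)
    return types.MethodType(wrapper, opt)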
optimizer.py (project: tefla, author: openAGI)
def __init__(self, opt, average_decay=0.9999, num_updates=None,
                 sequential_update=True):
        """Construct a new MovingAverageOptimizer.

        Args:
          opt: A tf.train.Optimizer that will be used to compute and apply gradients.
          average_decay: Float.  Decay to use to maintain the moving averages
                         of trained variables.
                         See tf.train.ExponentialMovingAverage for details.
          num_updates: Optional count of number of updates applied to variables.
                       See tf.train.ExponentialMovingAverage for details.
          sequential_update: Bool. If False, will compute the moving average at the
                             same time as the model is updated, potentially doing
                             benign data races.
                             If True, will update the moving average after gradient
                             updates.
        """
        self._optimizer = opt
        self._ema = tf.train.ExponentialMovingAverage(
            average_decay, num_updates=num_updates)
        self._variable_map = None
        self._sequential_update = sequential_update
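Only the constructor is shown here. Assuming the rest of the class matches tf.contrib.opt.MovingAverageOptimizer (which also provides apply_gradients and swapping_saver), typical usage looks like this sketch:

w = tf.get_variable('w', shape=[2], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w - 1.0))
opt = MovingAverageOptimizer(tf.train.AdamOptimizer(1e-3), average_decay=0.999)
train_op = opt.minimize(loss)  # applies gradients, then updates the moving averages
saver = opt.swapping_saver()   # checkpoints averaged values under the raw variable names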
networks.py (project: comprehend, author: Fenugreek)
def init_train_args(self, mode='recode'):
        # To be used for training by tf.Optimizer objects.
        self.train_args = [tf.placeholder(self.dtype,
                                          shape=[None, self.n_visible])]
        if mode == 'target':
            self.train_args.append(tf.placeholder(self.dtype,
                                                  shape=[None, self.n_hidden]))
        elif mode == 'label':
            self.train_args.append(tf.placeholder(tf.int32, shape=[None]))

        return self.train_args
networks.py (project: comprehend, author: Fenugreek)
def init_train_args(self, mode='recode'):
        # To be used for training by tf.Optimizer objects.
        self.train_args = [tf.placeholder(self.dtype,
                                          shape=[None] + self.shapes[0])]
        if mode == 'target':
            h_shape = self.output_shape()
            self.train_args.append(tf.placeholder(self.dtype,
                                                  shape=[None] + h_shape[1:]))
        elif mode == 'label':
            self.train_args.append(tf.placeholder(tf.int32, shape=[None]))

        return self.train_args
networks.py (project: comprehend, author: Fenugreek)
def init_train_args(self, mode='recode'):
        # To be used for training by tf.Optimizer objects.
        self.train_args = [tf.placeholder(self.dtype, name='train',
                                          shape=[None, self.n_visible, self.seq_length])]
        if mode == 'target':
            self.train_args.append(tf.placeholder(self.dtype,
                                                  shape=[None, self.n_output]))
        elif mode == 'label':
            self.train_args.append(tf.placeholder(tf.int32, shape=[None]))

        return self.train_args
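All three variants return a list of placeholders matching the chosen training mode. A hedged usage sketch follows; `net`, its `cost` method, `sess`, and the batch arrays are hypothetical names, not part of the snippets above:

train_args = net.init_train_args(mode='label')        # [inputs, labels]
loss = net.cost(*train_args)                          # hypothetical loss method
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
sess.run(train_op, feed_dict=dict(zip(train_args, [batch_x, batch_y])))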
builders.py (project: dynamic-training-bench, author: galeone)
def build_optimizer(args, steps, global_step):
    """Build the specified optimizer, log the learning rate and enalble
    learning rate decay is specified.
    Args:
        args: the optimization argument dict
        global_step: integer tensor, the current training step
    Returns:
        optimizer: tf.Optimizer object initialized
    """
    # Extract the initial learning rate
    initial_lr = float(args["gd"]["args"]['learning_rate'])

    if args["lr_decay"]["enabled"]:
        # Decay the learning rate exponentially based on the number of steps.
        learning_rate = tf.train.exponential_decay(
            initial_lr,
            global_step,
            steps["decay"],
            args["lr_decay"]["factor"],
            staircase=True)
        # Update the learning rate parameter of the optimizer
        args["gd"]["args"]['learning_rate'] = learning_rate
        # Log the learning rate
        tf_log(tf.summary.scalar('learning_rate', learning_rate))
    else:
        learning_rate = tf.constant(initial_lr)

    # Instantiate the optimizer
    optimizer = args["gd"]["optimizer"](**args["gd"]["args"])
    return optimizer
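The nested dict layout that build_optimizer expects can be read off the function body. A hypothetical configuration (with decay disabled here, since the decay branch also calls the module's tf_log helper):

args = {
    "gd": {"optimizer": tf.train.MomentumOptimizer,
           "args": {"learning_rate": 0.1, "momentum": 0.9}},
    "lr_decay": {"enabled": False, "factor": 0.1},
}
steps = {"decay": 10000}   # decay period in steps, used when lr_decay is enabled
global_step = tf.train.get_or_create_global_step()
optimizer = build_optimizer(args, steps, global_step)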
optimizers.py (project: tflearn, author: tflearn)
def build(self, step_tensor=None):
        """ build optimizer tensor.

        This method creates the optimizer with specified parameters. It must
        be implemented for every `Optimizer`.

        Arguments:
            step_tensor: `tf.Tensor`. A variable holding the training step.
                Only necessary when optimizer has a learning rate decay.

        """
        raise NotImplementedError
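A hedged sketch of a concrete subclass, assuming the tflearn base class stores learning_rate, use_locking, and name, and that get_tensor (below) reads the built and tensor attributes:

class SGD(Optimizer):
    """Minimal tflearn-style optimizer wrapping GradientDescentOptimizer."""

    def __init__(self, learning_rate=0.001, use_locking=False, name='SGD'):
        super(SGD, self).__init__(learning_rate, use_locking, name)

    def build(self, step_tensor=None):
        # No learning rate decay here, so step_tensor is unused.
        self.built = True
        self.tensor = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate,
            use_locking=self.use_locking, name=self.name)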
optimizers.py (project: tflearn, author: tflearn)
def get_tensor(self):
        """ get_tensor.

        A method to retrieve the optimizer tensor.

        Returns:
            The `Optimizer`.

        """
        if not self.built:
            self.build()
        return self.tensor
optimizers.py (project: tflearn, author: tflearn)
def __call__(self):
        """ __call__

        A shortcut for `get_tensor`. Retrieve the optimizer tensor.

        Returns:
            The `Optimizer`.

        """
        return self.get_tensor()
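With the SGD sketch above, the two access paths are equivalent; the first call triggers the lazy build:

sgd = SGD(learning_rate=0.01)
tf_opt = sgd()                     # same as sgd.get_tensor()
train_op = tf_opt.minimize(loss)   # loss: any scalar loss tensor in the graph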
optimizer.py (project: tefla, author: openAGI)
def __init__(self,
                 opt,
                 vars_to_clip_dims,
                 max_norm,
                 use_locking=False,
                 colocate_clip_ops_with_vars=False,
                 name="VariableClipping"):
        """Construct a new clip-norm optimizer.

        Args:
          opt: The actual optimizer that will be used to compute and apply the
            gradients. Must be one of the Optimizer classes.
          vars_to_clip_dims: A dict with keys as Variables and values as lists
            of dimensions along which to compute the L2-norm.  See
            `tf.clip_by_norm` for more details.
          max_norm: The L2-norm to clip to, for all variables specified.
          use_locking: If `True` use locks for clip update operations.
          colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
            ops with the corresponding variable.
          name: Optional name prefix for the operations created when applying
            gradients.  Defaults to "VariableClipping".
        """
        super(VariableClippingOptimizer, self).__init__(use_locking, name)
        self._opt = opt
        # Defensive copy of input dict
        self._vars_to_clip_dims = {
            var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
        self._max_norm = max_norm
        self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
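Only the constructor is shown. Assuming the rest of the class applies the clipping after each update, as in tf.contrib.opt.VariableClippingOptimizer, here is a usage sketch that keeps every embedding row at L2 norm at most 1.0:

embeddings = tf.get_variable('emb', shape=[10000, 64])
ids = tf.constant([1, 2, 3])
loss = tf.reduce_sum(tf.square(tf.nn.embedding_lookup(embeddings, ids)))

opt = VariableClippingOptimizer(
    tf.train.AdagradOptimizer(0.1),
    vars_to_clip_dims={embeddings: [1]},  # norm along the feature axis
    max_norm=1.0)
train_op = opt.minimize(loss)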

