Example source code for the Python class optimizers()

Source file: nutszebra_chainer.py (project: trainer, author: nutszebra)
def save_optimizer(self, optimizer, path=''):
        """Save optimizer model

        Example:

        ::

            path = './test.optimizer'
            self.save_optimizer(optimizer, path)

        Args:
            optimizer (chainer.Optimizer): optimizer whose state is saved
            path (str): destination path; if '', an epoch-based name is used

        Returns:
            bool: True if saving succeeded
        """

        # generate an epoch-based default file name when no path is given
        if path == '':
            path = str(self.nz_save_optimizer_epoch) + '.optimizer'
        # increment the optimizer-save counter
        self.nz_save_optimizer_epoch += 1
        serializers.save_npz(path, optimizer)
        return True
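For reference, a minimal save-and-restore sketch using Chainer's NPZ serializers; `model` stands for any chainer.Link and the file name is illustrative:

from chainer import optimizers, serializers

optimizer = optimizers.Adam()
optimizer.setup(model)          # model: any chainer.Link / Chain (assumed)

# persist the optimizer state (Adam moments, step count, ...) to disk
serializers.save_npz('./test.optimizer', optimizer)

# later: restore the state into a freshly set-up optimizer of the same type
restored = optimizers.Adam()
restored.setup(model)
serializers.load_npz('./test.optimizer', restored)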
Source file: image_converter.py (project: neural_style_synthesizer, author: dwango)
def __init__(self, gpu=-1, optimizer=None, model=None, content_weight=1, texture_weight=1, average_pooling=False):
        self.content_weight = content_weight
        self.texture_weight = texture_weight
        self.average_pooling = average_pooling
        if optimizer is None:
            self.optimizer = chainer.optimizers.Adam(alpha=4.0)
        else:
            self.optimizer = optimizer
        if model is None:
            self.model = neural_art.utility.load_nn("vgg")
        else:
            self.model = model

        if gpu >= 0:
            # run on the given GPU: switch to CuPy and move model weights over
            chainer.cuda.get_device(gpu).use()
            self.xp = chainer.cuda.cupy
            self.model.model.to_gpu()
        else:
            # CPU fallback: plain NumPy arrays
            self.xp = numpy
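A hedged usage sketch; `ImageConverter` is an assumed name for the class this `__init__` belongs to (the file is image_converter.py), and the array shape is illustrative:

# CPU construction with the default Adam(alpha=4.0); pass gpu=0 to use CUDA
converter = ImageConverter(gpu=-1)

# the xp alias lets the rest of the class create arrays backend-agnostically
noise = converter.xp.random.uniform(-1, 1, (1, 3, 256, 256)).astype('f')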
Source file: trainer.py (project: NlpUtil, author: trtd56)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
        if opt_name == "Adam":
            opt = getattr(optimizers, opt_name)()
        else:
            opt = getattr(optimizers, opt_name)(lr)
        opt.setup(self.model)
        opt.add_hook(optimizer.GradientClipping(g_clip))

        updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
        self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)
        self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))
        self.trainer.extend(extensions.dump_graph('main/loss'))
        self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
        self.trainer.extend(extensions.LogReport())
        self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                                   'epoch', file_name='loss.png'))
        self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                                                   'epoch', file_name='accuracy.png'))
        self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss',
                                                    'main/accuracy', 'validation/main/accuracy',
                                                    'elapsed_time']))
        self.trainer.extend(extensions.ProgressBar())
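A sketch of driving this method, assuming `t` is an instance of the surrounding wrapper class (hypothetical setup) that already holds `self.model`, `self.train_iter`, and `self.test_iter`; argument values are illustrative:

t.set_trainer(out_dir='result', gpu=-1, n_epoch=20,
              g_clip=5.0, opt_name='SGD', lr=0.01)
t.trainer.run()   # run training with the extensions configured above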
Source file: builder.py (project: lencon, author: kiyukuta)
def _build_optimizer(self, config):
        # all values except the optimizer name are cast to float hyperparameters
        kwargs = {k: float(v) for k, v in config.items() if k != 'name'}
        o = getattr(chainer.optimizers, config['name'])(**kwargs)
        return o
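With this builder, a config mapping such as the following (values may arrive as strings from a config file, hence the float() cast; keys are illustrative) yields a fully constructed optimizer:

config = {'name': 'Adam', 'alpha': '0.0004', 'beta1': '0.9'}
opt = builder._build_optimizer(config)   # builder: instance of this class (assumed)
# equivalent to chainer.optimizers.Adam(alpha=0.0004, beta1=0.9)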
Source file: train_util.py (project: chainer_nmt, author: odashi)
def init_optimizer(args, mdl):
  logger = logging.getLogger(__name__)

  logger.info('Making Adam optimizer:')
  logger.info('* learning rate = %f', args.learning_rate)
  logger.info('* gradient clipping = %f', args.gradient_clipping)
  logger.info('* weight decay = %f', args.weight_decay)

  opt = chainer.optimizers.Adam(alpha=args.learning_rate)
  opt.setup(mdl)
  opt.add_hook(chainer.optimizer.GradientClipping(args.gradient_clipping))
  opt.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

  return opt
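Since `init_optimizer` only reads three attributes from `args`, a plain namespace is enough to exercise it; the values below are illustrative:

import argparse

args = argparse.Namespace(learning_rate=0.001,
                          gradient_clipping=5.0,
                          weight_decay=1e-6)
opt = init_optimizer(args, mdl)   # mdl: any chainer.Link / Chain (assumed)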
Source file: optimizer.py (project: chainercmd, author: mitmul)
def get_optimizer(model, method, optimizer_args, weight_decay=None):
    optimizer = getattr(optimizers, method)(**optimizer_args)
    optimizer.setup(model)
    if weight_decay is not None:
        optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
    return optimizer
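For example, momentum SGD with weight decay could be requested like this (hyperparameter values are illustrative):

opt = get_optimizer(model, 'MomentumSGD',
                    {'lr': 0.01, 'momentum': 0.9},
                    weight_decay=1e-4)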
Source file: train.py (project: chainer-pspnet, author: mitmul)
def get_optimizer_from_config(model, config):
    opt_config = Optimizer(**config['optimizer'])
    optimizer = getattr(optimizers, opt_config.method)(**opt_config.args)
    optimizer.setup(model)
    if opt_config.weight_decay is not None:
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(opt_config.weight_decay))
    return optimizer
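`Optimizer` here is a project-specific config wrapper; judging from the attributes accessed above, the relevant part of `config` would look roughly like this (a hedged sketch, keys inferred from the code):

config = {
    'optimizer': {
        'method': 'MomentumSGD',                 # name of a chainer.optimizers class
        'args': {'lr': 0.01, 'momentum': 0.9},   # kwargs for that class
        'weight_decay': 0.0001,                  # or None to skip the hook
    },
}
optimizer = get_optimizer_from_config(model, config)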
Source file: train_mn.py (project: chainer-pspnet, author: mitmul)
def get_optimizer(model, method, optimizer_args, weight_decay=None):
    optimizer = getattr(optimizers, method)(**optimizer_args)
    optimizer.setup(model)
    if weight_decay is not None:
        optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
    return optimizer

