import logging

import chainer


def init_optimizer(args, mdl):
    """Create an Adam optimizer for ``mdl`` with gradient clipping and weight decay."""
    logger = logging.getLogger(__name__)
    logger.info('Making Adam optimizer:')
    logger.info('* learning rate = %f', args.learning_rate)
    logger.info('* gradient clipping = %f', args.gradient_clipping)
    logger.info('* weight decay = %f', args.weight_decay)
    # Adam's base step size is set through the ``alpha`` argument.
    opt = chainer.optimizers.Adam(alpha=args.learning_rate)
    opt.setup(mdl)
    # Hooks run on every update: clip the global gradient L2 norm, then apply L2 weight decay.
    opt.add_hook(chainer.optimizer.GradientClipping(args.gradient_clipping))
    opt.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))
    return opt
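
# A minimal usage sketch (not from the original source): the argparse flag names
# and the toy Linear model below are assumptions, chosen only to supply the
# ``learning_rate``, ``gradient_clipping``, and ``weight_decay`` attributes that
# init_optimizer reads from ``args``.
if __name__ == '__main__':
    import argparse

    import chainer.links as L

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('--learning-rate', type=float, default=1e-3)
    parser.add_argument('--gradient-clipping', type=float, default=5.0)
    parser.add_argument('--weight-decay', type=float, default=1e-4)
    args = parser.parse_args()

    model = L.Linear(10, 2)  # placeholder model; any chainer.Link works here
    optimizer = init_optimizer(args, model)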