Example source code for the Python class NesterovAG()

# The snippets on this page use Chainer's optimizer classes; they assume
# imports along these lines (some variants also reference a custom Eve
# optimizer defined in the original projects):
import chainer
from chainer import optimizers

def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
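A minimal sketch of how a decay helper like this might be driven once per epoch; opt, model, n_epochs, and train_one_epoch are hypothetical names used only for illustration:

for epoch in range(n_epochs):
    train_one_epoch(model, opt)                              # hypothetical training routine
    decay_learning_rate(opt, factor=0.98, final_value=1e-5)  # multiplicative decay with a floor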
def get_optimizer(self, name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "smorms3":
        return optimizers.SMORMS3(lr=lr)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
def decrease_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return
        opt.alpha *= factor
        return
    raise NotImplementedError()
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        # Eve is a custom optimizer defined elsewhere in the source project.
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
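The factory above only constructs an optimizer; a minimal usage sketch follows, where the L.Linear link is a placeholder model rather than anything from the original project:

import chainer.links as L

model = L.Linear(784, 10)             # placeholder model, any chainer.Link works
opt = get_optimizer("adam", lr=1e-3)  # momentum defaults to 0.9
opt.setup(model)                      # bind the optimizer to the link's parameters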
def update_momentum(self, momentum):
    if isinstance(self.optimizer, optimizers.Adam):
        self.optimizer.beta1 = momentum
        return
    if isinstance(self.optimizer, Eve):
        self.optimizer.beta1 = momentum
        return
    if isinstance(self.optimizer, optimizers.AdaDelta):
        self.optimizer.rho = momentum
        return
    if isinstance(self.optimizer, optimizers.NesterovAG):
        self.optimizer.momentum = momentum
        return
    if isinstance(self.optimizer, optimizers.RMSprop):
        self.optimizer.alpha = momentum
        return
    if isinstance(self.optimizer, optimizers.MomentumSGD):
        self.optimizer.momentum = momentum
        return
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return chainer.optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return chainer.optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return chainer.optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return chainer.optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return chainer.optimizers.SGD(lr=lr)
    raise Exception()
def update_momentum(self, momentum):
    if isinstance(self.optimizer, optimizers.Adam):
        self.optimizer.beta1 = momentum
        return
    if isinstance(self.optimizer, Eve):
        self.optimizer.beta1 = momentum
        return
    if isinstance(self.optimizer, optimizers.AdaDelta):
        self.optimizer.rho = momentum
        return
    if isinstance(self.optimizer, optimizers.NesterovAG):
        self.optimizer.momentum = momentum
        return
    if isinstance(self.optimizer, optimizers.RMSprop):
        self.optimizer.alpha = momentum
        return
    if isinstance(self.optimizer, optimizers.MomentumSGD):
        self.optimizer.momentum = momentum
        return
def update_momentum(self, momentum):
    if isinstance(self._optimizer, optimizers.Adam):
        self._optimizer.beta1 = momentum
        return
    if isinstance(self._optimizer, Eve):
        self._optimizer.beta1 = momentum
        return
    if isinstance(self._optimizer, optimizers.AdaDelta):
        self._optimizer.rho = momentum
        return
    if isinstance(self._optimizer, optimizers.NesterovAG):
        self._optimizer.momentum = momentum
        return
    if isinstance(self._optimizer, optimizers.RMSprop):
        self._optimizer.alpha = momentum
        return
    if isinstance(self._optimizer, optimizers.MomentumSGD):
        self._optimizer.momentum = momentum
        return
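A minimal sketch of how update_momentum might be used to anneal the momentum-like coefficient during training; trainer (whatever object exposes the method above) and n_epochs are assumptions, not part of the original code:

start, end = 0.5, 0.9
for epoch in range(n_epochs):
    # linearly anneal from `start` to `end` across epochs
    trainer.update_momentum(start + (end - start) * epoch / max(n_epochs - 1, 1))
    # ... run one epoch of training here ...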
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
def get_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.MomentumSGD):
        return opt.lr
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    raise NotImplementedError()
def set_learning_rate(opt, lr):
    if isinstance(opt, optimizers.NesterovAG):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.MomentumSGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.SGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.Adam):
        opt.alpha = lr
        return
    raise NotImplementedError()
def set_momentum(opt, momentum):
    if isinstance(opt, optimizers.NesterovAG):
        opt.momentum = momentum
        return
    if isinstance(opt, optimizers.MomentumSGD):
        opt.momentum = momentum
        return
    if isinstance(opt, optimizers.SGD):
        # plain SGD has no momentum term, so there is nothing to set
        return
    if isinstance(opt, optimizers.Adam):
        opt.beta1 = momentum
        return
    raise NotImplementedError()
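A minimal sketch of linear learning-rate warmup built on the setters above; opt, base_lr, and warmup_steps are placeholder names, not part of the original snippets:

base_lr = 0.1
for step in range(warmup_steps):
    set_learning_rate(opt, base_lr * (step + 1) / warmup_steps)
    # ... compute gradients and call opt.update() here ...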
def get_optimizer(name, lr, momentum):
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    if name == "msgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    raise NotImplementedError()
def get_current_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.MomentumSGD):
        return opt.lr
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    raise NotImplementedError()
From test_optimizers_by_linear_model.py in the chainer-deconv project (author: germanRos):

def create(self):
    return optimizers.NesterovAG(0.1)
def get_optimizer(name, lr, momentum):
    name = name.lower()
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    if name == "msgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    raise NotImplementedError()
def get_current_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    raise NotImplementedError()
def get_optimizer(name, lr, momentum):
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    raise NotImplementedError()
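A minimal end-to-end sketch tying one of the factories and the rate helpers together; the model, random data, and hyperparameters below are placeholders chosen for illustration, not part of the original snippets:

import numpy as np
import chainer.functions as F
import chainer.links as L

model = L.Linear(4, 1)                                   # toy model
opt = get_optimizer("nesterov", lr=0.01, momentum=0.9)
opt.setup(model)

x = np.random.rand(8, 4).astype(np.float32)              # toy inputs
t = np.random.rand(8, 1).astype(np.float32)              # toy targets

for epoch in range(10):
    model.cleargrads()                                   # reset accumulated gradients
    loss = F.mean_squared_error(model(x), t)
    loss.backward()
    opt.update()                                         # apply one NesterovAG step
    print(epoch, float(loss.data), get_current_learning_rate(opt))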