python类MomentumSGD()的实例源码

nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9, weight_decay=5.0e-4):
        """Configure momentum-SGD with weight decay for the wide-res model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably where lr is lowered; confirm
                against the scheduling code elsewhere in this file.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerWideRes, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): the original shadowed the float argument, so this attribute
        # holds the WeightDecay hook object rather than the scalar — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        """Configure momentum-SGD with weight decay for the swapout model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerSwapout, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): stores the WeightDecay hook object (argument was shadowed
        # in the original), not the scalar — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5, period=2):
        """Configure momentum-SGD with weight decay for the Xception model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            period: stored as int — presumably an lr-decay interval in epochs; confirm.
        """
        super(OptimizerXception, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
        self.period = int(period)
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
        """Configure momentum-SGD with weight decay for the GoogLeNet model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerGooglenet, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4, schedule=(int(1.0e5 / (50000. / 128)), )):
        """Configure momentum-SGD with weight decay for the network-in-network model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            schedule: single milestone derived from 1e5 iterations at batch 128
                over 50000 samples — presumably an epoch index; confirm.
        """
        super(OptimizerNetworkInNetwork, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
        self.schedule = schedule
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
        """Configure momentum-SGD with weight decay for the GoogLeNet-v2 model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerGooglenetV2, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:resnext 作者: nutszebra 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=5.0e-4, schedule=(150, 225)):
        """Configure momentum-SGD with weight decay for the ResNeXt model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            schedule: epoch milestones — presumably lr-drop points; confirm.
        """
        super(OptimizerResNext, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(int(32000. / (50000. / 128)), int(48000. / (50000. / 128))), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
        """Configure momentum-SGD with weight decay and a warm-up phase for ResNet.

        Args:
            model: network handed to the parent constructor.
            schedule: milestones derived from 32k/48k iterations at batch 128 over
                50000 samples — presumably epoch indices; confirm.
            lr: target learning rate after warm-up (stored, not applied here).
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            warm_up_lr: rate the optimizer actually starts with.
        """
        super(OptimizerResnet, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        # The optimizer is seeded with the warm-up rate; self.lr keeps the target rate.
        sgd = optimizers.MomentumSGD(warm_up_lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.warmup_lr = warm_up_lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(150, 225), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        """Configure momentum-SGD with weight decay for the DenseNet model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerDense, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9, weight_decay=5.0e-4):
        """Configure momentum-SGD with weight decay for the wide-res model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerWideRes, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        """Configure momentum-SGD with weight decay for the swapout model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerSwapout, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5, period=2):
        """Configure momentum-SGD with weight decay for the Xception model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            period: stored as int — presumably an lr-decay interval in epochs; confirm.
        """
        super(OptimizerXception, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
        self.period = int(period)
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
        """Configure momentum-SGD with weight decay for the GoogLeNet model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerGooglenet, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4, schedule=(int(1.0e5 / (50000. / 128)), )):
        """Configure momentum-SGD with weight decay for the network-in-network model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            schedule: single milestone derived from 1e5 iterations at batch 128
                over 50000 samples — presumably an epoch index; confirm.
        """
        super(OptimizerNetworkInNetwork, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
        self.schedule = schedule
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
        """Configure momentum-SGD with weight decay for the GoogLeNet-v2 model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerGooglenetV2, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=5.0e-4, schedule=(150, 225)):
        """Configure momentum-SGD with weight decay for the ResNeXt model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
            schedule: epoch milestones — presumably lr-drop points; confirm.
        """
        super(OptimizerResNext, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.02, momentum=0.9, schedule=(150, 225, 300, 375)):
        """Configure plain momentum-SGD (no weight decay) for the FractalNet model.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            schedule: epoch milestones — presumably lr-drop points; confirm.
        """
        super(OptimizerFractalNet, self).__init__(model)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, model=None, lr=0.01, momentum=0.9, schedule=(150, 225), weight_decay=5.0e-4):
        """Configure momentum-SGD with weight decay for the PReLU-net model.

        Unlike sibling classes, the decay hook is registered but not kept as an
        attribute.

        Args:
            model: network handed to the parent constructor.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerPReLUNet, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(42, 62), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        """Configure momentum-SGD with weight decay for the resnet-in-resnet model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerResnetInResnet, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook
nutszebra_optimizer.py 文件源码 项目:neural_architecture_search_with_reinforcement_learning_appendix_a 作者: nutszebra 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self, model=None, schedule=(150, 175), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        """Configure momentum-SGD with weight decay for the appendix-A model.

        Args:
            model: network handed to the parent constructor.
            schedule: epoch milestones — presumably lr-drop points; confirm.
            lr: initial learning rate.
            momentum: SGD momentum coefficient.
            weight_decay: L2 regularization strength.
        """
        super(OptimizerAppendixA, self).__init__(model)
        decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        sgd = optimizers.MomentumSGD(lr, momentum)
        sgd.setup(self.model)
        sgd.add_hook(decay_hook)
        self.optimizer = sgd
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        # NOTE(review): holds the WeightDecay hook object, not the scalar (the
        # original shadowed the argument) — kept as-is.
        self.weight_decay = decay_hook


问题


面经


文章

微信
公众号

扫码关注公众号