Example source code for the Python class Adam()
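
The snippets below, collected from open-source projects, show how Chainer's optimizers.Adam is constructed, attached to a model, and tuned. As a baseline, here is a minimal, self-contained sketch of the usual setup/update cycle (the model and data are placeholders, not taken from any project on this page):

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

model = L.Linear(4, 2)                   # placeholder model
optimizer = optimizers.Adam(alpha=1e-3)  # alpha is Adam's learning rate
optimizer.setup(model)                   # attach the optimizer to the model's parameters

x = np.random.randn(8, 4).astype(np.float32)
t = np.random.randint(0, 2, size=8).astype(np.int32)

model.cleargrads()                       # reset accumulated gradients
loss = F.softmax_cross_entropy(model(x), t)
loss.backward()                          # backpropagate into the parameters
optimizer.update()                       # apply one Adam step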

test_pgt.py (project: chainerrl, author: chainer)
def make_agent(self, env, gpu):
        model = self.make_model(env)
        policy = model['policy']
        q_func = model['q_function']

        actor_opt = optimizers.Adam(alpha=1e-4)
        actor_opt.setup(policy)

        critic_opt = optimizers.Adam(alpha=1e-3)
        critic_opt.setup(q_func)

        explorer = self.make_explorer(env)
        rbuf = self.make_replay_buffer(env)
        return self.make_pgt_agent(env=env, model=model,
                                   actor_opt=actor_opt, critic_opt=critic_opt,
                                   explorer=explorer, rbuf=rbuf, gpu=gpu)
test_ddpg.py (project: chainerrl, author: chainer)
def make_agent(self, env, gpu):
        model = self.make_model(env)
        policy = model['policy']
        q_func = model['q_function']

        actor_opt = optimizers.Adam(alpha=1e-4)
        actor_opt.setup(policy)

        critic_opt = optimizers.Adam(alpha=1e-3)
        critic_opt.setup(q_func)

        explorer = self.make_explorer(env)
        rbuf = self.make_replay_buffer(env)
        return self.make_ddpg_agent(env=env, model=model,
                                    actor_opt=actor_opt, critic_opt=critic_opt,
                                    explorer=explorer, rbuf=rbuf, gpu=gpu)
traintest.py (project: LSTMVAE, author: ashwatthaman)
def train(args, encdec, model_name_base="./{}/model/cvaehidden_kl_{}_{}_l{}.npz"):
    encdec.loadModel(model_name_base,args)
    if args.gpu >= 0:
        import cupy as cp
        global xp
        xp = cp
        encdec.to_gpu()

    optimizer = optimizers.Adam()
    optimizer.setup(encdec)
    for e_i in range(encdec.epoch_now, args.epoch):
        encdec.setEpochNow(e_i)
        loss_sum = 0
        for tupl in encdec.getBatchGen(args):
            loss = encdec(tupl)
            loss_sum += loss.data

            encdec.cleargrads()
            loss.backward()
            optimizer.update()
        print("epoch{}:loss_sum:{}".format(e_i, loss_sum))
        model_name = model_name_base.format(args.dataname, args.dataname, e_i, args.n_latent)
        serializers.save_npz(model_name, encdec)
prepare_train.py (project: chainer-faster-rcnn, author: mitmul)
def get_optimizer(model, opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)
    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    return optimizer
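
A hypothetical call site for the helper above (the model and hyperparameter values are illustrative, not taken from the project):

import chainer.links as L

model = L.Linear(10, 2)  # stand-in for the actual detection model
optimizer = get_optimizer(model, 'Adam', adam_alpha=1e-3,
                          adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-8)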
train_utils.py (project: chainer-segnet, author: pfnet-research)
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Record the weight decay coefficient on MomentumSGD (this helper does not call setup)
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer
grad_check.py (project: double-dqn, author: musyoku)
def backprop_check():
    xp = cuda.cupy if config.use_gpu else np
    duel = DDQN()

    state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)
    reward = [1, 0]
    action = [3, 4]
    episode_ends = [0, 0]
    next_state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)

    optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_conv.setup(duel.conv)
    optimizer_fc = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_fc.setup(duel.fc)

    for i in range(10000):
        duel.conv.cleargrads()
        duel.fc.cleargrads()
        loss, _ = duel.forward_one_step(state, action, reward, next_state, episode_ends)
        loss.backward()
        optimizer_conv.update()
        optimizer_fc.update()
        print(loss.data,
              duel.conv.layer_2.W.data[0, 0, 0, 0],
              duel.fc.layer_2.W.data[0, 0])
main.py (project: cnn-text-classification, author: marevol)
def create_classifier(n_vocab, doc_length, wv_size, filter_sizes, hidden_units, output_channel, initialW, non_static, batch_size, epoch, gpu):
    model = NNModel(n_vocab=n_vocab,
                    doc_length=doc_length,
                    wv_size=wv_size,
                    filter_sizes=filter_sizes,
                    hidden_units=hidden_units,
                    output_channel=output_channel,
                    initialW=initialW,
                    non_static=non_static)
#    optimizer = optimizers.Adam()
    optimizer = optimizers.AdaDelta()
    return (model, ChainerEstimator(model=SoftmaxCrossEntropyClassifier(model),
                                    optimizer=optimizer,
                                    batch_size=batch_size,
                                    device=gpu,
                                    stop_trigger=(epoch, 'epoch')))
optimizers.py (project: chainer-speech-recognition, author: musyoku)
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
optim.py (project: chainer-qrnn, author: musyoku)
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
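
A helper like this is typically called once per epoch to geometrically anneal the learning rate toward a floor. A usage sketch with illustrative values (the training loop body is elided):

from chainer import optimizers

opt = optimizers.Adam(alpha=1e-3)
for epoch in range(50):
    # ... run one epoch of training with opt ...
    # multiply Adam's alpha by 0.98 each epoch, stopping at 1e-5
    decay_learning_rate(opt, factor=0.98, final_value=1e-5)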
fp.py (project: nfp, author: pfnet)
def __init__(self, d, f, R, gpu):
        self.d = d
        self.f = f
        self.R = R
        self.gpu = gpu
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
        H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
        self.optimizer = optimizers.Adam()
        self.model = Chain(H=H, W=W, g=g)
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
        self.to = [[] for i in six.moves.range(2)]
        self.atom_sid = [[] for i in six.moves.range(2)]
        self.anum = [[] for i in six.moves.range(2)]
model.py (project: self-driving-cars, author: musyoku)
def __init__(self):
        Model.__init__(self)

        self.fc_value = self.build_network(output_dim=1)
        self.fc_advantage = self.build_network(output_dim=len(config.actions))

        self.optimizer_fc_value = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
        self.optimizer_fc_value.setup(self.fc_value)
        self.optimizer_fc_value.add_hook(optimizer.GradientClipping(10.0))

        self.optimizer_fc_advantage = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
        self.optimizer_fc_advantage.setup(self.fc_advantage)
        self.optimizer_fc_advantage.add_hook(optimizer.GradientClipping(10.0))

        self.load()
        self.update_target()
base_classifier.py (project: char-classify, author: ekatek)
def __init__(self, data, target, hidden_layers, model_filename=""):
        """ Must submit either a net configuration, or something to load from """
        if hidden_layers == [] and model_filename == "":
            raise Exception("Must provide a net configuration or a file to load from")

        """ Divide the data into training and test """
        self.trainsize = int(len(data) * 5 / 6)
        self.testsize = len(data) - self.trainsize
        self.x_train, self.x_test = np.split(data, [self.trainsize])
        self.y_train, self.y_test = np.split(target, [self.trainsize])

        """ Create the underlying neural network model """
        self.sizes = [len(data[0])]
        self.sizes.extend(hidden_layers)
        self.sizes.append(len(set(target)))
        self.model = L.Classifier(BaseNetwork(self.sizes))

        """ Create the underlying optimizer """
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
chain.py (project: ddnn, author: kunglab)
def get_optimizer(self, name, lr, momentum=0.9):
        if name.lower() == "adam":
            return optimizers.Adam(alpha=lr, beta1=momentum)
        if name.lower() == "smorms3":
            return optimizers.SMORMS3(lr=lr)
        if name.lower() == "adagrad":
            return optimizers.AdaGrad(lr=lr)
        if name.lower() == "adadelta":
            return optimizers.AdaDelta(rho=momentum)
        if name.lower() == "nesterov" or name.lower() == "nesterovag":
            return optimizers.NesterovAG(lr=lr, momentum=momentum)
        if name.lower() == "rmsprop":
            return optimizers.RMSprop(lr=lr, alpha=momentum)
        if name.lower() == "momentumsgd":
            return optimizers.MomentumSGD(lr=lr, momentum=momentum)
        if name.lower() == "sgd":
            return optimizers.SGD(lr=lr)
optim.py (project: adversarial-autoencoder, author: musyoku)
def decrease_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
plot_chainer_MLP.py (project: soft-dtw, author: mblondel)
def train(network, loss, X_tr, Y_tr, X_te, Y_te, n_epochs=30, gamma=1):
    model = Objective(network, loss=loss, gamma=gamma)

    #optimizer = optimizers.SGD()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train = tuple_dataset.TupleDataset(X_tr, Y_tr)
    test = tuple_dataset.TupleDataset(X_te, Y_te)

    train_iter = iterators.SerialIterator(train, batch_size=1, shuffle=True)
    test_iter = iterators.SerialIterator(test, batch_size=1, repeat=False,
                                         shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epochs, 'epoch'))

    trainer.run()
iris.py (project: workspace, author: nojima)
def learn(dataset: DataSet, n_iter: int = 10000) -> IrisChain:
    model = IrisChain()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    x_train = dataset.train.drop('class', axis=1).values
    y_train = to_hot_vector(dataset.train['class']).values

    for i in range(n_iter):
        model.cleargrads()
        x = Variable(x_train)
        y = Variable(y_train)
        loss = model(x, y)
        loss.backward()
        optimizer.update()

    return model
iris.py (project: workspace, author: nojima)
def learn_by_mini_batch(dataset: DataSet, batch_size: int = 25, n_iter: int = 5000) -> IrisChain:
    n = len(dataset.train)

    model = IrisChain()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    x_train = dataset.train.drop('class', axis=1).values
    y_train = to_hot_vector(dataset.train['class']).values

    for j in range(n_iter):
        shuffled = np.random.permutation(n)
        for i in range(0, n, batch_size):
            indices = shuffled[i:i+batch_size]
            x = Variable(x_train[indices])
            y = Variable(y_train[indices])
            model.cleargrads()
            loss = model(x, y)
            loss.backward()
            optimizer.update()

    return model
iris.py (project: workspace, author: nojima)
def pretrain(dataset: DataSet, batch_size: int = 25, n_iter: int = 3000) -> IrisAutoEncoder:
    n = len(dataset.train)

    model = IrisAutoEncoder()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    x_train = dataset.train.drop('class', axis=1).values

    for j in range(n_iter):
        shuffled = np.random.permutation(n)
        for i in range(0, n, batch_size):
            indices = shuffled[i:i+batch_size]
            x = Variable(x_train[indices])
            model.cleargrads()
            loss = model(x)
            loss.backward()
            optimizer.update()

    return model
ae.py (project: workspace, author: nojima)
def train(dataset: DataSet, n_iter: int = 3000, batch_size: int = 25) -> Iterator[AutoEncoder]:
    n = dataset.size

    input_dimension = dataset.input.shape[1]
    hidden_dimension = 2
    model = AutoEncoder(input_dimension, hidden_dimension)

    optimizer = optimizers.Adam()
    optimizer.setup(model)

    for j in range(n_iter):
        shuffled = np.random.permutation(n)

        for i in range(0, n, batch_size):
            indices = shuffled[i:i+batch_size]
            x = Variable(dataset.input[indices])
            model.cleargrads()
            loss = model(x)
            loss.backward()
            optimizer.update()

        yield model
train.py (project: chainer-glu, author: musyoku)
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return
        opt.alpha *= factor
        return
    raise NotImplementedError()
chain.py (project: unrolled-gan, author: musyoku)
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
chain.py (project: unrolled-gan, author: musyoku)
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
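
update_momentum maps a single momentum knob onto whichever hyperparameter plays that role for the active optimizer (beta1 for Adam and Eve, rho for AdaDelta, and so on). A self-contained sketch of the same idea applied directly to an Adam instance (the schedule is illustrative):

from chainer import optimizers

opt = optimizers.Adam(alpha=2e-4, beta1=0.9)
n_steps = 10000
for step in range(n_steps):
    # linearly anneal beta1 from 0.9 down to 0.5 over training
    opt.beta1 = 0.9 - 0.4 * step / (n_steps - 1)
    # ... compute the loss, call loss.backward() and opt.update() here ...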
wavenet.py (project: wavenet, author: musyoku)
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return chainer.optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return chainer.optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return chainer.optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return chainer.optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return chainer.optimizers.SGD(lr=lr)
    raise Exception()
wavenet.py (project: wavenet, author: musyoku)
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
chain.py (project: LSGAN, author: musyoku)
def update_momentum(self, momentum):
        if isinstance(self._optimizer, optimizers.Adam):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, Eve):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, optimizers.AdaDelta):
            self._optimizer.rho = momentum
            return
        if isinstance(self._optimizer, optimizers.NesterovAG):
            self._optimizer.momentum = momentum
            return
        if isinstance(self._optimizer, optimizers.RMSprop):
            self._optimizer.alpha = momentum
            return
        if isinstance(self._optimizer, optimizers.MomentumSGD):
            self._optimizer.momentum = momentum
            return
chain.py (project: adgm, author: musyoku)
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
vae_m2.py (project: variational-autoencoder, author: musyoku)
def __init__(self, conf, name="vae"):
        conf.check()
        self.encoder_xy_z, self.encoder_x_y, self.decoder = self.build(conf)
        self.name = name

        self.optimizer_encoder_xy_z = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_encoder_xy_z.setup(self.encoder_xy_z)
        # self.optimizer_encoder_xy_z.add_hook(optimizer.WeightDecay(0.00001))
        self.optimizer_encoder_xy_z.add_hook(GradientClipping(conf.gradient_clipping))

        self.optimizer_encoder_x_y = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_encoder_x_y.setup(self.encoder_x_y)
        # self.optimizer_encoder_x_y.add_hook(optimizer.WeightDecay(0.00001))
        self.optimizer_encoder_x_y.add_hook(GradientClipping(conf.gradient_clipping))

        self.optimizer_decoder = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_decoder.setup(self.decoder)
        # self.optimizer_decoder.add_hook(optimizer.WeightDecay(0.00001))
        self.optimizer_decoder.add_hook(GradientClipping(conf.gradient_clipping))

        self.type_pz = conf.type_pz
        self.type_qz = conf.type_qz
vae_m1.py (project: variational-autoencoder, author: musyoku)
def __init__(self, conf, name="vae"):
        conf.check()
        self.encoder, self.decoder = self.build(conf)
        self.name = name

        self.optimizer_encoder = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_encoder.setup(self.encoder)
        # self.optimizer_encoder.add_hook(optimizer.WeightDecay(0.001))
        self.optimizer_encoder.add_hook(GradientClipping(conf.gradient_clipping))

        self.optimizer_decoder = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_decoder.setup(self.decoder)
        # self.optimizer_decoder.add_hook(optimizer.WeightDecay(0.001))
        self.optimizer_decoder.add_hook(GradientClipping(conf.gradient_clipping))

        self.type_pz = conf.type_pz
        self.type_qz = conf.type_qz
test_linear_network.py (project: shoelace, author: rjagerman)
def test_linear_network():

    # To ensure repeatability of experiments
    np.random.seed(1042)

    # Load data set
    dataset = get_dataset(True)
    iterator = LtrIterator(dataset, repeat=True, shuffle=True)
    eval_iterator = LtrIterator(dataset, repeat=False, shuffle=False)

    # Create neural network with chainer and apply our loss function
    predictor = links.Linear(None, 1)
    loss = Ranker(predictor, listnet)

    # Build optimizer, updater and trainer
    optimizer = optimizers.Adam(alpha=0.2)
    optimizer.setup(loss)
    updater = training.StandardUpdater(iterator, optimizer)
    trainer = training.Trainer(updater, (10, 'epoch'))

    # Evaluate loss before training
    before_loss = eval(loss, eval_iterator)

    # Train neural network
    trainer.run()

    # Evaluate loss after training
    after_loss = eval(loss, eval_iterator)

    # Assert precomputed values
    assert_almost_equal(before_loss, 0.26958397)
    assert_almost_equal(after_loss, 0.2326711)
test_ppo.py (project: chainerrl, author: chainer)
def make_agent(self, env, gpu):
        model = self.make_model(env)

        opt = optimizers.Adam(alpha=3e-4)
        opt.setup(model)

        return self.make_ppo_agent(env=env, model=model, opt=opt, gpu=gpu)

