import chainer
from chainer import training
from chainer.training import extensions


def fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,
        resume=None, alpha=1e-3):
    if device >= 0:
        chainer.cuda.get_device(device).use()
        model.to_gpu(device)
    optimizer = chainer.optimizers.Adam(alpha)
    optimizer.setup(model)
    # Set up iterators
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    valid_iter = chainer.iterators.SerialIterator(valid, batchsize,
                                                  repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'),
                               out='out_' + str(device))
    # Set up logging, printing & saving
    keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1']
    keys += ['kldg', 'kldi', 'hypg', 'hypi']
    keys += ['hypglv', 'hypilv']
    reports = ['epoch']
    reports += ['main/' + key for key in keys]
    reports += ['validation/main/rmse']
    # TestModeEvaluator is assumed to be defined elsewhere in the script
    # (an Evaluator subclass that runs the model outside of training mode).
    trainer.extend(TestModeEvaluator(valid_iter, model, device=device))
    trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.PrintReport(reports))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    # If a previous snapshot was given, resume training from it
    if resume:
        print("Loading from {}".format(resume))
        chainer.serializers.load_npz(resume, trainer)
    # Run the training loop
    trainer.run()
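For reference, here is a minimal usage sketch of how fit might be called. The synthetic arrays and the MFModel name below are placeholders (they are not part of the original code); the real model is whatever chainer.Chain the rest of the post defines, and its __call__ must return the scalar loss reported as 'main/loss'.

# Hypothetical usage sketch with synthetic data; MFModel is a placeholder
# for the chainer.Chain defined elsewhere in the post.
import numpy as np
from chainer.datasets import TupleDataset

n_users, n_items, n_obs = 1000, 500, 100000
users = np.random.randint(n_users, size=n_obs).astype(np.int32)
items = np.random.randint(n_items, size=n_obs).astype(np.int32)
stars = np.random.randint(1, 6, size=n_obs).astype(np.float32)

# Hold out the last 10% of observations for validation
cut = int(0.9 * n_obs)
train = TupleDataset(users[:cut], items[:cut], stars[:cut])
valid = TupleDataset(users[cut:], items[cut:], stars[cut:])

model = MFModel(n_users, n_items)  # placeholder for the post's model
fit(model, train, valid, device=-1, n_epoch=10)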