import chainer
from chainer import training
from chainer.optimizer import WeightDecay, GradientClipping
from chainer.training import extensions

# BiaffineJaLSTMParser, LSTMParserDataset, converter and log are
# project-local definitions used below.


def train(args):
    model = BiaffineJaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
                                 args.nlayers, args.hidden_dim, args.dep_dim,
                                 args.dropout_ratio)
    # Record the hyperparameters alongside the model directory.
    with open(args.model + "/params", "w") as f:
        log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Training batches are reshuffled every epoch; validation batches are
    # iterated once per evaluation in a fixed order.
    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)

    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(2e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    # Evaluate with a copy of the model whose dropout is disabled
    # (Chainer v1-style train flag).
    eval_model = model.copy()
    eval_model.train = False

    # Multiplies the optimizer's 'eps' attribute by 0.75 every 2500
    # iterations, starting from 2e-3.  Note that in Chainer's Adam the
    # learning rate is 'alpha'; 'eps' is the numerical-stability term.
    trainer.extend(extensions.ExponentialShift(
        "eps", .75, 2e-3), trigger=(2500, 'iteration'))
    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
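

# The script above never shows how `args` is built.  The sketch below is a
# hypothetical argparse entry point: the flag names mirror the attributes
# train() reads (args.model, args.word_emb_size, ...), and the default values
# are illustrative assumptions, not part of the original script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='train the biaffine LSTM dependency parser')
    parser.add_argument('--model', required=True,
                        help='directory holding params, logs and snapshots')
    parser.add_argument('--train', required=True, help='training data file')
    parser.add_argument('--val', required=True, help='validation data file')
    parser.add_argument('--initmodel', default=None,
                        help='resume training from a saved npz model')
    parser.add_argument('--pretrained', default=None,
                        help='pretrained word embedding file')
    parser.add_argument('--gpu', type=int, default=-1,
                        help='GPU device id (negative value means CPU)')
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--epoch', type=int, default=20)
    # argparse maps '--word-emb-size' to args.word_emb_size automatically.
    parser.add_argument('--word-emb-size', type=int, default=100)
    parser.add_argument('--char-emb-size', type=int, default=50)
    parser.add_argument('--nlayers', type=int, default=2)
    parser.add_argument('--hidden-dim', type=int, default=200)
    parser.add_argument('--dep-dim', type=int, default=100)
    parser.add_argument('--dropout-ratio', type=float, default=0.33)
    train(parser.parse_args())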