def train(args):
    model = EmbeddingTagger(args.model, 50, 20, 30)
    model.setup_training(args.embed)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    train = CCGBankDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = CCGBankDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 5000, 'iteration'
    log_interval = 200, 'iteration'

    val_model = model.copy()

    trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Python extensions.snapshot_object() example source code
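All of the examples below share the same basic pattern. As a minimal sketch (not taken from any of the projects below; `model`, `updater`, and `out_dir` are hypothetical placeholders), `extensions.snapshot_object` is typically wired into a trainer like this:

# Minimal sketch, assuming `model`, `updater`, and `out_dir` already exist.
# snapshot_object serializes only the given object (here the model); the filename
# template is formatted against the trainer, e.g. 'model_iter_1000'.
from chainer import training
from chainer.training import extensions

trainer = training.Trainer(updater, (20, 'epoch'), out=out_dir)
trainer.extend(
    extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),
    trigger=(1000, 'iteration'))
trainer.run()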
def pretrain_source_cnn(data, args, epochs=1000):
    print(":: pretraining source encoder")
    source_cnn = Loss(num_classes=10)
    if args.device >= 0:
        source_cnn.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(source_cnn)

    train_iterator, test_iterator = data2iterator(data, args.batchsize, multiprocess=False)
    # train_iterator = chainer.iterators.MultiprocessIterator(data, args.batchsize, n_processes=4)

    updater = chainer.training.StandardUpdater(iterator=train_iterator, optimizer=optimizer, device=args.device)
    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    # learning rate decay
    # trainer.extend(extensions.ExponentialShift("alpha", rate=0.9, init=args.learning_rate, target=args.learning_rate*10E-5))

    trainer.extend(extensions.Evaluator(test_iterator, source_cnn, device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'), trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(optimizer.target, "source_model_epoch_{.updater.epoch}"), trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    trainer.run()

    return source_cnn
def train_target_cnn(source, target, source_cnn, target_cnn, args, epochs=10000):
    print(":: training encoder with target domain")
    discriminator = Discriminator()

    if args.device >= 0:
        source_cnn.to_gpu()
        target_cnn.to_gpu()
        discriminator.to_gpu()

    # target_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    target_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # target_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    target_optimizer.setup(target_cnn.encoder)
    target_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    # discriminator_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    discriminator_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # discriminator_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    discriminator_optimizer.setup(discriminator)
    discriminator_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    source_train_iterator, source_test_iterator = data2iterator(source, args.batchsize, multiprocess=False)
    target_train_iterator, target_test_iterator = data2iterator(target, args.batchsize, multiprocess=False)

    updater = ADDAUpdater(source_train_iterator, target_train_iterator, source_cnn, target_optimizer, discriminator_optimizer, args)
    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    trainer.extend(extensions.Evaluator(target_test_iterator, target_cnn, device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'), trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(target_cnn, "target_model_epoch_{.updater.epoch}"), trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ["epoch", "loss/discrim", "loss/encoder",
         "validation/main/loss", "validation/main/accuracy", "elapsed_time"]))

    trainer.run()
def train_main(args):
    """
    Trains the model specified in args.
    Main method for the train subcommand.
    """
    # load text
    with open(args.text_path) as f:
        text = f.read()
    logger.info("corpus length: %s.", len(text))

    # data iterator
    data_iter = DataIterator(text, args.batch_size, args.seq_len)

    # load or build model
    if args.restore:
        logger.info("restoring model.")
        load_path = args.checkpoint_path if args.restore is True else args.restore
        model = load_model(load_path)
    else:
        net = Network(vocab_size=VOCAB_SIZE,
                      embedding_size=args.embedding_size,
                      rnn_size=args.rnn_size,
                      num_layers=args.num_layers,
                      drop_rate=args.drop_rate)
        model = L.Classifier(net)

    # make checkpoint directory
    log_dir = make_dirs(args.checkpoint_path)
    with open("{}.json".format(args.checkpoint_path), "w") as f:
        json.dump(model.predictor.args, f, indent=2)
    chainer.serializers.save_npz(args.checkpoint_path, model)
    logger.info("model saved: %s.", args.checkpoint_path)

    # optimizer
    optimizer = chainer.optimizers.Adam(alpha=args.learning_rate)
    optimizer.setup(model)
    # clip gradient norm
    optimizer.add_hook(chainer.optimizer.GradientClipping(args.clip_norm))

    # trainer
    updater = BpttUpdater(data_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (args.num_epochs, 'epoch'), out=log_dir)
    trainer.extend(extensions.snapshot_object(model, filename=os.path.basename(args.checkpoint_path)))
    trainer.extend(extensions.ProgressBar(update_interval=1))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PlotReport(y_keys=["main/loss"]))
    trainer.extend(LoggerExtension(text))

    # training start
    model.predictor.reset_state()
    logger.info("start of training.")
    time_train = time.time()
    trainer.run()

    # training end
    duration_train = time.time() - time_train
    logger.info("end of training, duration: %ds.", duration_train)

    # generate text
    seed = generate_seed(text)
    generate_text(model, seed, 1024, 3)
    return model
def create_trainer(
        config,
        project_path,
        updater,
        model,
        eval_func,
        iterator_test,
        iterator_train_varidation,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
):
    # type: (TrainConfig, str, any, typing.Dict, any, any, any, any, any) -> any
    def _make_evaluator(iterator):
        return utility.chainer_utility.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['main'], '{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(extensions.dump_graph('main/' + loss_names[0], out_name='main.dot'))

    trainer.extend(_make_evaluator(iterator_test), name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_varidation), name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in ['main/']:
            for loss_name in loss_names:
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger, log_name="log.txt"))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer
def train(args):
    model = LSTMParser(args.model, args.word_emb_size, args.afix_emb_size, args.nlayers,
                       args.hidden_dim, args.elu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(val_iter, eval_model,
                                        converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
                       args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 2000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = BiaffineJaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
                                 args.nlayers, args.hidden_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(2e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.ExponentialShift(
        "eps", .75, 2e-3), trigger=(2500, 'iteration'))
    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = JaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
                         args.nlayers, args.hidden_dim, args.relu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_loss', 'main/tagging_accuracy',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_loss', 'validation/main/tagging_accuracy',
        'validation/main/parsing_loss', 'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.char_emb_size,
                       args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
                       args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 2000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = LSTMParser(args.model, args.word_emb_size, args.afix_emb_size, args.nlayers,
                       args.hidden_dim, args.elu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(val_iter, eval_model,
                                        converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = JaCCGEmbeddingTagger(args.model,
                                 args.word_emb_size, args.char_emb_size)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = JaCCGTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = JaCCGTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.AdaGrad()
    optimizer.setup(model)
    # optimizer.add_hook(WeightDecay(1e-8))
    my_converter = lambda x, dev: convert.concat_examples(x, dev, (None, -1, None, None))
    updater = training.StandardUpdater(train_iter, optimizer, converter=my_converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, my_converter), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def train(args):
    model = PeepHoleLSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
                               args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    converter = lambda x, device: \
        concat_examples(x, device=device, padding=-1)

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def start(self):
    """ Train pose net. """
    # set random seed.
    if self.seed is not None:
        random.seed(self.seed)
        np.random.seed(self.seed)
        if self.gpu >= 0:
            chainer.cuda.cupy.random.seed(self.seed)
    # initialize model to train.
    model = AlexNet(self.Nj, self.use_visibility)
    if self.resume_model:
        serializers.load_npz(self.resume_model, model)
    # prepare gpu.
    if self.gpu >= 0:
        chainer.cuda.get_device(self.gpu).use()
        model.to_gpu()
    # load the datasets.
    train = PoseDataset(self.train, data_augmentation=self.data_augmentation)
    val = PoseDataset(self.val, data_augmentation=False)
    # training/validation iterators.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, self.batchsize)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, self.batchsize, repeat=False, shuffle=False)
    # set up an optimizer.
    optimizer = self._get_optimizer()
    optimizer.setup(model)
    if self.resume_opt:
        chainer.serializers.load_npz(self.resume_opt, optimizer)
    # set up a trainer.
    updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu)
    trainer = training.Trainer(
        updater, (self.epoch, 'epoch'), os.path.join(self.out, 'chainer'))
    # standard trainer settings
    trainer.extend(extensions.dump_graph('main/loss'))
    val_interval = (10, 'epoch')
    trainer.extend(TestModeEvaluator(val_iter, model, device=self.gpu), trigger=val_interval)
    # save parameters and optimization state per validation step
    resume_interval = (self.epoch/10, 'epoch')
    trainer.extend(extensions.snapshot_object(
        model, "epoch-{.updater.epoch}.model"), trigger=resume_interval)
    trainer.extend(extensions.snapshot_object(
        optimizer, "epoch-{.updater.epoch}.state"), trigger=resume_interval)
    trainer.extend(extensions.snapshot(
        filename="epoch-{.updater.epoch}.iter"), trigger=resume_interval)
    # show log
    log_interval = (10, "iteration")
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss', 'lr']), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    # start training
    if self.resume:
        chainer.serializers.load_npz(self.resume, trainer)
    trainer.run()
def __init__(self, folder, chain, train, test, batchsize=500, resume=True, gpu=0, nepoch=1, reports=[]):
    self.reports = reports
    self.nepoch = nepoch
    self.folder = folder
    self.chain = chain
    self.gpu = gpu
    if self.gpu >= 0:
        chainer.cuda.get_device(gpu).use()
        chain.to_gpu(gpu)
    self.eval_chain = eval_chain = chain.copy()
    self.chain.test = False
    self.eval_chain.test = True
    self.testset = test

    if not os.path.exists(folder):
        os.makedirs(folder)

    train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=True)
    test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, chain.optimizer, device=gpu)
    trainer = training.Trainer(updater, (nepoch, 'epoch'), out=folder)
    # trainer.extend(TrainingModeSwitch(chain))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.Evaluator(test_iter, eval_chain, device=gpu), trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        chain, 'chain_snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')), trigger=(1, 'iteration'))
    trainer.extend(extensions.PrintReport(
        ['epoch'] + reports), trigger=IntervalTrigger(1, 'epoch'))
    self.trainer = trainer

    if resume:
        # if resumeFrom is not None:
        #     trainerFile = os.path.join(resumeFrom[0], 'snapshot_epoch_{:06}'.format(resumeFrom[1]))
        #     S.load_npz(trainerFile, trainer)
        # find the latest epoch snapshot present in `folder` and resume from it.
        i = 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        while i <= nepoch and os.path.isfile(trainerFile):
            i = i + 1
            trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        i = i - 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        if i >= 0 and os.path.isfile(trainerFile):
            S.load_npz(trainerFile, trainer)
def create_trainer(
        config: TrainConfig,
        project_path: str,
        updater,
        model: typing.Dict,
        eval_func,
        iterator_test,
        iterator_train_eval,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
        log_name='log.txt',
):
    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['encoder'], 'encoder{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)
    snapshot = extensions.snapshot_object(model['generator'], 'generator{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)
    snapshot = extensions.snapshot_object(model['mismatch_discriminator'], 'mismatch_discriminator{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(utility.chainer.dump_graph([
        'encoder/' + loss_names[0],
        'generator/' + loss_names[0],
        'mismatch_discriminator/' + loss_names[0],
    ], out_name='main.dot'))

    def _make_evaluator(iterator):
        return utility.chainer.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    trainer.extend(_make_evaluator(iterator_test), name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_eval), name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in [s + '/' for s in model.keys()]:
            for loss_name in set(loss_names):
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger, log_name=log_name))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer
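A file written by `snapshot_object` can be loaded back with `chainer.serializers.load_npz`; a minimal sketch (the file path and `model`, an instance with the same architecture as the saved one, are placeholders):

# Minimal sketch of restoring an object saved by snapshot_object.
# snapshot_object writes an NPZ archive, so load_npz reads it back in place.
import chainer

chainer.serializers.load_npz('result/model_iter_1000', model)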