def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
    # Adam takes no lr argument here; other optimizers receive lr as the learning rate.
    if opt_name == "Adam":
        opt = getattr(optimizers, opt_name)()
    else:
        opt = getattr(optimizers, opt_name)(lr)
    opt.setup(self.model)
    opt.add_hook(optimizer.GradientClipping(g_clip))

    updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
    self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)
    self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))
    self.trainer.extend(extensions.dump_graph('main/loss'))
    self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
    self.trainer.extend(extensions.LogReport())
    self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                              'epoch', file_name='loss.png'))
    self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                                              'epoch', file_name='accuracy.png'))
    self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss',
                                                'main/accuracy', 'validation/main/accuracy',
                                                'elapsed_time']))
    self.trainer.extend(extensions.ProgressBar())
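A minimal usage sketch for set_trainer (hypothetical: net stands for an instance whose class defines self.model, self.train_iter and self.test_iter, with chainer's optimizers, optimizer, training and extensions imported as in the snippet):

# Sketch with placeholder values; gpu=-1 selects the CPU.
net.set_trainer(out_dir='result', gpu=-1, n_epoch=20, g_clip=5.0,
                opt_name='SGD', lr=0.01)
net.trainer.run()  # writes loss.png, accuracy.png and snapshots under 'result'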
def __init__(self, **kwargs):
    required_keys = []
    optional_keys = [
        'dump_graph',
        'Evaluator',
        'ExponentialShift',
        'LinearShift',
        'LogReport',
        'observe_lr',
        'observe_value',
        'snapshot',
        'PlotReport',
        'PrintReport',
    ]
    super().__init__(
        required_keys, optional_keys, kwargs, self.__class__.__name__)
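The base class this __init__ calls is not part of the listing; a plausible sketch of what it might do with required_keys and optional_keys (an assumption, not the original code):

class ConfigBase(object):  # hypothetical name for the unseen base class
    def __init__(self, required_keys, optional_keys, kwargs, name):
        # Reject missing required keys and unknown keys, then store the values.
        for key in required_keys:
            if key not in kwargs:
                raise ValueError('{} requires the key {!r}'.format(name, key))
        for key in kwargs:
            if key not in required_keys and key not in optional_keys:
                raise ValueError('{} got an unexpected key {!r}'.format(name, key))
        for key, value in kwargs.items():
            setattr(self, key, value)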
def train(args):
    model = EmbeddingTagger(args.model, 50, 20, 30)
    model.setup_training(args.embed)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    train = CCGBankDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = CCGBankDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 5000, 'iteration'
    log_interval = 200, 'iteration'

    # Evaluate on a copy so evaluation does not interfere with the training model.
    val_model = model.copy()
    trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
def main():
    unit = 1000
    batchsize = 100
    epoch = 20

    model = L.Classifier(MLP(unit, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    test_iter = chainer.iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
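MLP is not defined in this snippet; a definition consistent with the call MLP(unit, 10) is the standard Chainer MNIST multilayer perceptron (a sketch, not necessarily the original):

import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self, n_units, n_out):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)  # input size inferred at first call
            self.l2 = L.Linear(None, n_units)
            self.l3 = L.Linear(None, n_out)

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)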
def fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,
        resume=None, alpha=1e-3):
    if device >= 0:
        chainer.cuda.get_device(device).use()
        model.to_gpu(device)
    optimizer = chainer.optimizers.Adam(alpha)
    optimizer.setup(model)

    # Set up iterators
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    valid_iter = chainer.iterators.SerialIterator(valid, batchsize,
                                                  repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'),
                               out='out_' + str(device))

    # Set up logging, printing & saving
    keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1']
    keys += ['kldg', 'kldi', 'hypg', 'hypi']
    keys += ['hypglv', 'hypilv']
    reports = ['epoch']
    reports += ['main/' + key for key in keys]
    reports += ['validation/main/rmse']
    # Only the test-mode evaluator is registered; a second plain Evaluator would
    # be auto-renamed ('validation_1') and evaluate the same data redundantly.
    trainer.extend(TestModeEvaluator(valid_iter, model, device=device))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.PrintReport(reports))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # If a previous snapshot is given, resume from it
    if resume:
        print("Loading from {}".format(resume))
        chainer.serializers.load_npz(resume, trainer)

    # Run the model
    trainer.run()
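TestModeEvaluator (also used in the pose-net snippet further down) is not defined in the listing. A common minimal implementation in pre-chainer.config codebases looked like this (a sketch, assuming the model exposes a boolean train attribute):

import chainer
from chainer.training import extensions

class TestModeEvaluator(extensions.Evaluator):
    """Evaluator that switches the target model to test mode during evaluation."""

    def evaluate(self):
        model = self.get_target('main')
        model.train = False          # disable dropout etc. (old-style flag)
        result = super().evaluate()
        model.train = True           # restore training mode
        return result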
def fit(self, X, y=None, **kwargs):
    """Fit the model.

    If a hyperparameter is set to None, the instance's attribute is used
    instead; this makes grid search work through the `set_params` method.
    If the instance attribute is not set either, `_default_hyperparam` is used.

    Usage: ``model.fit(train_dataset)`` or ``model.fit(X, y)``

    Args:
        train: training dataset, assumed to be a chainer dataset class
        test: test dataset for evaluation, assumed to be a chainer dataset class
        batchsize: batch size for both training and evaluation
        iterator_class: iterator class used for this training,
            currently assumed to be SerialIterator or MultiprocessIterator
        optimizer: optimizer instance used to update parameters
        epoch: number of training epochs
        out: directory path in which to save the results
        snapshot_frequency (int): snapshot frequency in epochs.
            A negative value means no snapshot is taken.
        dump_graph: whether to save computational graph info; default is False.
        log_report: enable LogReport or not
        plot_report: enable PlotReport or not
        print_report: enable PrintReport or not
        progress_report: enable ProgressReport or not
        resume: path to a saved trainer from which to resume training.
    """
    kwargs = self.filter_sk_params(self.fit_core, kwargs)
    return self.fit_core(X, y, **kwargs)
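A minimal usage sketch for this scikit-learn-style wrapper (SklearnWrapperClassifier and MLP are placeholder names, not taken from the listing; the keyword arguments follow the docstring above):

# Sketch, assuming a wrapper class exposing the fit() above.
train, test = chainer.datasets.get_mnist()
model = SklearnWrapperClassifier(MLP(1000, 10))   # hypothetical constructor
model.fit(train, test=test, batchsize=128, epoch=5, out='result')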
def main(config_file):
    with open(config_file) as fp:
        conf = json.load(fp)
    fe_conf = conf['feature_extractor']
    cl_conf = conf['classifier']

    fe_class = getattr(cnn_feature_extractors, fe_conf['model'])
    feature_extractor = fe_class(n_classes=fe_conf['n_classes'], n_base_units=fe_conf['n_base_units'])
    chainer.serializers.load_npz(fe_conf['out_file'], feature_extractor)

    model = classifiers.MLPClassifier(cl_conf['n_classes'], feature_extractor)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    device = cl_conf.get('device', -1)
    train_dataset = feature_dataset(os.path.join(cl_conf['dataset_path'], 'train'), model)
    train_iter = chainer.iterators.SerialIterator(train_dataset, conf.get('batch_size', 1))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (cl_conf['epoch'], 'epoch'), out='out_re')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(update_interval=10))

    test_dataset_path = os.path.join(cl_conf['dataset_path'], 'test')
    if os.path.exists(test_dataset_path):
        test_dataset = feature_dataset(test_dataset_path, model)
        test_iter = chainer.iterators.SerialIterator(test_dataset, 10, repeat=False, shuffle=False)
        trainer.extend(extensions.Evaluator(test_iter, model, device=device))
        trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy'
        ]))
    else:
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))

    trainer.run()

    chainer.serializers.save_npz(cl_conf['out_file'], model)
def main(config_file):
    with open(config_file) as fp:
        conf = json.load(fp)['feature_extractor']

    model_class = getattr(cnn_feature_extractors, conf['model'])
    model = model_class(conf['n_classes'], conf['n_base_units'])

    resume_file = conf['out_file'] + '.to_resume'
    if os.path.exists(resume_file):
        chainer.serializers.load_npz(resume_file, model)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    device = conf.get('device', -1)
    train_dataset = create_dataset(os.path.join(conf['dataset_path'], 'train'))
    train_iter = chainer.iterators.SerialIterator(train_dataset, conf.get('batch_size', 10))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (conf['epoch'], 'epoch'), out='out')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(update_interval=10))

    test_dataset_path = os.path.join(conf['dataset_path'], 'test')
    if os.path.exists(test_dataset_path):
        test_dataset = create_dataset(test_dataset_path)
        test_iter = chainer.iterators.SerialIterator(test_dataset, 20, repeat=False, shuffle=False)
        trainer.extend(extensions.Evaluator(test_iter, model, device=device))
        trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy'
        ]))
    else:
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))

    trainer.run()

    model.to_cpu()
    chainer.serializers.save_npz(conf['out_file'], model)
def create_trainer(
        config,
        project_path,
        updater,
        model,
        eval_func,
        iterator_test,
        iterator_train_validation,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
):
    # type: (TrainConfig, str, any, typing.Dict, any, any, any, any, any) -> any
    def _make_evaluator(iterator):
        return utility.chainer_utility.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    # No stop trigger is given, so the caller controls when training ends.
    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['main'], '{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(extensions.dump_graph('main/' + loss_names[0], out_name='main.dot'))

    trainer.extend(_make_evaluator(iterator_test), name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_validation), name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in ['main/']:
            for loss_name in loss_names:
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger, log_name="log.txt"))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer
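Since the Trainer above is created without a stop trigger, it runs until the caller stops it. A usage sketch (the config object, updater, model dict and iterators are placeholders standing in for the project's actual setup):

# Sketch: build and run the trainer created above.
trainer = create_trainer(config, project_path, updater, model, eval_func,
                         iterator_test, iterator_train_validation,
                         loss_names=['loss'])
trainer.run()  # no stop trigger was set, so this runs until interrupted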
def train(args):
    model = JaCCGEmbeddingTagger(args.model,
                                 args.word_emb_size, args.char_emb_size)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = JaCCGTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = JaCCGTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.AdaGrad()
    optimizer.setup(model)
    # optimizer.add_hook(WeightDecay(1e-8))
    # Pad the second component of each example with -1; the others are not padded.
    my_converter = lambda x, dev: convert.concat_examples(x, dev, (None, -1, None, None))
    updater = training.StandardUpdater(train_iter, optimizer, converter=my_converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    # Evaluate on a copy of the model with training mode switched off.
    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, my_converter), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
def start(self):
    """Train pose net."""
    # Set random seeds.
    if self.seed is not None:
        random.seed(self.seed)
        np.random.seed(self.seed)
        if self.gpu >= 0:
            chainer.cuda.cupy.random.seed(self.seed)
    # Initialize the model to train.
    model = AlexNet(self.Nj, self.use_visibility)
    if self.resume_model:
        serializers.load_npz(self.resume_model, model)
    # Prepare the GPU.
    if self.gpu >= 0:
        chainer.cuda.get_device(self.gpu).use()
        model.to_gpu()
    # Load the datasets.
    train = PoseDataset(self.train, data_augmentation=self.data_augmentation)
    val = PoseDataset(self.val, data_augmentation=False)
    # Training/validation iterators.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, self.batchsize)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, self.batchsize, repeat=False, shuffle=False)
    # Set up an optimizer.
    optimizer = self._get_optimizer()
    optimizer.setup(model)
    if self.resume_opt:
        chainer.serializers.load_npz(self.resume_opt, optimizer)
    # Set up a trainer.
    updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu)
    trainer = training.Trainer(
        updater, (self.epoch, 'epoch'), os.path.join(self.out, 'chainer'))
    # Standard trainer settings.
    trainer.extend(extensions.dump_graph('main/loss'))
    val_interval = (10, 'epoch')
    trainer.extend(TestModeEvaluator(val_iter, model, device=self.gpu), trigger=val_interval)
    # Save parameters and optimization state per validation step.
    # Use integer division: interval triggers expect an int, not a float.
    resume_interval = (self.epoch // 10, 'epoch')
    trainer.extend(extensions.snapshot_object(
        model, "epoch-{.updater.epoch}.model"), trigger=resume_interval)
    trainer.extend(extensions.snapshot_object(
        optimizer, "epoch-{.updater.epoch}.state"), trigger=resume_interval)
    trainer.extend(extensions.snapshot(
        filename="epoch-{.updater.epoch}.iter"), trigger=resume_interval)
    # Show the log.
    log_interval = (10, "iteration")
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss', 'lr']), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    # Start training.
    if self.resume:
        chainer.serializers.load_npz(self.resume, trainer)
    trainer.run()
def __init__(self, folder, chain, train, test, batchsize=500, resume=True, gpu=0, nepoch=1, reports=[]):
    self.reports = reports
    self.nepoch = nepoch
    self.folder = folder
    self.chain = chain
    self.gpu = gpu
    if self.gpu >= 0:
        chainer.cuda.get_device(gpu).use()
        chain.to_gpu(gpu)
    self.eval_chain = eval_chain = chain.copy()
    self.chain.test = False
    self.eval_chain.test = True
    self.testset = test

    if not os.path.exists(folder):
        os.makedirs(folder)

    train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=True)
    test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, chain.optimizer, device=gpu)
    trainer = training.Trainer(updater, (nepoch, 'epoch'), out=folder)
    # trainer.extend(TrainingModeSwitch(chain))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.Evaluator(test_iter, eval_chain, device=gpu), trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        chain, 'chain_snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')), trigger=(1, 'iteration'))
    trainer.extend(extensions.PrintReport(
        ['epoch'] + reports), trigger=IntervalTrigger(1, 'epoch'))
    self.trainer = trainer

    if resume:
        # Resume from the most recent snapshot found in the output folder.
        # if resumeFrom is not None:
        #     trainerFile = os.path.join(resumeFrom[0], 'snapshot_epoch_{:06}'.format(resumeFrom[1]))
        #     S.load_npz(trainerFile, trainer)
        i = 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        while i <= nepoch and os.path.isfile(trainerFile):
            i = i + 1
            trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        i = i - 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        if i >= 0 and os.path.isfile(trainerFile):
            S.load_npz(trainerFile, trainer)
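A usage sketch for this wrapper (TrainerWrapper and MyChain are stand-in names, since the listing shows only the __init__; the chain must carry a set-up optimizer because the updater above reads chain.optimizer):

# Sketch with hypothetical names; gpu=-1 selects the CPU.
chain = MyChain()
chain.optimizer = chainer.optimizers.Adam()
chain.optimizer.setup(chain)
w = TrainerWrapper('result', chain, train, test, batchsize=500,
                   resume=False, gpu=-1, nepoch=10,
                   reports=['main/loss', 'validation/main/loss'])
w.trainer.run()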