Python save_hdf5() usage examples

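All of the snippets below call chainer.serializers.save_hdf5(filename, obj), which writes the parameters of a chainer.Chain (or the state of an Optimizer) to an HDF5 file; serializers.load_hdf5 is the inverse, and both require h5py to be installed. As a minimal, self-contained sketch of the API in Chainer v2+ style (the MLP model here is hypothetical, not from any project on this page):

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import serializers

class MLP(chainer.Chain):
    """Hypothetical two-layer network, used only to illustrate the API."""
    def __init__(self):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(784, 100)
            self.l2 = L.Linear(100, 10)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))

model = MLP()
serializers.save_hdf5('model.hdf5', model)  # serialize parameters to HDF5
serializers.load_hdf5('model.hdf5', model)  # restore into a compatible object
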
vae_m2.py (project: variational-autoencoder, author: musyoku)
def save(self, dir=None):
    if dir is None:
        raise Exception('directory name is required')
    try:
        os.mkdir(dir)
    except OSError:
        pass  # directory already exists
    # Serialize every Chain and optimizer attribute to its own HDF5 file.
    for attr in vars(self):
        prop = getattr(self, attr)
        if isinstance(prop, (chainer.Chain, chainer.optimizer.GradientMethod)):
            serializers.save_hdf5(dir + "/%s_%s.hdf5" % (self.name, attr), prop)
    print("model saved.")
vae_m1.py (project: variational-autoencoder, author: musyoku)
def save(self, dir=None):
    if dir is None:
        raise Exception('directory name is required')
    try:
        os.mkdir(dir)
    except OSError:
        pass  # directory already exists
    # Serialize every Chain and optimizer attribute to its own HDF5 file.
    for attr in vars(self):
        prop = getattr(self, attr)
        if isinstance(prop, (chainer.Chain, chainer.optimizer.GradientMethod)):
            serializers.save_hdf5(dir + "/%s_%s.hdf5" % (self.name, attr), prop)
    print("model saved.")
parse10.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.embed, args.queue, args.stack, args.srstate,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
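Each epoch therefore leaves a complete checkpoint set (.words, .phrases, .semiterminals, .spec, .weights) under a zero-padded prefix. Restoring, say, epoch 3 would look roughly like the following sketch; Vocabulary.load and Parser.load_spec are assumed counterparts of the save calls above and are not shown in this excerpt:

prefix = args.model + '.%03d' % 3
word_vocab = Vocabulary.load(prefix + '.words')
phrase_vocab = Vocabulary.load(prefix + '.phrases')
semiterminal_vocab = Vocabulary.load(prefix + '.semiterminals')
parser = Parser.load_spec(prefix + '.spec')          # rebuild the network from its spec
serializers.load_hdf5(prefix + '.weights', parser)   # then fill in the trained weights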
parse12.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.embed, args.queue, args.stack, args.srstate,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse01.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(word_lists, args.vocab)
  phrase_set = set()
  semi_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semi_set |= set(extract_semi_labels(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semi_vocab = Vocabulary.new([list(semi_set)], len(semi_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semi_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
      args.vocab, args.embed, args.queue, args.stack,
      len(phrase_set), len(semi_set),
  )
  if USE_GPU:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = my_zeros((), np.float32)

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semi_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse08.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.queue, args.stack, args.srstate,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse14.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.embed, args.char_embed, args.queue,
    args.stack, args.srstate, len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse09.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.queue, args.stack, args.srstate,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse11.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.embed, args.queue, args.stack, args.srstate,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse02.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(word_lists, args.vocab)
  phrase_set = set()
  semi_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semi_set |= set(extract_semi_labels(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semi_vocab = Vocabulary.new([list(semi_set)], len(semi_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semi_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
      args.vocab, args.embed, args.queue, args.stack,
      len(phrase_set), len(semi_set),
  )
  if USE_GPU:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = my_zeros((), np.float32)

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semi_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse03.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(word_lists, args.vocab)
  phrase_set = set()
  semi_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semi_set |= set(extract_semi_labels(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semi_vocab = Vocabulary.new([list(semi_set)], len(semi_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semi_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
      args.vocab, args.embed, args.queue, args.stack,
      len(phrase_set), len(semi_set),
  )
  if USE_GPU:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = my_zeros((), np.float32)

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semi_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse05.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.queue, args.stack,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
parse07.py (project: nn_parsers, author: odashi)
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.queue, args.stack,
    len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr=0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0

    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1

      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
train.py (project: chainer-image-caption, author: dsanno)
def train(epoch_num):
    image_groups, sentence_groups = make_groups(train_image_ids, train_sentences)
    test_image_groups, test_sentence_groups = make_groups(test_image_ids, test_sentences, train=False)
    for epoch in range(epoch_num):
        batches = random_batches(image_groups, sentence_groups)
        sum_loss = 0
        sum_acc = 0
        sum_size = 0
        batch_num = len(batches)
        for i, (image_id_batch, sentence_batch) in enumerate(batches):
            loss, acc, size = forward(caption_net, images[image_id_batch], sentence_batch)
            caption_net.cleargrads()
            loss.backward()
            loss.unchain_backward()
            optimizer.update()
            sentence_length = sentence_batch.shape[1]
            sum_loss += float(loss.data) * size
            sum_acc += acc * size
            sum_size += size
            if (i + 1) % 500 == 0:
                print('{} / {} loss: {} accuracy: {}'.format(i + 1, batch_num, sum_loss / sum_size, sum_acc / sum_size))
        print('epoch: {} done'.format(epoch + 1))
        print('train loss: {} accuracy: {}'.format(sum_loss / sum_size, sum_acc / sum_size))
        sum_loss = 0
        sum_acc = 0
        sum_size = 0
        for image_ids, sentences in zip(test_image_groups, test_sentence_groups):
            if len(sentences) == 0:
                continue
            size = len(sentences)
            for i in range(0, size, batch_size):
                image_id_batch = image_ids[i:i + batch_size]
                sentence_batch = sentences[i:i + batch_size]
                loss, acc, size = forward(caption_net, images[image_id_batch], sentence_batch, train=False)
                sentence_length = sentence_batch.shape[1]
                sum_loss += float(loss.data) * size
                sum_acc += acc * size
                sum_size += size
        print('test loss: {} accuracy: {}'.format(sum_loss / sum_size, sum_acc / sum_size))

        serializers.save_hdf5(args.output + '_{0:04d}.model'.format(epoch), caption_net)
        serializers.save_hdf5(args.output + '_{0:04d}.state'.format(epoch), optimizer)
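Since both the network (.model) and the optimizer state (.state) are saved each epoch, training can be resumed from any checkpoint. A minimal sketch, assuming the same caption_net and optimizer objects have already been constructed as above, and with a hypothetical resume epoch:

resume_epoch = 10  # hypothetical checkpoint to resume from
serializers.load_hdf5(args.output + '_{0:04d}.model'.format(resume_epoch), caption_net)
serializers.load_hdf5(args.output + '_{0:04d}.state'.format(resume_epoch), optimizer)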

