def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
             hidden_dim=128, dep_dim=100, dropout_ratio=0.5):
    self.model_path = model_path
    defs_file = model_path + "/tagger_defs.txt"
    if word_dim is None:
        # inference: hyperparameters are read back from tagger_defs.txt
        self.train = False
        Param.load(self, defs_file)
        self.extractor = FeatureExtractor(model_path)
    else:
        # training: collect hyperparameters and vocabulary sizes, then
        # dump them to tagger_defs.txt for later inference runs
        self.train = True
        p = Param(self)
        p.dep_dim = dep_dim
        p.word_dim = word_dim
        p.afix_dim = afix_dim
        p.hidden_dim = hidden_dim
        p.nlayers = nlayers
        p.n_words = len(read_model_defs(model_path + "/words.txt"))
        p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
        p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
        p.targets = read_model_defs(model_path + "/target.txt")
        p.dump(defs_file)

    # LSTM input: one word embedding plus 8 affix embeddings per token
    self.in_dim = self.word_dim + 8 * self.afix_dim
    self.dropout_ratio = dropout_ratio
    super(FastBiaffineLSTMParser, self).__init__(
        emb_word=L.EmbedID(self.n_words, self.word_dim, ignore_label=IGNORE),
        emb_suf=L.EmbedID(self.n_suffixes, self.afix_dim, ignore_label=IGNORE),
        emb_prf=L.EmbedID(self.n_prefixes, self.afix_dim, ignore_label=IGNORE),
        # forward and backward NStepLSTM stacks (the last argument is the
        # LSTM-internal dropout ratio)
        lstm_f=L.NStepLSTM(self.nlayers, self.in_dim, self.hidden_dim, 0.32),
        lstm_b=L.NStepLSTM(self.nlayers, self.in_dim, self.hidden_dim, 0.32),
        # linear projections of the 2*hidden_dim BiLSTM states into
        # dep_dim-sized spaces for arc and relation scoring
        arc_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
        arc_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
        rel_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
        rel_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
        # biaffine scorer for arcs; bilinear scorer over the target label set
        biaffine_arc=Biaffine(self.dep_dim),
        biaffine_tag=Bilinear(self.dep_dim, self.dep_dim, len(self.targets))
    )
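How the two branches are selected is easiest to see from the call site. The sketch below is illustrative only: the paths and the word_dim/afix_dim values are assumptions, not values taken from the project; it only assumes the model directory contains the files referenced above (tagger_defs.txt, words.txt, suffixes.txt, prefixes.txt, target.txt).

# Inference: leaving word_dim as None makes the constructor load the saved
# hyperparameters from tagger_defs.txt and build a FeatureExtractor.
parser = FastBiaffineLSTMParser("path/to/trained_model")

# Training: passing word_dim (and the other dimensions) takes the else
# branch, which counts the vocabulary files and dumps the hyperparameters
# to tagger_defs.txt before the Chainer links are created.
parser = FastBiaffineLSTMParser("path/to/new_model",
                                word_dim=100, afix_dim=30,
                                nlayers=2, hidden_dim=128, dep_dim=100)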