def __init__(self, model_path, word_dim=None, afix_dim=None,
             nlayers=2, hidden_dim=128, relu_dim=64, dropout_ratio=0.5):
    """Build the bi-LSTM supertagger.

    Two modes, selected by ``word_dim``:
      * ``word_dim is None`` — inference: hyperparameters are restored from
        ``<model_path>/tagger_defs.txt`` and a FeatureExtractor is attached.
      * otherwise — training: the given hyperparameters are recorded on
        ``self`` via ``Param`` and dumped to ``tagger_defs.txt``.

    Args:
        model_path: directory holding the vocab files and tagger_defs.txt.
        word_dim: word-embedding size, or None to load a trained model.
        afix_dim: prefix/suffix embedding size (training mode).
        nlayers: number of LSTM layers per direction.
        hidden_dim: LSTM hidden state size (per direction).
        relu_dim: size of the post-LSTM ReLU layer.
        dropout_ratio: stored for use at apply time; note the NStepLSTMs
            themselves are constructed with dropout 0.
    """
    self.model_path = model_path
    defs_file = model_path + "/tagger_defs.txt"

    # Training mode iff explicit hyperparameters were supplied.
    self.train = word_dim is not None
    if self.train:
        hyper = Param(self)
        hyper.word_dim = word_dim
        hyper.afix_dim = afix_dim
        hyper.hidden_dim = hidden_dim
        hyper.relu_dim = relu_dim
        hyper.nlayers = nlayers
        hyper.dump(defs_file)
    else:
        Param.load(self, defs_file)
        self.extractor = FeatureExtractor(model_path)

    # Vocabulary tables: supertag targets, words, and 4+4 affix features.
    self.targets = read_model_defs(model_path + "/target.txt")
    self.words = read_model_defs(model_path + "/words.txt")
    self.suffixes = read_model_defs(model_path + "/suffixes.txt")
    self.prefixes = read_model_defs(model_path + "/prefixes.txt")

    # Per-token input: word embedding plus 4 suffix + 4 prefix embeddings.
    self.in_dim = self.word_dim + 8 * self.afix_dim
    self.dropout_ratio = dropout_ratio

    super(LSTMTagger, self).__init__(
        emb_word=L.EmbedID(len(self.words), self.word_dim),
        emb_suf=L.EmbedID(len(self.suffixes), self.afix_dim, ignore_label=IGNORE),
        emb_prf=L.EmbedID(len(self.prefixes), self.afix_dim, ignore_label=IGNORE),
        lstm_f=L.NStepLSTM(nlayers, self.in_dim, self.hidden_dim, 0.),
        lstm_b=L.NStepLSTM(nlayers, self.in_dim, self.hidden_dim, 0.),
        linear1=L.Linear(2 * self.hidden_dim, self.relu_dim),
        linear2=L.Linear(self.relu_dim, len(self.targets)),
    )
# NOTE(review): removed non-code web-scrape residue that trailed this file
# ("评论列表" / "文章目录" — i.e. "comment list" / "article TOC" page chrome).