def _do_run_query(self, language, tokenized, limit):
    # Unpack the preprocessed input: token sequence, extracted values, and
    # the constituency parse of the sentence.
    tokens, values, parse = tokenized
    print("Input", tokens, values)

    results = []
    config = language.config
    grammar = config.grammar

    with language.session.as_default():
        with language.session.graph.as_default():
            # Convert the tokens and the parse into fixed-length vectors.
            input, input_len = vectorize(tokens, config.dictionary, config.max_length)
            parse_vector = vectorize_constituency_parse(parse, config.max_length, input_len)

            # Run the model on a batch of size one.
            input_batch, input_length_batch, parse_batch = [input], [input_len], [parse_vector]
            sequences = language.model.predict_on_batch(language.session, input_batch,
                                                        input_length_batch, parse_batch)
            assert len(sequences) == 1

            # Walk the decoded beam, keeping at most `limit` hypotheses.
            for i, decoded in enumerate(sequences[0]):
                if i >= limit:
                    break

                decoded = list(decoded)
                try:
                    # Truncate at the end-of-sequence token, if present.
                    decoded = decoded[:decoded.index(grammar.end)]
                except ValueError:
                    pass
                decoded = [grammar.tokens[x] for x in decoded]
                print("Beam", i + 1, decoded)

                try:
                    json_rep = dict(answer=json.dumps(json_syntax.to_json(decoded, grammar, values)),
                                    prob=1. / len(sequences[0]),
                                    score=1)
                except Exception as e:
                    # Skip hypotheses that cannot be converted to JSON.
                    print("Failed to represent " + str(decoded) + " as json", e)
                    traceback.print_exc(file=sys.stdout)
                    continue
                results.append(json_rep)

    return results
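
The method vectorizes the tokenized sentence, runs the model's beam-search prediction on a batch of size one, maps each decoded hypothesis back to grammar tokens, and returns a list of dicts with the keys answer (a JSON string), prob, and score. The helpers vectorize() and vectorize_constituency_parse() are imported elsewhere in the project; as a point of reference, the following is a minimal, hypothetical sketch of what a vectorize() of this shape could look like, assuming the dictionary maps token strings to integer ids, sequences are zero-padded to max_length, and unknown tokens fall back to an assumed <unk> id of 1. It is not the project's actual implementation.

import numpy as np

def vectorize(tokens, dictionary, max_length):
    # Fixed-length integer vector, zero-padded (assumed padding id 0).
    vector = np.zeros((max_length,), dtype=np.int32)
    length = min(len(tokens), max_length)
    for i in range(length):
        # Assumed fallback id 1 for out-of-vocabulary tokens.
        vector[i] = dictionary.get(tokens[i], 1)
    return vector, length
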
Source file: query_handler.py