def prepare_data(path, word2idx, num_threads=8, **opts):
    """Build the queue-backed input pipeline and preprocessing ops.

    Parameters
    ----------
    path : str
        Data location handed straight to ``get_input_queues``.
    word2idx : dict
        Token -> integer-id vocabulary mapping, forwarded to the queue builder.
    num_threads : int, optional
        Number of enqueueing threads (default 8).
    **opts
        Must contain ``batch_size``; a ``KeyError`` is raised otherwise.

    Returns
    -------
    tuple
        ``(enqueue_data, input_ph, source, target, sequence_length)``, where
        ``input_ph`` is a placeholder that defaults to the dequeued batch and
        the last three values come from ``preprocess(input_ph)``.
    """
    # Pin the queue/input ops to the CPU so accelerator devices stay free
    # for the model itself.
    with tf.device("/cpu:0"):
        enqueue_data, dequeue_batch = get_input_queues(
            path, word2idx,
            batch_size=opts["batch_size"],
            num_threads=num_threads)
        # TODO: put this logic somewhere else
        # If nothing is fed explicitly, the placeholder falls back to the
        # live dequeued batch. NOTE(review): shape (None, None) presumably
        # means (batch, time) token-id matrices — confirm against preprocess.
        input_ph = tf.placeholder_with_default(dequeue_batch, (None, None))
        source, target, sequence_length = preprocess(input_ph)
    return enqueue_data, input_ph, source, target, sequence_length
# Leftover webpage navigation text from the source this snippet was copied
# from — not part of the code:
#   评论列表 ("comment list") / 文章目录 ("article table of contents")