def generate_text(session, model, config, starting_text='<eos>',
                  stop_length=100, stop_tokens=None, temp=1.0):
    """Generate text from a trained RNN language model, one token at a time.

    Each step feeds the single most recent token (batch of size one) plus the
    RNN state carried over from the previous step, fetches the model's final
    state and last-timestep prediction, and samples the next word index.

    Args:
        session: tf.Session() object used to run the model.
        model: Object of type RNNLM_Model (provides placeholders, vocab,
            initial_state, final_state, predictions).
        config: A Config() object (unused here; kept for interface parity
            with the rest of the assignment code).
        starting_text: Seed string; its words prime the model.
        stop_length: Maximum number of generation steps.
        stop_tokens: Optional collection of decoded words that end
            generation early when produced.
        temp: Softmax sampling temperature forwarded to sample().

    Returns:
        output: List of decoded words (the seed words followed by the
        generated ones).
    """
    state = model.initial_state.eval()
    # Encode the seed text into word indices; treated as a batch of one.
    tokens = [model.vocab.encode(word) for word in starting_text.split()]
    for i in xrange(stop_length):
        ### YOUR CODE HERE
        # Feed only the last token and the carried-over state; dropout is
        # disabled (keep prob 1) at generation time.
        feed = {model.input_placeholder: [tokens[-1:]],
                model.initial_state: state,
                model.dropout_placeholder: 1}
        # Fetch the updated RNN state and the last timestep's prediction.
        state, y_pred = session.run(
            [model.final_state, model.predictions[-1]], feed_dict=feed)
        ### END YOUR CODE
        # Sample the next word index from the predicted distribution.
        next_word_idx = sample(y_pred[0], temperature=temp)
        tokens.append(next_word_idx)
        if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
            break
    # Decode accumulated word indices back into words.
    output = [model.vocab.decode(word_idx) for word_idx in tokens]
    return output
# (stray page-scrape footer removed from code: "评论列表" / "文章目录",
#  i.e. the source blog's "comment list" / "table of contents" links)