def calc_score_of_histories(words, dropout=0.0):
    """Return the (unnormalized) next-word scores for a batch of histories.

    Args:
        words: batch of histories — one row per history, one column per
            position. Transposed so each column becomes a batched lookup.
            (assumes an int array-like of word ids — TODO confirm with caller)
        dropout: dropout rate applied to the hidden layer; 0.0 disables it.

    Returns:
        A DyNet expression of scores over the vocabulary (pre-softmax),
        batched over the input histories.
    """
    # Transpose: list of histories -> list of word ids per history position,
    # so each position can be looked up as one batched operation.
    positions = np.transpose(words)
    # Embed each position in batch, then concatenate along the feature axis.
    embedded = dy.concatenate([dy.lookup_batch(W_emb, pos) for pos in positions])
    # Hidden layer: tanh(W_h * emb + b_h)
    hidden_w = dy.parameter(W_h_p)
    hidden_b = dy.parameter(b_h_p)
    hidden = dy.tanh(dy.affine_transform([hidden_b, hidden_w, embedded]))
    # Regularize the hidden representation when a dropout rate is given.
    if dropout != 0.0:
        hidden = dy.dropout(hidden, dropout)
    # Output layer: W_sm * h + b_sm (softmax is applied by the caller).
    out_w = dy.parameter(W_sm_p)
    out_b = dy.parameter(b_sm_p)
    return dy.affine_transform([out_b, out_w, hidden])
# Calculate the loss value for the entire sentence
# NOTE(review): the two lines below were web-scrape residue, not Python code
# ("评论列表" = "comment list", "文章目录" = "article table of contents").
# They are kept here as comments to avoid a SyntaxError at import time.
# 评论列表
# 文章目录