def forward_prop_step(self, x_t, s_t1_prev, s_t2_prev):
    # This is how we calculated the hidden state in a simple RNN. No longer!
    # s_t = T.tanh(U[:,x_t] + W.dot(s_t1_prev))

    # Word embedding layer: x_t is a one-hot word vector, so E.dot(x_t)
    # selects the corresponding column of the embedding matrix E
    x_e = self.E.dot(x_t)

    # GRU Layer 1: update gate z, reset gate r, candidate state c
    z_t1 = sigmoid(self.U[0].dot(x_e) + self.W[0].dot(s_t1_prev) + self.b[0])
    r_t1 = sigmoid(self.U[1].dot(x_e) + self.W[1].dot(s_t1_prev) + self.b[1])
    c_t1 = np.tanh(self.U[2].dot(x_e) + self.W[2].dot(s_t1_prev * r_t1) + self.b[2])
    # New state: interpolate between the candidate and the previous state
    s_t1 = (1.0 - z_t1) * c_t1 + z_t1 * s_t1_prev

    # GRU Layer 2: same gating, but fed by the first layer's state s_t1
    z_t2 = sigmoid(self.U[3].dot(s_t1) + self.W[3].dot(s_t2_prev) + self.b[3])
    r_t2 = sigmoid(self.U[4].dot(s_t1) + self.W[4].dot(s_t2_prev) + self.b[4])
    c_t2 = np.tanh(self.U[5].dot(s_t1) + self.W[5].dot(s_t2_prev * r_t2) + self.b[5])
    s_t2 = (1.0 - z_t2) * c_t2 + z_t2 * s_t2_prev

    # Final output: unnormalized scores over the vocabulary
    # (a softmax over o_t turns these into word probabilities)
    o_t = self.V.dot(s_t2) + self.c
    return [o_t, s_t1, s_t2]
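To see the step in action, here is a minimal, self-contained sketch that wires it up with NumPy. The TinyGRU container, its word_dim/hidden_dim sizes, and the logistic sigmoid helper are all demo assumptions (they are not the tutorial's actual class or dimensions, and the Theano version uses T.nnet.hard_sigmoid); it also assumes forward_prop_step above is available at module level so it can be attached to the container.

import numpy as np

def sigmoid(x):
    # Plain logistic function; stands in for the hard sigmoid used in the
    # Theano version of this code
    return 1.0 / (1.0 + np.exp(-x))

class TinyGRU:
    # Hypothetical parameter container with the shapes forward_prop_step
    # expects; word_dim and hidden_dim are made-up demo sizes
    def __init__(self, word_dim=8, hidden_dim=4):
        rng = np.random.default_rng(0)
        self.E = rng.standard_normal((hidden_dim, word_dim)) * 0.1   # embeddings
        # Six (U, W, b) triples: z, r, c gates for each of the two GRU layers
        self.U = rng.standard_normal((6, hidden_dim, hidden_dim)) * 0.1
        self.W = rng.standard_normal((6, hidden_dim, hidden_dim)) * 0.1
        self.b = np.zeros((6, hidden_dim))
        self.V = rng.standard_normal((word_dim, hidden_dim)) * 0.1   # output proj.
        self.c = np.zeros(word_dim)

# Attach the method defined above (assumes it is in scope at module level)
TinyGRU.forward_prop_step = forward_prop_step

model = TinyGRU()
s1 = np.zeros(4)   # initial hidden state, layer 1
s2 = np.zeros(4)   # initial hidden state, layer 2
for word_index in [3, 1, 5]:     # a short sequence of word indices
    x_t = np.zeros(8)
    x_t[word_index] = 1.0        # one-hot encoding of the current word
    o_t, s1, s2 = model.forward_prop_step(x_t, s1, s2)
    print(o_t.shape)             # (8,): unnormalized scores per vocab word

Note how the hidden states s1 and s2 are threaded through the loop: each call consumes the previous states and returns the new ones, which is exactly what a scan over a full sentence does during training.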