def update_recurrent_unit(self):
    # Weights and Bias for input and hidden tensor
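    # Soft update: keep a fraction update_rate of this unit's own weights and
    # pull the remaining (1 - update_rate) toward the weights of self.lstm
    # (an exponential-moving-average style parameter update).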
    self.Wi = self.update_rate * self.Wi + (1 - self.update_rate) * tf.identity(self.lstm.Wi)
    self.Ui = self.update_rate * self.Ui + (1 - self.update_rate) * tf.identity(self.lstm.Ui)
    self.bi = self.update_rate * self.bi + (1 - self.update_rate) * tf.identity(self.lstm.bi)

    self.Wf = self.update_rate * self.Wf + (1 - self.update_rate) * tf.identity(self.lstm.Wf)
    self.Uf = self.update_rate * self.Uf + (1 - self.update_rate) * tf.identity(self.lstm.Uf)
    self.bf = self.update_rate * self.bf + (1 - self.update_rate) * tf.identity(self.lstm.bf)

    self.Wog = self.update_rate * self.Wog + (1 - self.update_rate) * tf.identity(self.lstm.Wog)
    self.Uog = self.update_rate * self.Uog + (1 - self.update_rate) * tf.identity(self.lstm.Uog)
    self.bog = self.update_rate * self.bog + (1 - self.update_rate) * tf.identity(self.lstm.bog)

    self.Wc = self.update_rate * self.Wc + (1 - self.update_rate) * tf.identity(self.lstm.Wc)
    self.Uc = self.update_rate * self.Uc + (1 - self.update_rate) * tf.identity(self.lstm.Uc)
    self.bc = self.update_rate * self.bc + (1 - self.update_rate) * tf.identity(self.lstm.bc)
    def unit(x, hidden_memory_tm1):
        # tf.unpack was renamed tf.unstack in TensorFlow 1.0
        previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)
        # Input Gate
        i = tf.sigmoid(
            tf.matmul(x, self.Wi) +
            tf.matmul(previous_hidden_state, self.Ui) + self.bi
        )

        # Forget Gate
        f = tf.sigmoid(
            tf.matmul(x, self.Wf) +
            tf.matmul(previous_hidden_state, self.Uf) + self.bf
        )

        # Output Gate
        o = tf.sigmoid(
            tf.matmul(x, self.Wog) +
            tf.matmul(previous_hidden_state, self.Uog) + self.bog
        )

        # New Memory Cell
        c_ = tf.nn.tanh(
            tf.matmul(x, self.Wc) +
            tf.matmul(previous_hidden_state, self.Uc) + self.bc
        )

        # Final Memory cell
        c = f * c_prev + i * c_

        # Current Hidden state
        current_hidden_state = o * tf.nn.tanh(c)
        # tf.pack was renamed tf.stack in TensorFlow 1.0
        return tf.stack([current_hidden_state, c])

    return unit
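A quick way to sanity-check what this function computes: the block at the top is an exponential-moving-average style soft update of every gate's parameters toward self.lstm, and the returned unit maps an input x plus a stacked (hidden state, cell state) pair to the next stacked pair, i.e. one standard LSTM step. The sketch below reproduces both pieces in plain NumPy, purely as an illustration of the arithmetic rather than part of the original TensorFlow graph; the shapes (emb_dim, hidden_dim), the toy update_rate value, and the weight sharing across gates are all made-up simplifications.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

emb_dim, hidden_dim = 4, 8               # assumed toy sizes
rng = np.random.default_rng(0)

# One gate's weights in the updated unit (self.W*) and in self.lstm (self.lstm.W*).
W_own = rng.normal(size=(emb_dim, hidden_dim))
W_lstm = rng.normal(size=(emb_dim, hidden_dim))

# Soft update: keep update_rate of the old weights,
# move (1 - update_rate) toward self.lstm's current weights.
update_rate = 0.8
W_own = update_rate * W_own + (1 - update_rate) * W_lstm

# One LSTM step mirroring unit(); weights shared across gates only for brevity.
Wi = Wf = Wo = Wc = W_own
Ui = Uf = Uo = Uc = rng.normal(size=(hidden_dim, hidden_dim))
bi = bf = bo = bc = np.zeros(hidden_dim)

x = rng.normal(size=(1, emb_dim))        # current input embedding
h_prev = np.zeros((1, hidden_dim))       # previous hidden state
c_prev = np.zeros((1, hidden_dim))       # previous cell state

i = sigmoid(x @ Wi + h_prev @ Ui + bi)       # input gate
f = sigmoid(x @ Wf + h_prev @ Uf + bf)       # forget gate
o = sigmoid(x @ Wo + h_prev @ Uo + bo)       # output gate
c_hat = np.tanh(x @ Wc + h_prev @ Uc + bc)   # candidate memory cell

c = f * c_prev + i * c_hat               # final memory cell
h = o * np.tanh(c)                       # current hidden state
print(h.shape, c.shape)                  # (1, 8) (1, 8)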