import numpy as np

def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    # E2 is the error at the output layer: subtract the actual output y from the
    # estimated output Z2. There are 5 output nodes, so E2 holds 5 error values.
    E2 = Z2 - y
    # E1 is the error at the hidden layer: propagate the output-layer error E2
    # back through the weights W2 between the hidden and output layers.
    # There are 30 hidden nodes, so E1 holds 30 error values.
    E1 = np.dot(W2, np.transpose(E2))
    # Eb1 is the error for the hidden-layer bias: propagate the output-layer
    # error E2 back through the bias weights b2 that connect the bias node to
    # the output layer. There is 1 bias node, so Eb1 is a single error value.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1
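
As a quick sanity check on the returned shapes, the function can be called with dummy arrays. This is a minimal sketch only; the single-sample row-vector shapes, the 30-unit hidden layer, and the 5-unit output layer are assumptions taken from the comments above, not from the rest of the assignment.

# Hypothetical single-sample shapes, assuming 30 hidden nodes and 5 output nodes
# (these sizes come from the comments above, not from the rest of the assignment).
Z1_demo = np.random.rand(1, 30)                  # hidden-layer activations (not used by this function)
Z2_demo = np.random.rand(1, 5)                   # estimated network output
y_demo = np.zeros((1, 5)); y_demo[0, 2] = 1.0    # one-hot target output
W2_demo = np.random.rand(30, 5)                  # hidden-to-output weights
b2_demo = np.random.rand(1, 5)                   # bias-to-output weights

E2, E1, Eb1 = backPropagate(Z1_demo, Z2_demo, y_demo, W2_demo, b2_demo)
print(E2.shape, E1.shape, Eb1.shape)             # expected: (1, 5) (30, 1) (1, 1)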
# calculate the gradients for weights between units and the bias weights