def calculate_loss(self, X, y, model):
    """Compute the average softmax cross-entropy loss with L2 regularization.

    Forward-propagates X through a two-layer network (tanh hidden layer,
    softmax output) and returns the mean negative log-likelihood of the
    true labels, plus an L2 penalty on the two weight matrices.

    Parameters
    ----------
    X : ndarray, shape (num_examples, input_dim)
        Input features, one example per row.
    y : ndarray of int, shape (num_examples,)
        True class indices for each example.
    model : dict
        Network parameters: 'Wi' (input->hidden weights), 'bh' (hidden
        bias), 'Wh' (hidden->output weights), 'bo' (output bias).

    Returns
    -------
    float
        Regularized mean cross-entropy loss.
    """
    num_examples = len(X)
    reg_lambda = 0.01  # L2 regularization strength
    Wi, bh, Wh, bo = model['Wi'], model['bh'], model['Wh'], model['bo']

    # Forward propagation to calculate our predictions.
    hidden = np.tanh(np.dot(X, Wi) + bh)
    logits = np.dot(hidden, Wh) + bo

    # Numerically stable softmax: shift each row by its max so np.exp
    # cannot overflow; the subtraction cancels out in the normalization.
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exp_scores = np.exp(shifted)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

    # Mean negative log-likelihood of the correct classes.
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)

    # L2 penalty on the weight matrices (biases are not regularized).
    data_loss += reg_lambda / 2 * (np.sum(np.square(Wi)) + np.sum(np.square(Wh)))
    return 1. / num_examples * data_loss