import numpy as np
from scipy.special import expit as phi  # logistic sigmoid: phi(t) = 1 / (1 + exp(-t))

def gradient(x0, X, y, alpha):
    # Gradient of the L2-regularised logistic loss.
    # x0 packs the intercept in x0[0] and the weights in x0[1:];
    # labels y are expected in {-1, +1}.
    w, c = x0[1:], x0[0]
    z = X.dot(w) + c
    z = phi(y * z)
    z0 = (z - 1) * y
    grad_w = np.matmul(z0, X) / X.shape[0] + alpha * w  # averaged over samples, plus L2 penalty on w
    grad_c = z0.sum() / X.shape[0]                      # intercept gradient is not regularised
    return np.concatenate(([grad_c], grad_w))           # flat vector: intercept first, then weights
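# For reference, a minimal usage sketch on synthetic data; the shapes, labels,
# and alpha below are illustrative assumptions, not values from the original file.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))            # 200 samples, 5 features (assumed)
y = np.where(X[:, 0] > 0, 1.0, -1.0)     # labels in {-1, +1}, as the loss expects
x0 = np.zeros(X.shape[1] + 1)            # intercept followed by weights
g = gradient(x0, X, y, alpha=0.1)
print(g.shape)                           # (6,): one intercept entry plus 5 weight entries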
##### Stochastic Gradient Descent Optimiser #####
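# The listing ends at this header without the optimiser body. What follows is a
# minimal mini-batch SGD sketch built on gradient() above, not the author's
# implementation; the learning rate eta, batch_size, n_epochs, and seed are all
# illustrative assumptions.
def sgd(X, y, alpha, eta=0.1, n_epochs=10, batch_size=32, seed=0):
    rng = np.random.default_rng(seed)
    params = np.zeros(X.shape[1] + 1)      # intercept + weights, matching gradient()
    n = X.shape[0]
    for _ in range(n_epochs):
        order = rng.permutation(n)         # reshuffle samples each epoch
        for start in range(0, n, batch_size):
            idx = order[start:start + batch_size]
            params -= eta * gradient(params, X[idx], y[idx], alpha)  # step along mini-batch gradient
    return params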
# Source file: Logistic_Regressor.py