def train(self, x, y, learning_rate=1e-3, reg=1e-5, num_iter=1500, batch_size=200):
    """Train the classifier with mini-batch stochastic gradient descent."""
    num_train, num_feature = x.shape
    num_classes = np.max(y) + 1
    # Lazily initialize the weight matrix on the first call.
    if self.W is None:
        self.W = np.random.randn(num_feature, num_classes)
    loss_history = []
    acc_history = []
    for it in range(num_iter):
        # Sample a mini-batch (with replacement) from the training set.
        indices = np.random.choice(num_train, batch_size)
        x_batch = x[indices]
        y_batch = y[indices]
        # Loss and gradient on the mini-batch, plus its training accuracy.
        loss, grad = self.loss(x_batch, y_batch, reg)
        acc = self.accuracy(x_batch, y_batch)
        loss_history.append(loss)
        acc_history.append(acc)
        # Vanilla gradient descent update.
        self.W -= learning_rate * grad
        if it % 100 == 0:
            print("iteration {}/{} loss: {:.7f}".format(it, num_iter, loss))
    return loss_history, acc_history
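
For context, here is a minimal sketch of the kind of class this train method could belong to, so the loop above can be run end to end. The class name LinearClassifier, the softmax loss with L2 regularization, and the toy data are assumptions made for illustration only; the loss and accuracy implementations in the original class may differ (for example, an SVM hinge loss). If the train method is defined inside the class rather than at module level, place these methods in the same class instead of assigning train afterwards.

import numpy as np

class LinearClassifier:
    """Hypothetical sketch of the class that defines loss and accuracy."""

    def __init__(self):
        self.W = None  # initialized lazily by train()

    def loss(self, x_batch, y_batch, reg):
        # Assumed here: softmax loss with L2 regularization.
        num = x_batch.shape[0]
        scores = x_batch.dot(self.W)
        scores -= scores.max(axis=1, keepdims=True)  # numeric stability
        exp_scores = np.exp(scores)
        probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
        data_loss = -np.log(probs[np.arange(num), y_batch]).mean()
        loss = data_loss + reg * np.sum(self.W ** 2)
        dscores = probs.copy()
        dscores[np.arange(num), y_batch] -= 1
        grad = x_batch.T.dot(dscores) / num + 2 * reg * self.W
        return loss, grad

    def accuracy(self, x_batch, y_batch):
        pred = x_batch.dot(self.W).argmax(axis=1)
        return np.mean(pred == y_batch)

# Attach the train method shown above (assuming it sits in the same module).
LinearClassifier.train = train

# Synthetic toy data: 500 samples, 10 features, 3 classes (placeholder values).
x_train = np.random.randn(500, 10)
y_train = np.random.randint(0, 3, size=500)

clf = LinearClassifier()
loss_history, acc_history = clf.train(x_train, y_train,
                                      learning_rate=1e-3, reg=1e-5,
                                      num_iter=500, batch_size=200)
print("final batch accuracy: {:.3f}".format(acc_history[-1]))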