import numpy as np
from chainer import Variable, optimizers


def learn_by_mini_batch(dataset: DataSet, batch_size: int = 25, n_iter: int = 5000) -> IrisChain:
    n = len(dataset.train)
    model = IrisChain()
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    # Chainer links use float32 parameters by default, so cast the pandas values accordingly.
    x_train = dataset.train.drop('class', axis=1).values.astype(np.float32)
    y_train = to_hot_vector(dataset.train['class']).values.astype(np.float32)
    for j in range(n_iter):
        # Reshuffle the training indices at every pass over the data.
        shuffled = np.random.permutation(n)
        for i in range(0, n, batch_size):
            # Take the next mini-batch of at most batch_size samples.
            indices = shuffled[i:i + batch_size]
            x = Variable(x_train[indices])
            y = Variable(y_train[indices])
            # Reset accumulated gradients, compute the loss, backpropagate, and update.
            model.cleargrads()
            loss = model(x, y)
            loss.backward()
            optimizer.update()
    return model
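A minimal usage sketch is shown below; it assumes the DataSet wrapper and IrisChain model defined earlier in the post, and the no-argument DataSet() constructor here is an illustrative assumption rather than part of the original code.

    # Hypothetical usage; DataSet and IrisChain come from earlier sections of the post.
    dataset = DataSet()  # assumed constructor that loads and splits the Iris data
    trained = learn_by_mini_batch(dataset, batch_size=25, n_iter=5000)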