def _optim(self, xys):
idx = np.arange(len(xys))
self.batch_size = np.ceil(len(xys) / self.nbatches)
batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)
for self.epoch in range(1, self.max_epochs + 1):
# shuffle training examples
self._pre_epoch()
shuffle(idx)
# store epoch for callback
self.epoch_start = timeit.default_timer()
# process mini-batches
for batch in np.split(idx, batch_idx):
# select indices for current batch
bxys = [xys[z] for z in batch]
self._process_batch(bxys)
# check callback function, if false return
for f in self.post_epoch:
if not f(self):
break