def get_batch(self, batch_size):
    """Fetch the next `batch_size` training images as a uint8 NHWC array.

    Images are read one by one through an internal wrap-around cursor
    (`self.counter`) over `self.train_index`, so successive calls cycle
    through the whole training set.

    Parameters:
        batch_size: number of images to load into the batch.

    Returns:
        numpy array of shape (batch_size, H, W, 3), dtype uint8, where
        H == W == self.opt.image_size.
    """
    print("Loading %d real images of max possible %d..." % (batch_size, self.train_index.size(0)))
    index_count = self.train_index.size(0)
    batch = torch.Tensor(batch_size, 3, self.opt.image_size, self.opt.image_size)
    for slot in range(batch_size):
        sample = self.get_data(self.train_index[self.counter])
        batch[slot].copy_(sample)
        # Advance the cursor, wrapping back to the start of the index.
        self.counter = (self.counter + 1) % index_count
    # Float CHW in [0, 1] -> uint8 HWC, the layout image consumers expect.
    # NOTE(review): assumes get_data returns values in [0, 1] — confirm.
    scaled = (batch.cpu().numpy() * 255).astype(np.uint8)
    return scaled.transpose((0, 2, 3, 1))
#
# Background processes below
# TensorFlow and PyTorch code is separated into different processes, because
# otherwise the two frameworks conflict with each other.
# The whole measuring of inception scores for checkpoints runs in background
# processes, because otherwise Python does not free the memory properly and
# sooner or later runs into out-of-memory errors.
#
calculate_inception_scores.py 文件源码
python
阅读 33
收藏 0
点赞 0
评论 0
评论列表
文章目录