def eval_one_dataset(self, sess, dataset, save_dir, subset='train'):
    # Assumes module-level `import numpy as np` and a config object `cfg`
    # (e.g. `from misc.config import cfg`), as in the StackGAN codebase.
    count = 0
    print('num_examples:', dataset._num_examples)
    while count < dataset._num_examples:
        start = count % dataset._num_examples
        images, embeddings_batchs, filenames, _ = \
            dataset.next_batch_test(self.batch_size, start, 1)
        print('count = ', count, 'start = ', start)
        # Each entry of embeddings_batchs holds one caption embedding
        # per image in the current batch.
        for i in range(len(embeddings_batchs)):
            samples_batchs = []
            # Generate up to 16 images for each sentence,
            # with randomness from noise z and conditioning augmentation.
            for j in range(np.minimum(16, cfg.TRAIN.NUM_COPY)):
                samples = sess.run(self.fake_images,
                                   {self.embeddings: embeddings_batchs[i]})
                samples_batchs.append(samples)
            # Tile the generated samples next to the real image and save.
            self.save_super_images(images, samples_batchs,
                                   filenames, i, save_dir,
                                   subset)
        count += self.batch_size
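
For reference, this is roughly how the method would be driven at evaluation time. A minimal sketch, assuming a TF1-style graph: `trainer` is any object that provides this method plus the `fake_images` and `embeddings` tensors, `test_dataset` exposes `_num_examples` and `next_batch_test` as used above, and the checkpoint and output paths are illustrative, not taken from the snippet:

    import tensorflow as tf

    # Hypothetical driver; `trainer` and `test_dataset` are assumed names.
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # Illustrative checkpoint path; restore the trained generator weights.
        saver.restore(sess, 'models/stage2.ckpt')
        trainer.eval_one_dataset(sess, test_dataset,
                                 save_dir='samples/test',  # illustrative
                                 subset='test')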