def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if FLAGS.is_single:
                dcgan.single_test(FLAGS.checkpoint_dir, FLAGS.file_name)
            elif FLAGS.is_small:
                dcgan.batch_test2(FLAGS.checkpoint_dir)
            else:
                dcgan.batch_test(FLAGS.checkpoint_dir, FLAGS.file_name)
            # dcgan.load(FLAGS.checkpoint_dir)
            # dcgan.single_test(FLAGS.checkpoint_dir)
            # dcgan.batch_test(FLAGS.checkpoint_dir)

        """
        if FLAGS.visualize:
            to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
        """
def main(_):
    loader = Loader(FLAGS.data_dir, FLAGS.data, FLAGS.batch_size)
    print("# of data: {}".format(loader.data_num))

    with tf.Session() as sess:
        lsgan = LSGAN([FLAGS.batch_size, 112, 112, 3])
        sess.run(tf.global_variables_initializer())

        for epoch in range(10000):
            loader.reset()
            for step in range(int(loader.batch_num / FLAGS.d)):
                # Sample the generator every 100 epochs. (The condition in the source,
                # `epoch % 1 == 100`, can never be true; this is the presumed intent.)
                if step == 0 and epoch % 100 == 0:
                    utils.visualize(sess.run(lsgan.gen_img), epoch)

                # FLAGS.d discriminator updates per generator update
                for _ in range(FLAGS.d):
                    batch = np.asarray(loader.next_batch(), dtype=np.float32)
                    batch = (batch - 127.5) / 127.5  # scale pixels to [-1, 1]
                    #print("{}".format(batch.shape))
                    feed = {lsgan.X: batch}
                    _ = sess.run(lsgan.d_train_op, feed_dict=feed)
                    #utils.visualize(batch, (epoch+1)*100)
                    #cv2.namedWindow("window")
                    #cv2.imshow("window", cv2.cvtColor(batch[0], cv2.COLOR_RGB2BGR))
                    #cv2.waitKey(0)
                    #cv2.destroyAllWindows()
                _ = sess.run(lsgan.g_train_op)
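The snippets above repeatedly call a project-local visualize() helper on a batch of generated images, but the helper itself is never shown. A minimal sketch of what such a helper might look like, assuming the images arrive as a float32 batch in [-1, 1] with shape [N, H, W, 3]; the signature, output path, and use of OpenCV are assumptions, not the original implementation:

import os
import numpy as np
import cv2

def visualize(images, step, out_dir="samples"):
    """Tile a batch of generated images into a square grid and save it as a PNG."""
    images = ((images + 1.0) * 127.5).astype(np.uint8)  # [-1, 1] -> [0, 255]
    n, h, w, c = images.shape
    side = int(np.ceil(np.sqrt(n)))
    grid = np.zeros((side * h, side * w, c), dtype=np.uint8)
    for idx, img in enumerate(images):
        row, col = divmod(idx, side)
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w] = img
    os.makedirs(out_dir, exist_ok=True)
    cv2.imwrite(os.path.join(out_dir, "sample_{:06d}.png".format(step)),
                cv2.cvtColor(grid, cv2.COLOR_RGB2BGR))
    return grid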
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    sample_dir_ = os.path.join(FLAGS.sample_dir, FLAGS.name)
    checkpoint_dir_ = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)
    log_dir_ = os.path.join(FLAGS.log_dir, FLAGS.name)
    if not os.path.exists(checkpoint_dir_):
        os.makedirs(checkpoint_dir_)
    if not os.path.exists(sample_dir_):
        os.makedirs(sample_dir_)
    if not os.path.exists(log_dir_):
        os.makedirs(log_dir_)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=checkpoint_dir_, sample_dir=sample_dir_, log_dir=log_dir_)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sampling(FLAGS)

        if FLAGS.visualize:
            to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is code for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    sample_dir_ = os.path.join(FLAGS.sample_dir, FLAGS.name)
    checkpoint_dir_ = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)
    log_dir_ = os.path.join(FLAGS.log_dir, FLAGS.name)
    if not os.path.exists(checkpoint_dir_):
        os.makedirs(checkpoint_dir_)
    if not os.path.exists(sample_dir_):
        os.makedirs(sample_dir_)
    if not os.path.exists(log_dir_):
        os.makedirs(log_dir_)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=checkpoint_dir_, sample_dir=sample_dir_, log_dir=log_dir_)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.sampling(FLAGS)
            #dcgan.load(FLAGS.checkpoint_dir)

        if FLAGS.visualize:
            to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is code for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        dcgan = DCGAN(sess, image_size=FLAGS.image_size, output_size=FLAGS.output_size,
                      batch_size=FLAGS.batch_size, sample_size=FLAGS.sample_size)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        if FLAGS.visualize:
            # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
            #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
            #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
            #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
            #         [dcgan.h4_w, dcgan.h4_b, None])

            # Below is code for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        wgan = WGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            input_water_width=FLAGS.input_water_width,
            input_water_height=FLAGS.input_water_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            c_dim=FLAGS.c_dim,
            max_depth=FLAGS.max_depth,
            save_epoch=FLAGS.save_epoch,
            water_dataset_name=FLAGS.water_dataset,
            air_dataset_name=FLAGS.air_dataset,
            depth_dataset_name=FLAGS.depth_dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            is_crop=FLAGS.is_crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            results_dir=FLAGS.results_dir,
            sample_dir=FLAGS.sample_dir,
            num_samples=FLAGS.num_samples)

        if FLAGS.is_train:
            wgan.train(FLAGS)
        else:
            if not wgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
            wgan.test(FLAGS)

        # to_json("./web/js/layers.js", [wgan.h0_w, wgan.h0_b, wgan.g_bn0],
        #         [wgan.h1_w, wgan.h1_b, wgan.g_bn1],
        #         [wgan.h2_w, wgan.h2_b, wgan.g_bn2],
        #         [wgan.h3_w, wgan.h3_b, wgan.g_bn3],
        #         [wgan.h4_w, wgan.h4_b, None])

        # Below is code for visualization
        #OPTION = 1
        #visualize(sess, wgan, FLAGS, OPTION)
def train(self, config=None):
    #mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_dat", one_hot=True)
    loader = Loader(config.data_dir, config.data, config.batch_size)
    loaded = False
    if not config.reset:
        loaded, global_step = self.restore(config.checkpoint_dir)
    if not loaded:
        tf.global_variables_initializer().run()
        global_step = 0

    d_losses = []
    g_losses = []
    steps = []
    gif = []
    for epoch in range(config.epoch):
        loader.reset()
        #for idx in range(config.step):
        for idx in range(loader.batch_num):
            #batch_X, _ = mnist.train.next_batch(config.batch_size)
            #batch_X = batch_X.reshape([-1]+self.in_dim)
            batch_X = np.asarray(loader.next_batch(), dtype=np.float32)
            #batch_X = (batch_X*255.-127.5)/127.5
            batch_X = (batch_X - 127.5) / 127.5
            batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim])
            _, d_loss = self.sess.run([self.d_train_op, self.d_loss],
                                      feed_dict={self.X: batch_X, self.z: batch_z})
            _, g_loss = self.sess.run([self.g_train_op, self.g_loss],
                                      feed_dict={self.z: batch_z})
            d_losses.append(d_loss)
            g_losses.append(g_loss)
            steps.append(global_step)
            global_step += 1
        print(" [Epoch {}] d_loss:{}, g_loss:{}".format(epoch, d_loss, g_loss))
        batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim])
        imgs = self.sess.run(self.sampler, feed_dict={self.z: batch_z})
        gif.append(visualize(imgs, epoch, config.data))

    self.save("{}_{}".format(config.checkpoint_dir, config.data), global_step, model_name="dcgan")
    plot({'d_loss': d_losses, 'g_loss': g_losses}, steps, title="DCGAN loss ({})".format(config.data),
         x_label="Step", y_label="Loss")
    save_gif(gif, "gen_img_{}".format(config.data))
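train() collects one grid of samples per epoch (the return value of visualize()) and hands the list to save_gif(), which is not shown above. A minimal sketch of such a helper using imageio; the signature, output path, and frame format are assumptions, not the original code:

import imageio
import numpy as np

def save_gif(frames, name, out_dir="samples", duration=0.5):
    """Write a list of HxWxC uint8 frames out as an animated GIF."""
    frames = [np.asarray(f, dtype=np.uint8) for f in frames]
    imageio.mimsave("{}/{}.gif".format(out_dir, name), frames, duration=duration)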
def run(checkpoint_dir='checkpoints', batch_size=64, input_height=108, input_width=None,
        output_height=64, output_width=None, dataset='celebA', input_fname_pattern='*.jpg',
        output_dir='output', sample_dir='samples', crop=True):
    #pp.pprint(flags.FLAGS.__flags)
    if input_width is None:
        input_width = input_height
    if output_width is None:
        output_width = output_height

    #if not os.path.exists(checkpoint_dir):
    #    os.makedirs(checkpoint_dir)
    #if not os.path.exists(output_dir):
    #    os.makedirs(output_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
            sess,
            input_width=input_width,
            input_height=input_height,
            output_width=output_width,
            output_height=output_height,
            batch_size=batch_size,
            sample_num=batch_size,
            dataset_name=dataset,
            input_fname_pattern=input_fname_pattern,
            crop=crop,
            checkpoint_dir=checkpoint_dir,
            sample_dir=sample_dir,
            output_dir=output_dir)
        show_all_variables()

        try:
            tf.global_variables_initializer().run()
        except:
            tf.initialize_all_variables().run()

        # Below is code for visualization
        visualize(sess, dcgan, batch_size=batch_size, input_height=input_height,
                  input_width=input_width, output_dir=output_dir)
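Unlike the FLAGS-driven main() functions above, run() takes its configuration as keyword arguments, so it can be invoked directly; a minimal call (the argument values here are illustrative):

if __name__ == '__main__':
    run(checkpoint_dir='checkpoints', dataset='celebA', batch_size=64, crop=True)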