def evaluate():
    """Evaluate the trained network by writing a comparison video.

    Restores the latest checkpoint from ``RESTORE_DIR``, primes the recurrent
    compressed state from frame 0, then for each of ``FLAGS.video_length``
    steps runs the network one step forward and writes a side-by-side frame
    (generated | true | absolute difference) to the global ``video`` writer.

    Side effects: reads checkpoint files, writes frames to ``video`` and
    releases it. Calls ``exit()`` if no checkpoint is found.
    """
    with tf.Graph().as_default():
        # Build placeholder inputs (empty=True: fed manually, no input pipeline).
        state, boundary = inputs(empty=True, shape=shape)
        # One step of the continual unroll: y_1 is the compressed state fed
        # back in each iteration, y_2 the next compressed state, x_2 the
        # generated lattice.
        y_1, small_boundary_mul, small_boundary_add, x_2, y_2 = continual_unroll_template(state, boundary)
        # Convert both the generated and the true lattice to velocity-norm
        # fields for visual comparison.
        x_2_add = add_lattice(x_2)
        state_add = add_lattice(state)
        velocity_generated = lattice_to_vel(x_2_add)
        velocity_norm_generated = vel_to_norm(velocity_generated)
        velocity_true = lattice_to_vel(state_add)
        velocity_norm_true = vel_to_norm(velocity_true)

        # Restore every variable in the graph from the latest checkpoint.
        variables_to_restore = tf.all_variables()
        saver = tf.train.Saver(variables_to_restore)
        ckpt = tf.train.get_checkpoint_state(RESTORE_DIR)
        if not (ckpt and ckpt.model_checkpoint_path):
            # Guard clause; message typo fixed ("chekcpoint" -> "checkpoint").
            print("no checkpoint file found from " + RESTORE_DIR + ", this is an error")
            exit()

        # Context manager guarantees the session is closed (the original
        # leaked it).
        with tf.Session() as sess:
            print("restoring file from " + ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Prime the recurrent compressed state from frame 0.
            state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, 0)
            fd = {state: state_feed_dict, boundary: boundary_feed_dict}
            y_1_g, small_boundary_mul_g, small_boundary_add_g = sess.run(
                [y_1, small_boundary_mul, small_boundary_add], feed_dict=fd)

            # Generate the video frame by frame.
            for step in tqdm(xrange(FLAGS.video_length)):
                # Feed the true state for this step plus the compressed state
                # generated at the previous step.
                state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, step)
                fd = {state: state_feed_dict, boundary: boundary_feed_dict,
                      y_1: y_1_g, small_boundary_mul: small_boundary_mul_g,
                      small_boundary_add: small_boundary_add_g}
                v_n_g, v_n_t, y_1_g = sess.run(
                    [velocity_norm_generated, velocity_norm_true, y_2], feed_dict=fd)

                # For 3D simulations, display a 2D slice (z index 10).
                # NOTE(review): slice index 10 is hard-coded — presumably mid
                # plane; confirm against the simulation shape.
                if not d2d:
                    v_n_g = v_n_g[:, 10]
                    v_n_t = v_n_t[:, 10]
                # Frame layout: [generated | true | abs error], batch entry 0.
                frame = np.concatenate([v_n_g, v_n_t, np.abs(v_n_g - v_n_t)], 2)[0]
                # Normalize to uint8, capping the scale at .25 so small
                # differences remain visible. The epsilon guards against
                # division by zero on an all-zero frame (a crash in the
                # original).
                frame = np.uint8(255 * frame / max(min(.25, np.max(frame)), 1e-10))
                frame = cv2.applyColorMap(frame[:, :, 0], 2)
                # Write frame to video.
                video.write(frame)

        # Release the video writer and any OpenCV windows.
        video.release()
        cv2.destroyAllWindows()
# NOTE: removed stray blog-page navigation text ("评论列表" / "文章目录",
# i.e. "comment list" / "article table of contents") left over from a
# copy-paste; it was not Python and broke the file.