def run():
    """Runs evaluation in a loop, and logs summaries to TensorBoard."""
    # Ensure the directory that will hold TensorBoard event files exists.
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)

    graph = tf.Graph()
    with graph.as_default():
        # Build the input pipeline and the model in evaluation mode.
        # NOTE(review): `mode`, `eval_dir`, `input_file_pattern`,
        # `train_inception` and `eval_interval_secs` are module-level
        # names not visible in this chunk — presumably flags; verify.
        images, input_seqs, target_seqs, input_mask = Build_Inputs(
            mode, input_file_pattern)
        net_image_embeddings = Build_Image_Embeddings(
            mode, images, train_inception)
        net_seq_embeddings = Build_Seq_Embeddings(input_seqs)
        _, target_cross_entropy_losses, target_cross_entropy_loss_weights, network = \
            Build_Model(mode, net_image_embeddings, net_seq_embeddings,
                        target_seqs, input_mask)

        # Non-trainable counter; restored from training checkpoints so
        # summaries are logged against the trainer's step number.
        global_step = tf.Variable(
            initial_value=0,
            dtype=tf.int32,
            name="global_step",
            trainable=False,
            collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.VARIABLES])

        # Saver restores model variables from the latest checkpoint.
        saver = tf.train.Saver()

        # Summary op and writer feed results to TensorBoard.
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(eval_dir)

        # Freeze the graph so nothing below can mutate it by accident.
        graph.finalize()

        # Evaluate forever, once every eval_interval_secs.
        while True:
            eval_started_at = time.time()
            tf.logging.info("Starting evaluation at " + time.strftime(
                "%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(global_step, target_cross_entropy_losses,
                     target_cross_entropy_loss_weights, saver,
                     summary_writer, summary_op)
            # Sleep only for whatever remains of the interval after
            # accounting for how long the evaluation pass itself took.
            seconds_until_next = (
                eval_started_at + eval_interval_secs - time.time())
            if seconds_until_next > 0:
                time.sleep(seconds_until_next)