def _init_summaries(self):
if self.is_train:
logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')
self.summary_writer = tf.summary.FileWriter(logdir)
self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
for i in range(16)]
tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections = [KEY_SUMMARIES])
for i in range(16):
tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
collections = [KEY_SUMMARIES_PER_JOINT[i]])
self.create_summary_from_weights()
self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
else:
logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
self.summary_writer = tf.summary.FileWriter(logdir)
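All of the snippets on this page target the pre-1.0 summary API. As a quick orientation, here is a minimal self-contained sketch of the same write-summaries workflow, with the TF 1.x renames noted in comments (the /tmp/summary_demo path is just an example):

# Minimal sketch of the summary workflow used throughout these examples
# (pre-1.0 API; the TF 1.x equivalents are noted inline).
import tensorflow as tf

x = tf.Variable(0.0, name='x')
tf.scalar_summary('x', x)                             # TF 1.x: tf.summary.scalar
summary_op = tf.merge_all_summaries()                 # TF 1.x: tf.summary.merge_all
writer = tf.train.SummaryWriter('/tmp/summary_demo')  # TF 1.x: tf.summary.FileWriter

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())           # TF 1.x: tf.global_variables_initializer
    summary_str = sess.run(summary_op)
    writer.add_summary(summary_str, 0)
    writer.flush()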
def test():
with tf.Graph().as_default():
image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
logits = model.inference(image)
top_k_op = tf.nn.in_top_k(logits, label, 1)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
        # Get summaries for TensorBoard
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(input.FLAGS.eval_dir, graph_def=graph_def)
while True:
evaluate_model(saver, summary_writer, top_k_op, summary_op)
if input.FLAGS.run_once:
break
time.sleep(input.FLAGS.eval_interval_secs)
def build_summaries():
episode_reward = tf.Variable(0.)
scalar_summary("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
scalar_summary("Qmax Value", episode_ave_max_q)
logged_epsilon = tf.Variable(0.)
scalar_summary("Epsilon", logged_epsilon)
    # Threads shouldn't modify the main graph, so we use placeholders to
    # assign each summary variable's value (rather than calling assign() in
    # every thread, which would keep adding new ops to the graph).
summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
summary_placeholders = [tf.placeholder("float")
for i in range(len(summary_vars))]
assign_ops = [summary_vars[i].assign(summary_placeholders[i])
for i in range(len(summary_vars))]
summary_op = merge_all_summaries()
return summary_placeholders, assign_ops, summary_op
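A hedged usage sketch for the triple returned by build_summaries(): each worker feeds its Python-side stats through the placeholders, runs the assign ops, then evaluates and writes the merged summary. The names sess, writer, episode_number, and the stat values are assumptions standing in for the caller's training loop:

# Illustrative consumption of build_summaries(); sess, writer, episode_number,
# and the stat values are hypothetical names from the surrounding loop.
summary_placeholders, assign_ops, summary_op = build_summaries()
stats = [episode_reward_value, episode_max_q_value, epsilon_value]  # hypothetical floats
for placeholder, assign_op, value in zip(summary_placeholders, assign_ops, stats):
    sess.run(assign_op, feed_dict={placeholder: value})
summary_str = sess.run(summary_op)
writer.add_summary(summary_str, episode_number)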
def train(self):
self.train_op = self.optim.minimize(self.loss, global_step=self.global_step)
self.writer = tf.train.SummaryWriter("./logs/D_pretrained", self.sess.graph)
self.summary_op = tf.merge_all_summaries()
tf.initialize_all_variables().run()
self.saver = tf.train.Saver(var_list=self.D_params_dict, max_to_keep=self.max_to_keep)
count = 0
for idx in range(self.max_iter//3000):
self.save(self.checkpoint_dir, count)
self.evaluate('test', count)
self.evaluate('train', count)
for k in tqdm(range(3000)):
right_images, right_text, _ = self.dataset.sequential_sample(self.batch_size)
right_length = np.sum((right_text!=self.NOT)+0, 1)
fake_images, fake_text, _ = self.negative_dataset.sequential_sample(self.batch_size)
fake_length = np.sum((fake_text!=self.NOT)+0, 1)
wrong_text = self.dataset.get_wrong_text(self.batch_size)
wrong_length = np.sum((wrong_text!=self.NOT)+0, 1)
feed_dict = {self.right_images:right_images, self.right_text:right_text, self.right_length:right_length,
self.fake_images:fake_images, self.fake_text:fake_text, self.fake_length:fake_length,
self.wrong_images:right_images, self.wrong_text:wrong_text, self.wrong_length:wrong_length}
_, loss, summary_str = self.sess.run([self.train_op, self.loss, self.summary_op], feed_dict)
self.writer.add_summary(summary_str, count)
count += 1
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
tf.scalar_summary('total_loss', total_loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
summary_op = tf.merge_all_summaries()
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, number_of_steps=300, summary_op=summary_op)
def __init__(self, config, environment, optimizer, lr_op):
super(Agent, self).__init__(config)
self.weight_dir = 'weights'
self.env = environment
self.history = History(self.config)
self.lr_op = lr_op
self.optimizer = optimizer
self.step_op = tf.Variable(0, trainable=False, name='step')
self.step_inc_op = self.step_op.assign_add(1, use_locking=True)
self.build_dqn()
    self.saver = tf.train.Saver(list(self.w.values()) + [self.step_op], max_to_keep=30)  # list() for Python 3 compatibility
self.summary_op = tf.merge_all_summaries()
self.init_op = tf.initialize_all_variables()
def __init__(self,env):
# experience replay
self.replay_buffer = deque()
# initialize parameters
self.time_step = 0
self.epsilon = INITIAL_EPSILON
self.action_dim = 3
self.create_Q_network()
self.create_training_method()
# create session
self.t_session = tf.InteractiveSession()
self.R = tf.placeholder("float", shape = None)
self.T = tf.placeholder("float", shape = None)
R_summ = tf.scalar_summary(tags = "testing_reward", values = self.R)
T_summ = tf.scalar_summary(tags = "training_reward", values = self.T)
self.merged_summ = tf.merge_all_summaries()
    self.writer = tf.train.SummaryWriter(logdir = "/home/airchen/Documents/coding/stock", graph = self.t_session.graph) # logdir is where the TensorBoard event files are written; change it to a writable path on your machine (e.g. under /home/USERNAME/)
self.t_session.run(tf.initialize_all_variables())
def __init__(self,env):
# experience replay
self.replay_buffer = deque()
# initialize parameters
self.epsilon = INITIAL_EPSILON
self.action_dim = 3 # Totally three actions
self.create_Q_network()
self.create_training_method()
    # create the session used to run TensorFlow and write TensorBoard logs
self.t_session = tf.InteractiveSession()
self.R = tf.placeholder("float", shape = None)
self.T = tf.placeholder("float", shape = None)
R_summ = tf.scalar_summary(tags = "testing_reward", values = self.R)
T_summ = tf.scalar_summary(tags = "training_reward", values = self.T)
self.merged_summ = tf.merge_all_summaries()
    self.writer = tf.train.SummaryWriter(logdir = "/home/airchen/Documents/coding/stock", graph = self.t_session.graph) # logdir is where the TensorBoard event files are written; change it to a writable path on your machine (e.g. under /home/USERNAME/)
self.t_session.run(tf.initialize_all_variables())
def _add_train_op(self):
params = self._params
self._lr_rate = tf.maximum(
params.min_lr,
tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))
tvars = tf.trainable_variables()
    # use the last (reserved) GPU for gradient computation
with tf.device(self._get_gpu(self._num_gpus-1)):
grads, global_norm = tf.clip_by_global_norm(
tf.gradients(self._loss, tvars), params.max_grad_norm)
tf.scalar_summary('global_norm', global_norm)
optimizer = tf.train.AdamOptimizer(self._lr_rate)
tf.scalar_summary('learning rate', self._lr_rate)
with tf.device(self._next_device()):
self._train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=self._global_step, name='train_step')
self._summaries = tf.merge_all_summaries()
    return self._train_op, self._loss
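For intuition, tf.clip_by_global_norm rescales the whole gradient list by a single factor and also returns the pre-clipping global norm (which is what the 'global_norm' summary above records). A NumPy sketch of those semantics, not from the source:

# NumPy illustration of tf.clip_by_global_norm: the global norm is the L2 norm
# of all gradients concatenated; if it exceeds clip_norm, every gradient is
# scaled by clip_norm / global_norm, otherwise left unchanged.
import numpy as np

def clip_by_global_norm(grads, clip_norm):
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    scale = clip_norm / max(global_norm, clip_norm)  # <= 1.0; a no-op when already small
    return [g * scale for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([12.0])]     # global norm = sqrt(9+16+144) = 13
clipped, norm = clip_by_global_norm(grads, 1.0)
print(norm)                                          # 13.0; each gradient scaled by 1/13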
def _initialize_tf_utilities_and_ops(self, restore_previous_model):
""" Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.
Restore a previously trained model if the flag restore_previous_model is true.
"""
self.tf_merged_summaries = tf.merge_all_summaries()
init_op = tf.initialize_all_variables()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path)
self.tf_summary_writer = tf.train.SummaryWriter(self.summary_dir, self.tf_session.graph_def)
def setup_summaries(self):
episode_reward = tf.Variable(0.)
s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
if self.alg_type == "a3c":
summary_vars = [episode_reward]
else:
episode_ave_max_q = tf.Variable(0.)
s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
logged_epsilon = tf.Variable(0.)
s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
with tf.control_dependencies(update_ops):
summary_ops = tf.merge_all_summaries()
return summary_placeholders, update_ops, summary_ops
def initialize_session(sess, task_params):
if task_params['verbose']:
print("Initalizing tensorflow session ...")
saver = tf.train.Saver()
if task_params['restore_from_checkpoint']:
saver.restore(
sess=sess,
save_path=task_params['save_path'])
if task_params['verbose']:
print("Restoring variables from '{}'".format(task_params['save_path']))
else:
sess.run(tf.initialize_all_variables())
sess.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
logdir=task_params['summaries_dir'] + '/train_' + time.strftime("%Y%m%d_%H-%M-%S")
train_writer = tf.train.SummaryWriter(logdir=logdir, graph=sess.graph)
summaries = tf.merge_all_summaries()
return coord, threads, saver, train_writer, summaries
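initialize_session() starts queue-runner threads but leaves shutdown to the caller; a sketch of the usual teardown, assuming the sess, coord, and threads from the call above (train_op is a hypothetical training step):

# Typical teardown for the coordinator and threads from initialize_session().
try:
    while not coord.should_stop():
        sess.run(train_op)          # hypothetical training step
except tf.errors.OutOfRangeError:
    pass                            # input queues exhausted
finally:
    coord.request_stop()            # ask all queue-runner threads to exit
    coord.join(threads)             # block until they have stopped
    sess.close()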
def train(self, config):
start_time = time.time()
merged_sum = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)
tf.initialize_all_variables().run()
self.load(self.checkpoint_dir)
for epoch in range(self.epoch):
epoch_loss = 0.
        for idx, x in enumerate(self.reader.next_batch()):
            step = epoch * self.reader.batch_cnt + idx  # global step used for the summary writer and checkpoints below
_, loss, e_loss, g_loss, summary_str = self.sess.run(
[self.optim, self.loss, self.e_loss, self.g_loss, merged_sum], feed_dict={self.x: x})
epoch_loss += loss
if idx % 10 == 0:
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.8f, e_loss: %.8f, g_loss: %.8f" \
% (epoch, idx, self.reader.batch_cnt, time.time() - start_time, loss, e_loss, g_loss))
if idx % 2 == 0:
writer.add_summary(summary_str, step)
if idx != 0 and idx % 1000 == 0:
self.save(self.checkpoint_dir, step)
def initialize(self, log_dir="./logs"):
self.merged_sum = tf.merge_all_summaries()
self.writer = tf.train.SummaryWriter(log_dir, self.sess.graph_def)
tf.initialize_all_variables().run()
self.load(self.checkpoint_dir)
start_iter = self.step.eval()
def _build_graph(self):
"""Build a whole graph for the model."""
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self._build_model()
if self.mode == 'train':
self._build_train_op()
#self.summaries = tf.merge_all_summaries()
def restore(self, ckpt_file='/tmp/rlflow/model.ckpt'):
"""
Restore state from a file
"""
self.saver.restore(self.sess, ckpt_file)
# if '-' in ckpt_file[ckpt_file.rfind('.ckpt'):]:
# last_step = int(ckpt_file[ckpt_file.find('-')+1:])
# self.step = last_step
print("Session restored from file: %s" % ckpt_file)
# def build_summary_ops(self, verbose=3):
# """
# Build summary ops for activations, gradients, reward, q values,
# values estimates, etc
# Create summaries with `verbose` level
# """
# if verbose >= 3:
# # Summarize activations
# activations = tf.get_collection(tf.GraphKeys.ACTIVATIONS)
# tflearn.summarize_activations(activations, RLAlgorithm.SUMMARY_COLLECTION_NAME)
# if verbose >= 2:
# # Summarize variable weights
# tflearn.summarize_variables(tf.trainable_variables(), RLAlgorithm.SUMMARY_COLLECTION_NAME)
# if verbose >= 1:
# # summarize reward
# episode_reward = tf.Variable(0., trainable=False)
# self.episode_reward_summary = scalar_summary("Reward", episode_reward, collections=RLAlgorithm.SUMMARY_COLLECTION_NAME)
# self.episode_reward_placeholder = tf.placeholder("float")
# self.episode_reward_op = episode_reward.assign(self.episode_reward_placeholder)
# tf.add_to_collection(RLAlgorithm.SUMMARY_COLLECTION_NAME, self.episode_reward_summary)
#
# # Summarize gradients
# # tflearn.summarize_gradients(self.grads_and_vars, summ_collection)
#
# if len(tf.get_collection(RLAlgorithm.SUMMARY_COLLECTION_NAME)) != 0:
# self.summary_op = merge_all_summaries(key=RLAlgorithm.SUMMARY_COLLECTION_NAME)
def init_common(self):
# initialize variables common to training and testing
self.t = 0
self.learning_step = 0
self.replay = []
self.losses = []
self.games = []
self.q_t = None
self.s_t = None
self.a_t = None
self.r_t = 0
self.s_t1 = None
self.q_t1 = None
self.terminal = False
self.test_mode = False
self.baseline = False
# enable logging
self.q_train.summaries = self.q_target.summaries = self.summaries = tf.merge_all_summaries()
def __init__(self,
screen_width,
screen_height,
num_channels,
num_actions,
metrics_directory,
batched_forward_pass_size,
hyperparameters=QNetworkHyperparameters()):
self.logger = logging.getLogger(__name__)
self.screen_width = screen_width
self.screen_height = screen_height
self.num_channels = num_channels
self.num_actions = num_actions
self.batched_forward_pass_size = batched_forward_pass_size
self.hyperparameters = hyperparameters
self.tf_graph = tf.Graph()
self.tf_graph_forward_pass_bundle_single = self._build_graph_forward_pass_bundle(self.tf_graph, 1)
self.tf_graph_forward_pass_bundle_batched = self._build_graph_forward_pass_bundle(self.tf_graph, batched_forward_pass_size)
self.tf_graph_train_bundle = self._build_graph_train_bundle(self.tf_graph)
self.tf_session = tf.Session(graph=self.tf_graph)
with self.tf_graph.as_default():
self.tf_all_summaries = tf.merge_all_summaries()
self.tf_summary_writer = tf.train.SummaryWriter(logdir=metrics_directory, graph=self.tf_graph)
self.tf_saver = tf.train.Saver()
tf.initialize_all_variables().run(session=self.tf_session)
self.assigns_train_to_forward_pass_variables = self._build_assigns_train_to_forward_pass_variables()
def add_visualize_node(self):
    # Merge all the summaries and write them out to self.summaries_dir
self.merged = tf.merge_all_summaries()
self.train_writer = tf.train.SummaryWriter(self.summaries_dir+ '/train',
self.graph)
self.test_writer = tf.train.SummaryWriter(self.summaries_dir + '/test')
return
def initialize_network(self, logs_dir):
print("Initializing network...")
self.logs_dir = logs_dir
self.sess = tf.Session()
self.summary_op = tf.merge_all_summaries()
self.saver = tf.train.Saver()
self.summary_writer = tf.train.SummaryWriter(self.logs_dir, self.sess.graph)
self.sess.run(tf.initialize_all_variables())
ckpt = tf.train.get_checkpoint_state(self.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Model restored...")
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(self.sess, self.coord)
def evaluate():
"""Eval for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels.
images, labels = architecture.inputs(phase=FLAGS.phase)
# Build a Graph that computes the logits predictions from the
# inference model.
logits = architecture.inference(images, train=False)
# adapt logits
logits = tf.reshape(logits, (-1, NUM_CLASSES))
epsilon = tf.constant(value=1e-4)
logits = logits + epsilon
# predict
predictions = tf.argmax(logits, dimension=1)
labels = tf.cast(tf.reshape(labels, shape=predictions.get_shape()), dtype=tf.int64)
# compute accuracy
correct_predictions = tf.equal(predictions, labels)
accuracy = tf.reduce_mean(tf.cast(correct_predictions, dtype=tf.float32))
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
architecture.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
        tf.initialize_all_variables()  # note: this only creates the init op and never runs it; variables come from the checkpoint
while True:
eval_once(saver, summary_writer, accuracy, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
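Several of the eval loops on this page restore the exponential-moving-average shadow values in place of the raw weights. A minimal sketch (pre-1.0 API, illustrative) of what variables_to_restore() hands to the Saver:

# variables_to_restore() maps each averaged variable's checkpoint name
# ('<name>/ExponentialMovingAverage') to the live graph variable, so the
# Saver loads the averaged weights into the variables used for inference.
import tensorflow as tf

w = tf.Variable(1.0, name='w')
ema = tf.train.ExponentialMovingAverage(0.999)
maintain_averages_op = ema.apply([w])    # training would run this op every step
restore_map = ema.variables_to_restore()
print(sorted(restore_map.keys()))        # typically ['w/ExponentialMovingAverage']
saver = tf.train.Saver(restore_map)      # used at eval time, as in the loops above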
def _set_model(self, model):
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
self.model = model
self.sess = KTF.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf.histogram_summary(weight.name, weight)
if self.write_images:
w_img = tf.squeeze(weight)
shape = w_img.get_shape()
if len(shape) > 1 and shape[0] > shape[1]:
w_img = tf.transpose(w_img)
if len(shape) == 1:
w_img = tf.expand_dims(w_img, 0)
w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
tf.image_summary(weight.name, w_img)
if hasattr(layer, 'output'):
tf.histogram_summary('{}_out'.format(layer.name),
layer.output)
self.merged = tf.merge_all_summaries()
if self.write_graph:
if parse_version(tf.__version__) >= parse_version('0.8.0'):
self.writer = tf.train.SummaryWriter(self.log_dir,
self.sess.graph)
else:
self.writer = tf.train.SummaryWriter(self.log_dir,
self.sess.graph_def)
else:
self.writer = tf.train.SummaryWriter(self.log_dir)
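The write_images branch above massages each kernel into the 4-D (batch, height, width, channels) layout that tf.image_summary expects. A NumPy walkthrough of the shape logic for a hypothetical Dense kernel:

# Shape walkthrough for the write_images branch (NumPy, illustrative only).
import numpy as np

w_img = np.zeros((784, 10))                        # e.g. a Dense kernel
if w_img.ndim > 1 and w_img.shape[0] > w_img.shape[1]:
    w_img = w_img.T                                # (10, 784): wider than tall
if w_img.ndim == 1:
    w_img = w_img[np.newaxis, :]                   # promote vectors to 2-D
w_img = w_img[np.newaxis, :, :, np.newaxis]        # (1, 10, 784, 1) for image_summary
print(w_img.shape)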
def setup_models(self, hidden_layer_size, summary_file):
    # set up the separate core and target networks
self.core_state, self.core_q_values = build_model("core", self.state_size, self.num_actions, hidden_layer_size)
self.target_state, self.target_q_values = build_model("target", self.state_size, self.num_actions, hidden_layer_size)
# build the global copy op that will copy core network onto target
self.clobber_target_net_op = copy_all_vars(from_namespace="core", to_namespace="target",
affine_coefficient=self.target_network_update_coeff)
    # left-hand side of the Bellman update: Q(s1, a)
self.core_action_mask = tf.placeholder(dtype=tf.float32, shape=[None, self.num_actions],
name="core_action_mask")
self.core_q_value_for_action = tf.reduce_sum(self.core_q_values * self.core_action_mask)
    # right-hand side of the Bellman update: reward + discount * max_a' Q(s2, a')
self.reward = tf.placeholder(dtype=tf.float32, name="reward")
self.discount_p = tf.placeholder(dtype=tf.float32, name="discount")
self.max_target_q_value_plus_reward = self.reward + (self.discount_p * tf.stop_gradient(tf.reduce_max(self.target_q_values)))
# for loss just use squared loss on the difference
self.temporal_difference_loss = tf.reduce_mean(tf.pow(self.max_target_q_value_plus_reward - self.core_q_value_for_action, 2))
self.learning_rate_p = tf.placeholder(dtype=tf.float32, name="learning_rate")
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate_p)
#optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, decay=0.9)
gradients = optimizer.compute_gradients(self.temporal_difference_loss)
for i, (gradient, variable) in enumerate(gradients):
        if gradient is None:  # e.g. stop-gradient cases
continue
gradients[i] = (tf.clip_by_norm(gradient, self.gradient_clip), variable)
tf.histogram_summary(variable.name, variable)
tf.histogram_summary(variable.name + '/gradients', gradient)
tf.scalar_summary("temporal_difference_loss", self.temporal_difference_loss)
self.train_op = optimizer.apply_gradients(gradients)
# build session
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.summaries = tf.merge_all_summaries()
self.summary_writer = tf.train.SummaryWriter(summary_file, self.sess.graph_def)
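A worked NumPy example of the temporal-difference pieces assembled above, with made-up numbers:

# Worked example of the TD target and loss from setup_models() (made-up values).
import numpy as np

core_q = np.array([1.0, 2.5, 0.5])        # Q(s1, .) from the core network
action_mask = np.array([0.0, 1.0, 0.0])   # one-hot mask for the action taken
target_q = np.array([1.5, 3.0, 2.0])      # Q(s2, .) from the target network
reward, discount = 1.0, 0.99

q_s1_a = np.sum(core_q * action_mask)             # 2.5
td_target = reward + discount * np.max(target_q)  # 1 + 0.99 * 3 = 3.97
loss = (td_target - q_s1_a) ** 2                  # squared TD error = 1.47**2 = 2.1609
print(q_s1_a, td_target, loss)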
def initialize_graph(self, input_dim):
self.input_dim = input_dim
self._setup_base_graph()
with self.graph.as_default():
self.sess = tf.Session()
self.init_op = tf.initialize_all_variables()
self.summary = tf.merge_all_summaries()
self.sess.run(self.init_op)
self.initialized = True
def evaluate():
"""Eval CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar10.inputs(eval_data=eval_data)
# images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 3)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def evaluate():
"""Eval CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar10.inputs(eval_data=eval_data)
# images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images, eval=True)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 3)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def evaluate():
"""Eval CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar10.inputs(eval_data=eval_data)
# images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def train():
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False)
image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
logits = model.inference(image)
loss = model.loss(logits, label)
train_op = model.train(loss, global_step)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=input.FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(input.FLAGS.train_dir, graph_def=sess.graph_def)
for step in xrange(input.FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            if step % 1 == 0:  # logs every step; raise the modulus to log less often
num_examples_per_step = input.FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
print (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
if step % 10 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 25 == 0:
checkpoint_path = os.path.join(input.FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def evaluate():
"""Eval CIFAR-10 for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar10_input.inputs(eval_data, FLAGS.data_dir, FLAGS.batch_size)
# Build a Graph that computes the logits predictions from the
# inference model.
logits = inference(images)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
#variable_averages = tf.train.ExponentialMovingAverage(
# cifar10.MOVING_AVERAGE_DECAY)
#variables_to_restore = variable_averages.variables_to_restore()
#saver = tf.train.Saver(variables_to_restore)
saver = tf.train.Saver()
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)