def scalar_summary(self, tag, value, step):
    """Log a scalar value to TensorBoard."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self.writer.add_summary(summary, step)
    self.writer.flush()
def histo_summary(self, tag, values, step, bins=1000):
    """Log a histogram of the tensor of values."""
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values ** 2))
    counts, edges = np.histogram(values, bins=bins)
    # Drop the leftmost edge: bucket_limit needs exactly one entry per bin.
    for edge in edges[1:]:
        hist.bucket_limit.append(edge)
    for count in counts:
        hist.bucket.append(count)
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    self.writer.add_summary(summary, step)
    self.writer.flush()
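# Hedged usage sketch (not part of the original snippet): the two methods above assume
# a `self.writer` attribute holding a TF 1.x tf.summary.FileWriter. A minimal standalone
# equivalent, with a hypothetical log directory, looks like this:
import tensorflow as tf

writer = tf.summary.FileWriter('./logs')  # assumed log directory
writer.add_summary(
    tf.Summary(value=[tf.Summary.Value(tag='train/loss', simple_value=0.42)]),
    global_step=1)
writer.flush()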
def make_summary(value_dict):
    """Make tf.Summary for TensorBoard."""
    return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k, v in value_dict.items()])
def summary(value_dict, global_step, writer):
    """Make a tf.Summary from value_dict and write it to TensorBoard."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k, v in value_dict.items()])
    writer.add_summary(summary, global_step)
    return None
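# Hedged usage sketch (names assumed): writing a dict of metrics at a given step with
# the two helpers above and a TF 1.x FileWriter.
import tensorflow as tf

writer = tf.summary.FileWriter('./logs')  # assumed log directory
metrics = {'val/loss': 0.31, 'val/accuracy': 0.88}
writer.add_summary(make_summary(metrics), global_step=10)
# or, equivalently, write directly:
summary(metrics, global_step=10, writer=writer)
writer.flush()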
def scalar_summary(self, tag, value, step):
    """Log a scalar variable."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
    """Log a histogram of the tensor of values."""
    # Create a histogram using numpy
    counts, bin_edges = np.histogram(values, bins=bins)
    # Fill the fields of the histogram proto
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values**2))
    # Drop the start of the first bin
    bin_edges = bin_edges[1:]
    # Add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)
    # Create and write Summary
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    self.writer.add_summary(summary, step)
    self.writer.flush()
def log_scalar(self, tag, value, step):
    """Log a scalar variable."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self.writer.add_summary(summary, step)
def log_histogram(self, tag, values, step, bins=1000):
    """Log a histogram of a list/vector of values."""
    # Create histogram using numpy
    values = np.array(values)
    counts, bin_edges = np.histogram(values, bins=bins)
    # Fill fields of histogram proto
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(np.shape(values)))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values ** 2))
    # bucket_limit needs exactly one entry per bin; the first bucket runs from -DBL_MAX to bin_edges[1].
    # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
    # Thus, we drop the leftmost edge (the start of the first bin); a worked example follows this function.
    bin_edges = bin_edges[1:]
    # Add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)
    # Create and write Summary
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    self.writer.add_summary(summary, step)
    self.writer.flush()
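# Worked example (values assumed for illustration) of the bucket layout described in the
# comments above: np.histogram returns bins + 1 edges; dropping the leftmost edge leaves
# exactly one bucket_limit per count, and the first bucket implicitly starts at -DBL_MAX
# as required by summary.proto.
import numpy as np

counts, bin_edges = np.histogram([0.1, 0.4, 1.2, 2.7], bins=3)
# counts    -> [2, 1, 1]                 (one count per bucket)
# bin_edges -> [0.1, ~0.97, ~1.83, 2.7]  (bins + 1 = 4 edges)
bucket_limits = bin_edges[1:]            # drop the leftmost edge
assert len(bucket_limits) == len(counts) == 3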
def _evaluate_on_validation(self, get_val_instance_generator,
                            batch_size,
                            num_val_steps,
                            session):
    val_batch_gen = DataManager.get_batch_generator(
        get_val_instance_generator, batch_size)
    # Calculate the mean of the validation metrics
    # over the validation set.
    val_accuracies = []
    val_losses = []
    for val_batch in tqdm(val_batch_gen,
                          total=num_val_steps,
                          desc="Validation Batches Completed",
                          leave=False):
        feed_dict = self._get_validation_feed_dict(val_batch)
        val_batch_acc, val_batch_loss = session.run(
            [self.accuracy, self.loss],
            feed_dict=feed_dict)
        val_accuracies.append(val_batch_acc)
        val_losses.append(val_batch_loss)
    # Take the mean of the accuracies and losses.
    # TODO/FIXME: this assumes every batch has the same size, which is not
    # necessarily true; a size-weighted mean is sketched after this function.
    mean_val_accuracy = np.mean(val_accuracies)
    mean_val_loss = np.mean(val_losses)
    # Create a new Summary object with mean_val_accuracy
    # and mean_val_loss and add it to TensorBoard.
    val_summary = tf.Summary(value=[
        tf.Summary.Value(tag="val_summaries/loss",
                         simple_value=mean_val_loss),
        tf.Summary.Value(tag="val_summaries/accuracy",
                         simple_value=mean_val_accuracy)])
    return mean_val_accuracy, mean_val_loss, val_summary
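# Hedged sketch for the TODO above (the helper name is hypothetical): if validation
# batches can differ in size, a size-weighted mean is more accurate than a plain
# np.mean. The per-batch sizes would have to be collected in the loop alongside the
# per-batch metrics.
import numpy as np

def weighted_mean(per_batch_values, batch_sizes):
    """Average per-batch metrics, weighting each batch by its number of examples."""
    return float(np.average(per_batch_values, weights=batch_sizes))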
def Evaluate(sess):
    test_acc = 0.0
    test_loss = 0.0
    test_pre_index = 0
    add = 1000
    for it in range(test_iteration):
        test_batch_x = test_x[test_pre_index: test_pre_index + add]
        test_batch_y = test_y[test_pre_index: test_pre_index + add]
        test_pre_index = test_pre_index + add
        test_feed_dict = {
            x: test_batch_x,
            label: test_batch_y,
            learning_rate: epoch_learning_rate,
            training_flag: False
        }
        loss_, acc_ = sess.run([cost, accuracy], feed_dict=test_feed_dict)
        test_loss += loss_ / 10.0  # running mean; the hard-coded 10.0 assumes test_iteration == 10
        test_acc += acc_ / 10.0
    summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),
                                tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])
    return test_acc, test_loss, summary
def MakeSummary(name, value):
    """Creates a tf.Summary proto with the given name and value."""
    summary = tf.Summary()
    val = summary.value.add()
    val.tag = str(name)
    val.simple_value = float(value)
    return summary
def epoch_begin(self):
    self.epoch_id = self.gstep_val // self.train.nbatches
    summary_val = tf.Summary.Value(tag='control_flow/Epoch',
                                   simple_value=self.epoch_id + 1)
    summary = tf.Summary(value=[summary_val])
    self.sv.summary_computed(self.sess, summary,
                             global_step=self.epoch_id)
    self.pbar = tqdm(total=self.train.nbatches,
                     bar_format='{n_fmt}/{total_fmt}{desc}'
                                '{percentage:3.0f}%|{bar}| '
                                '[{elapsed}<{remaining},'
                                '{rate_fmt}{postfix}]')
def test_finalize_stats_summaries(self):
    p = plan.Plan(None)
    p.save_summaries_secs = 42
    p.losses['foo'] = tf.constant([1.0])
    p.losses['bar'] = tf.constant([2.0, 3.0])
    p.metrics['baz'] = tf.constant(4)
    p.metrics['qux'] = tf.constant([5.0, 6.0])
    p.finalize_stats()
    with self.test_session():
        self.assertEqual(6, p.loss_total.eval({p.batch_size_placeholder: 1}))
        summary = tf.Summary()
        summary.ParseFromString(p.summaries.eval({p.batch_size_placeholder: 1}))
        qux_string = tf.summary.histogram('qux', [5, 6]).eval()
        qux_proto = tf.Summary()
        qux_proto.ParseFromString(qux_string)
        qux_histogram = qux_proto.value[0].histo
        expected_values = [
            tf.Summary.Value(tag='foo', simple_value=1),
            tf.Summary.Value(tag='bar', simple_value=5),
            tf.Summary.Value(tag='loss_total', simple_value=6),
            tf.Summary.Value(tag='baz', simple_value=4),
            tf.Summary.Value(tag='qux', histo=qux_histogram)]
        six.assertCountEqual(self, expected_values, summary.value)
        summary.ParseFromString(p.summaries.eval({p.batch_size_placeholder: 2}))
        expected_values = [
            tf.Summary.Value(tag='foo', simple_value=0.5),
            tf.Summary.Value(tag='bar', simple_value=2.5),
            tf.Summary.Value(tag='loss_total', simple_value=3),
            tf.Summary.Value(tag='baz', simple_value=4),
            tf.Summary.Value(tag='qux', histo=qux_histogram)]
        six.assertCountEqual(self, expected_values, summary.value)
def emit_values(supervisor, session, step, values):
    summary = tf.Summary()
    for name, value in six.iteritems(values):
        summary_value = summary.value.add()
        summary_value.tag = name
        summary_value.simple_value = float(value)
    supervisor.summary_computed(session, summary, global_step=step)
def add_summary_value(writer, key, value, iteration):
    summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])
    writer.add_summary(summary, iteration)
def log_scalar(self, tag, value, step):
    """Log a scalar value.

    Parameters
    ----------
    tag : basestring
        Name of the scalar.
    value : float
        Scalar value to log.
    step : int
        Training iteration.
    """
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                 simple_value=value)])
    self.writer.add_summary(summary, step)
def log_histogram(self, tag, values, step, bins=1000):
    """Logs the histogram of a list/vector of values."""
    # Create histogram using numpy
    counts, bin_edges = np.histogram(values, bins=bins)
    # Fill fields of histogram proto
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values**2))
    # bucket_limit needs exactly one entry per bin; the first bucket runs from -DBL_MAX to bin_edges[1].
    # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
    # Thus, we drop the leftmost edge (the start of the first bin).
    bin_edges = bin_edges[1:]
    # Add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)
    # Create and write Summary
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    self.writer.add_summary(summary, step)
    self.writer.flush()
def close(self):
    """Close the simulation, freeing resources.

    Notes
    -----
    The simulation cannot be restarted after it is closed. This is not a
    technical limitation, just a design decision made for all Nengo
    simulators.
    """
    if not self.closed:
        # note: we use getattr in case it crashes before the object is
        # created
        if getattr(self, "sess", None) is not None:
            self.sess.close()
        self.sess = None
        if getattr(self, "summary", None) is not None:
            self.summary.close()
        self.closed = True
def learn_actor(self, s, x_ma, epoch):  # batch update
    _, police_grads = self.sess.run(self.train_ops, feed_dict={S: s, X_MA: x_ma})
    # The soft target-parameter update below is computationally expensive when the
    # tf.assign ops are rebuilt on every call (a cheaper pattern is sketched after this function):
    # target_params = (1 - tau) * target_params + tau * eval_params
    # self.sess.run([tf.assign(t, (1 - self.tau) * t + self.tau * e) for t, e in zip(self.t_params, self.e_params)])
    summary = tf.Summary()
    # summary.value.add(tag='info/c_gradient{}'.format(self.agent_id), simple_value=float(_c_grad))
    summary.value.add(tag='info/police_grads{}'.format(self.agent_id), simple_value=np.mean([np.mean(_) for _ in police_grads]))
    writer.add_summary(summary, epoch)
    writer.flush()
    # Instead of the soft update above, a hard replacement is used here every t_replace_iter steps.
    if self.t_replace_counter % self.t_replace_iter == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
    self.t_replace_counter += 1
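# Hedged sketch of the cheaper alternative mentioned in the comments above: build the
# soft-update ops once, at graph-construction time, and simply run them each training
# step instead of creating new tf.assign ops inside the loop. The function below is an
# illustration, not part of the original project.
import tensorflow as tf

def build_soft_replace_ops(target_params, eval_params, tau):
    """Create target <- (1 - tau) * target + tau * eval ops once, to be run every step."""
    return [tf.assign(t, (1.0 - tau) * t + tau * e)
            for t, e in zip(target_params, eval_params)]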
# From __main__.py of the project Neural-Architecture-Search-with-RL (author: dhruvramani).
def __init__(self, config):
    self.config = config
    self.data = DataSet(self.config)
    self.add_placeholders()
    self.summarizer = tf.summary
    self.net = Network(config)
    self.saver = tf.train.Saver()
    self.epoch_count, self.second_epoch_count = 0, 0
    self.outputs, self.prob = self.net.neural_search()
    self.hyperparams = self.net.gen_hyperparams(self.outputs)
    self.hype_list = [1 for i in range(self.config.hyperparams)]  # e.g. [7, 7, 24, 5, 5, 36, 3, 3, 48, 64]
    self.reinforce_loss = self.net.REINFORCE(self.prob)
    self.tr_cont_step = self.net.train_controller(self.reinforce_loss, self.val_accuracy)
    self.cNet, self.y_pred = self.init_child(self.hype_list)
    self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()
    self.init = tf.global_variables_initializer()
    self.local_init = tf.local_variables_initializer()
def tf_log(summary, collection=SCALAR_SUMMARIES):
    """Add a tf.summary object to the collection named `collection`."""
    tf.add_to_collection(collection, summary)
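# Hedged usage sketch (SCALAR_SUMMARIES is assumed to be a collection-name constant
# defined elsewhere in the project): summaries registered via tf_log can later be
# pulled out of the collection and merged into a single fetchable op.
import tensorflow as tf

merged_scalars = tf.summary.merge(tf.get_collection(SCALAR_SUMMARIES))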
def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    if self.model.validation_data and self.histogram_freq:
        if epoch % self.histogram_freq == 0:
            # TODO: implement batched calls to sess.run
            # (current call will likely go OOM on GPU)
            if self.model.uses_learning_phase:
                cut_v_data = len(self.model.inputs)
                val_data = self.model.validation_data[:cut_v_data] + [0]
                tensors = self.model.inputs + [K.learning_phase()]
            else:
                val_data = self.model.validation_data
                tensors = self.model.inputs
            feed_dict = dict(zip(tensors, val_data))
            result = self.sess.run([self.merged], feed_dict=feed_dict)
            summary_str = result[0]
            self.writer.add_summary(summary_str, epoch)
    for name, value in logs.items():
        if name in ['batch', 'size']:
            continue
        summary = tf.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value.item()
        summary_value.tag = name
        self.writer.add_summary(summary, epoch)
    self.writer.flush()
def visualize(self, net_inputs, net_outputs):
    # Input: a batch of training examples of the form [Tensor1, Tensor2, ..., Tensor_n].
    # net_inputs: usually the images; net_outputs: usually the labels.
    # This function visualizes the data that is read in; it returns nothing and reports via tf.summary.
    # The video is visualized as multiple images; there is currently no way to
    # visualize a time sequence, so isvalid and isstop cannot be visualized.
    if not FLAGS.no_image_input:
        decoded = net_inputs[0]
        visualize = tf.cast(decoded[0, :, :, :, :], tf.uint8)
        tf.image_summary("video_seq", visualize, max_images=FLAGS.n_sub_frame)
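# Note (TF-version assumption): tf.image_summary is the pre-1.0 API; on TF >= 1.0 the
# equivalent call would be
#     tf.summary.image("video_seq", visualize, max_outputs=FLAGS.n_sub_frame)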
def visualize(self, net_inputs, net_outputs):
    # Input: a batch of training examples of the form [Tensor1, Tensor2, ..., Tensor_n].
    # net_inputs: usually the images; net_outputs: usually the labels.
    # This function visualizes the data that is read in; it returns nothing and reports via tf.summary.
    raise NotImplementedError()
def learn_critic(self, x_ma, a_ma, r, x2_ma, s, a, s2, epoch=0):
    # ATTENTION!
    # The key point is that we feed the constant a_ma in place of the critic's tensor self.a_ma.
    # This tensor must be replaced here, otherwise the whole network breaks:
    # the critic must take gradients with respect to the constant a_ma,
    # while the actor must take gradients through its own network tensor a_ma.
    # That is the trick.
    _c_grad, _c_loss, _a_grads = self.sess.run(
        self.train_ops, feed_dict={X_MA: x_ma, self.a_ma: a_ma,
                                   R: r, X2_MA: x2_ma,
                                   S: s, S2: s2})
    summary = tf.Summary()
    # summary.value.add(tag='info/c_gradient{}'.format(self.agent_id),
    #                   simple_value=float(_c_grad))
    summary.value.add(tag='info/c_loss{}'.format(self.agent_id), simple_value=float(_c_loss))
    writer.add_summary(summary, epoch)
    writer.flush()
    # The soft target-parameter update below is computationally expensive when the
    # tf.assign ops are rebuilt on every call:
    # target_params = (1 - tau) * target_params + tau * eval_params
    # self.sess.run([tf.assign(t, (1 - self.tau) * t + self.tau * e) for t, e in zip(self.t_params, self.e_params)])
    # Instead of the soft update above, a hard replacement is used here every t_replace_iter steps.
    if self.t_replace_counter % self.t_replace_iter == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
    self.t_replace_counter += 1
# ------------------- Memory -------------------