def build_model(self):
    self.q = tf.placeholder(tf.float32, [self.reader.vocab_size], name="question")
    self.a = tf.placeholder(tf.float32, [self.reader.vocab_size], name="answer")

    self.build_encoder()
    self.build_decoder()

    # Kullback-Leibler divergence between the approximate posterior and the prior
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))
    # Log likelihood of the reconstruction
    self.g_loss = tf.reduce_sum(tf.log(self.p_x_i))

    # Variational lower bound (ELBO) = log likelihood - KL divergence;
    # minimizing -loss below therefore maximizes the ELBO.
    self.loss = tf.reduce_mean(self.g_loss - self.e_loss)
    self.optim = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(-self.loss)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("decoder loss", self.g_loss)
    _ = tf.scalar_summary("loss", self.loss)
Python examples of tf.scalar_summary()
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
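The commented-out lines above are the pre-1.0 names of the same summary ops; for reference, the renames introduced in TF 1.0 are:

# tf.scalar_summary(tag, x)       ->  tf.summary.scalar(name, x)
# tf.histogram_summary(tag, x)    ->  tf.summary.histogram(name, x)
# tf.merge_all_summaries()        ->  tf.summary.merge_all()
# tf.train.SummaryWriter(logdir)  ->  tf.summary.FileWriter(logdir)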
def get_loss(pred, label, end_points, reg_weight=0.001):
    """pred: B x N x C, label: B x N"""
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.scalar_summary('classify loss', classify_loss)

    # Enforce that the learned feature transform is close to an orthogonal matrix
    transform = end_points['transform']  # B x K x K
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.scalar_summary('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
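The mat_diff_loss term is the PointNet T-Net regularizer: it pushes transform * transform^T toward the identity. A small numpy sanity check of the penalty (tf.nn.l2_loss is sum-of-squares divided by two), assuming nothing beyond numpy:

import numpy as np

K = 3
A = np.eye(K)  # a perfectly orthogonal transform
penalty = 0.5 * np.sum((A.dot(A.T) - np.eye(K)) ** 2)
assert penalty == 0.0  # orthogonal transforms incur no regularization cost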
def define_summaries(self):
    '''Helper function for init_opt'''
    all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
    for k, v in self.log_vars:
        if k.startswith('g'):
            all_sum['g'].append(tf.scalar_summary(k, v))
        elif k.startswith('d'):
            all_sum['d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_g'):
            all_sum['hr_g'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_d'):
            all_sum['hr_d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hist'):
            all_sum['hist'].append(tf.histogram_summary(k, v))

    self.g_sum = tf.merge_summary(all_sum['g'])
    self.d_sum = tf.merge_summary(all_sum['d'])
    self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
    self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
    self.hist_sum = tf.merge_summary(all_sum['hist'])
def compute_cost(self):
    losses = tf.nn.seq2seq.sequence_loss_by_example(
        [tf.reshape(self.pred, [-1], name='reshape_pred')],
        [tf.reshape(self.ys, [-1], name='reshape_target')],
        [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
        average_across_timesteps=True,
        softmax_loss_function=self.ms_error,
        name='losses'
    )
    with tf.name_scope('average_cost'):
        self.cost = tf.div(
            tf.reduce_sum(losses, name='losses_sum'),
            self.batch_size,
            name='average_cost')
        tf.scalar_summary('cost', self.cost)
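The ms_error passed as softmax_loss_function is not shown in this excerpt; a plausible mean-squared-error implementation consistent with the pre-1.0 API used here (the exact definition in the source project may differ):

def ms_error(self, y_pred, y_target):
    # element-wise squared error (symmetric, so argument order does not matter);
    # sequence_loss_by_example then averages it across timesteps
    return tf.square(tf.sub(y_pred, y_target))  # tf.sub was renamed tf.subtract in TF 1.x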
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
    discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                       name="disc_real_loss")
    discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                       name="disc_fake_loss")
    self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

    gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
    if use_features:
        gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
    else:
        gen_loss_features = 0
    self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

    tf.scalar_summary("Discriminator_loss", self.discriminator_loss)
    tf.scalar_summary("Generator_loss", self.gen_loss)
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
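In the CIFAR-10 tutorial this op is used to gate the optimizer, so the averages update on every training step; a sketch of that pattern, assuming total_loss, lr, and global_step exist:

loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
train_op = opt.apply_gradients(grads, global_step=global_step)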
policy_gradient_actor_critic.py (project: RFR-solution, author: baoblackcoal)
def policy_gradient():
    with tf.variable_scope("policy"):
        params = tf.get_variable("policy_parameters", [4, 2])
        state = tf.placeholder("float", [None, 4])
        actions = tf.placeholder("float", [None, 2])
        advantages = tf.placeholder("float", [None, 1])
        reward_input = tf.placeholder("float")
        episode_reward = tf.get_variable("episode_reward", initializer=tf.constant(0.))
        episode_reward = reward_input  # rebinding: the summary reads the fed value directly
        linear = tf.matmul(state, params)
        probabilities = tf.nn.softmax(linear)
        good_probabilities = tf.reduce_sum(tf.mul(probabilities, actions), reduction_indices=[1])
        # log-probability of the taken action, weighted by its advantage
        eligibility = tf.log(good_probabilities) * advantages
        loss = -tf.reduce_sum(eligibility)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        tf.scalar_summary("loss", loss)
        tf.scalar_summary("episode_reward", episode_reward)
        return probabilities, state, actions, advantages, optimizer, reward_input, episode_reward
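A usage sketch for CartPole-style rollouts (4-dimensional state, 2 actions), assuming a session sess and numpy imported as np:

probabilities, state, actions, advantages, optimizer, reward_input, episode_reward = policy_gradient()
obs = np.array([[0.02, -0.15, 0.03, 0.12]])                     # one observation, shape (1, 4)
action_probs = sess.run(probabilities, feed_dict={state: obs})  # e.g. [[0.48, 0.52]]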
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def log(self, key, val, step_num):
    """Directly log a scalar value to the event file.

    :param string key: a name for the value
    :param val: a float
    :param step_num: the iteration number at which this value was logged
    """
    try:
        ph, summ = self.summaries[key]
    except KeyError:
        # if we haven't defined a variable for this key, define one
        with self.g.as_default():
            ph = tf.placeholder(tf.float32, (), name=key)  # scalar
            summ = tf.scalar_summary(key, ph)
        self.summaries[key] = (ph, summ)
    summary_str = self.sess.run(summ, {ph: val})
    self.summ_writer.add_summary(summary_str, step_num)
    return val
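Usage is a single call per scalar; the class lazily builds one placeholder/summary pair per key and caches it. A sketch, assuming logger is an instance of the enclosing class:

logger.log("train_loss", 0.417, step_num=1000)  # first call builds the summary op
logger.log("train_loss", 0.392, step_num=1100)  # later calls reuse the cached pair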
def build_model(self):
    self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.input_size, self.input_size, 3], name='real_images')
    # self.inputs = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 3], name='real_images')
    try:
        self.up_inputs = tf.image.resize_images(self.inputs, self.image_shape[0], self.image_shape[1], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    except ValueError:
        # newer versions of tensorflow expect the target size as a single list
        self.up_inputs = tf.image.resize_images(self.inputs, [self.image_shape[0], self.image_shape[1]], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape, name='real_images')
    # self.images = tf.placeholder(tf.float32, [None] + self.image_shape, name='real_images')
    self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape, name='sample_images')
    # self.sample_images = tf.placeholder(tf.float32, [None] + self.image_shape, name='sample_images')

    self.G = self.generator(self.inputs)
    self.G_sum = tf.image_summary("G", self.G)
    # pixel-wise MSE between the generated and the real high-resolution images
    self.g_loss = tf.reduce_mean(tf.square(self.images - self.G))
    self.g_loss_sum = tf.scalar_summary("g_loss", self.g_loss)

    t_vars = tf.trainable_variables()
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver()
def _init_summaries(self):
    if self.is_train:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

        self.summary_writer = tf.summary.FileWriter(logdir)
        self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                         for i in range(16)]

        tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections=[KEY_SUMMARIES])
        for i in range(16):
            tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                              collections=[KEY_SUMMARIES_PER_JOINT[i]])

        self.create_summary_from_weights()

        self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
        self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
    else:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
        self.summary_writer = tf.summary.FileWriter(logdir)
SENN.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
    '''
    Loss definition.
    Only the speech-inference loss is defined here, and it works quite well;
    add a VAD cross-entropy loss if you want.
    '''
    loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
    loss_o = loss_v1
    reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # ipdb.set_trace()
    loss_v = loss_o + tf.add_n(reg_loss)
    tf.scalar_summary('loss', loss_v)
    # loss_merge = tf.cond(
    #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
    #     lambda: tf.scalar_summary('loss', loss_v))
    return loss_v, loss_o
    # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
def _setup_training(self):
    """
    Set up a data flow graph for fine tuning
    """
    layer_num = self.layer_num
    act_func = ACTIVATE_FUNC[self.activate_func]
    sigma = self.sigma
    lr = self.learning_rate
    weights = self.weights
    biases = self.biases
    data1, data2 = self.data1, self.data2
    batch_size = self.batch_size
    optimizer = OPTIMIZER[self.optimizer]

    with tf.name_scope("training"):
        s1 = self._obtain_score(data1, weights, biases, act_func, "1")
        s2 = self._obtain_score(data2, weights, biases, act_func, "2")
        with tf.name_scope("cost"):
            sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma * (s1 - s2))))
            self.cost = cost = sum_cost / batch_size
    self.optimize = optimizer(lr).minimize(cost)

    for n in range(layer_num - 1):
        tf.histogram_summary("weight" + str(n), weights[n])
        tf.histogram_summary("bias" + str(n), biases[n])
    tf.scalar_summary("cost", cost)
def add_evaluation_step(result_tensor, ground_truth_tensor):
    """Inserts the operations we need to evaluate the accuracy of our results.

    Args:
        result_tensor: The new final node that produces results.
        ground_truth_tensor: The node we feed ground truth data into.
    Returns:
        evaluation_step: the op that computes the accuracy.
    """
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(result_tensor, 1),
                                          tf.argmax(ground_truth_tensor, 1))
        with tf.name_scope('accuracy'):
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', evaluation_step)
    return evaluation_step
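A usage sketch, assuming a session sess and that predictions and labels are numpy arrays fed into the two tensors (TensorFlow allows feeding non-placeholder tensors):

evaluation_step = add_evaluation_step(result_tensor, ground_truth_tensor)
accuracy = sess.run(evaluation_step,
                    feed_dict={result_tensor: predictions, ground_truth_tensor: labels})
print('accuracy: %.2f%%' % (accuracy * 100))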
def build_summaries():
    episode_reward = tf.Variable(0.)
    scalar_summary("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    scalar_summary("Qmax Value", episode_ave_max_q)
    logged_epsilon = tf.Variable(0.)
    scalar_summary("Epsilon", logged_epsilon)
    # Threads shouldn't modify the main graph, so we use placeholders
    # to assign the value of every summary (instead of calling assign in
    # every thread, which would keep creating new ops in the graph)
    summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float")
                            for i in range(len(summary_vars))]
    assign_ops = [summary_vars[i].assign(summary_placeholders[i])
                  for i in range(len(summary_vars))]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op
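Worker threads then feed plain Python floats through the placeholders and run the merged op; a sketch, assuming sess, a writer, and hypothetical per-episode statistics:

summary_placeholders, assign_ops, summary_op = build_summaries()
stats = [episode_reward_value, max_q_value, epsilon_value]  # plain Python floats
for i in range(len(stats)):
    sess.run(assign_ops[i], feed_dict={summary_placeholders[i]: float(stats[i])})
writer.add_summary(sess.run(summary_op), step)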
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CNN model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    # Error: these summaries cause high classifier error!!!
    # All inputs to node MergeSummary/MergeSummary must be from the same frame.
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _build_loss(self):
    config = self.config
    JX = tf.shape(self.x)[2]
    M = tf.shape(self.x)[1]
    JQ = tf.shape(self.q)[1]
    loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
    losses = tf.nn.softmax_cross_entropy_with_logits(
        self.logits, tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
    ce_loss = tf.reduce_mean(loss_mask * losses)
    tf.add_to_collection('losses', ce_loss)
    ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        self.logits2, tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
    tf.add_to_collection("losses", ce_loss2)

    self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
    tf.scalar_summary(self.loss.op.name, self.loss)
    tf.add_to_collection('ema/scalar', self.loss)
def Train(self,
          loss,
          learning_rate,
          clip_value_min,
          clip_value_max,
          name='training'):
    tf.scalar_summary(':'.join([name, loss.op.name]), loss)
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    clipped_grads_and_vars = [
        (tf.clip_by_value(g, clip_value_min, clip_value_max), v)
        for g, v in grads_and_vars
    ]
    for g, v in clipped_grads_and_vars:
        _ = tf.histogram_summary(':'.join([name, v.name]), v)
        _ = tf.histogram_summary('%s: gradient for %s' % (name, v.name), g)
    train_op = optimizer.apply_gradients(clipped_grads_and_vars)
    return train_op
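A usage sketch, assuming model is an instance of the enclosing class and loss is a scalar tensor in its graph:

train_op = model.Train(loss, learning_rate=0.1,
                       clip_value_min=-5.0, clip_value_max=5.0)
sess.run(tf.initialize_all_variables())  # pre-1.0 initializer (tf.global_variables_initializer in 1.x)
sess.run(train_op)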