def _add_loss_summaries(total_loss):
    """Add summaries for losses in CNN model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op
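A typical consumer of the returned op (as in the TensorFlow CIFAR-10 tutorial this helper mirrors) makes the training step depend on it, so the averages update on every step. A minimal sketch, assuming `total_loss` and a learning rate `lr` already exist:

loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
    # the train op only runs after the moving averages are refreshed
    opt = tf.train.GradientDescentOptimizer(lr)
    train_op = opt.minimize(total_loss)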
def _build_loss(self):
    config = self.config
    JX = tf.shape(self.x)[2]
    M = tf.shape(self.x)[1]
    JQ = tf.shape(self.q)[1]
    loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
    losses = tf.nn.softmax_cross_entropy_with_logits(
        self.logits, tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
    ce_loss = tf.reduce_mean(loss_mask * losses)
    tf.add_to_collection('losses', ce_loss)
    ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        self.logits2, tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
    tf.add_to_collection('losses', ce_loss2)

    self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
    tf.scalar_summary(self.loss.op.name, self.loss)
    tf.add_to_collection('ema/scalar', self.loss)
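The 'ema/scalar' collection suggests the raw loss is smoothed elsewhere in the codebase with an exponential moving average. A plausible sketch of that consumer (the decay value is an assumption):

ema = tf.train.ExponentialMovingAverage(0.9)
ema_op = ema.apply(tf.get_collection('ema/scalar'))
# summarize the smoothed value alongside the raw one
for var in tf.get_collection('ema/scalar'):
    tf.scalar_summary(var.op.name + '/ema', ema.average(var))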
def _init_loss(cls, config, q, expected_q, actions, reg_loss=None,
               summaries=None):
    """
    Set up the loss function and apply regularization if provided.
    @return: loss_op
    """
    # Select the Q-value of the action actually taken (actions is one-hot).
    q_masked = tf.reduce_sum(tf.mul(q, actions), reduction_indices=[1])
    loss = tf.reduce_mean(tf.squared_difference(q_masked, expected_q))
    if reg_loss is not None:
        loss += config.reg_param * reg_loss
    if summaries is not None:
        summaries.append(tf.scalar_summary('loss', loss))
    return loss
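The `tf.mul(q, actions)` masking works because `actions` is one-hot: the product zeroes every Q-value except the taken action's, and the row sum recovers it. A runnable sanity check with made-up values:

import tensorflow as tf

q = tf.constant([[1.0, 2.0, 3.0]])        # Q-values for 3 actions
actions = tf.constant([[0.0, 1.0, 0.0]])  # one-hot: action 1 was taken
q_masked = tf.reduce_sum(tf.mul(q, actions), reduction_indices=[1])
with tf.Session() as sess:
    print(sess.run(q_masked))  # [2.0] == Q(s, a=1)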
def grad(self, loc_mean_t, loc_t, h_t, prob, pred, labels):
    loss1, grads1 = self.grad_reinforcement(loc_mean_t, loc_t, h_t, prob, pred, labels)
    loss2, grads2 = self.grad_supervised(prob, labels)
    # Blend the REINFORCE and supervised objectives with weight lambda_.
    loss = (1 - self.lambda_) * loss1 + self.lambda_ * loss2
    grads = []
    for i in xrange(len(grads1)):
        grads.append((1 - self.lambda_) * grads1[i] + self.lambda_ * grads2[i])
    tvars = tf.trainable_variables()
    grads = zip(grads, tvars)
    tf.scalar_summary('loss', loss)
    tf.scalar_summary('loss_reinforcement', loss1)
    tf.scalar_summary('loss_supervised', loss2)
    return loss, grads
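Because `grads` is already a list of `(gradient, variable)` pairs (note the `zip(grads, tvars)`), it can go straight to an optimizer. A minimal sketch; the optimizer choice, `model`, and `global_step` are assumptions:

loss, grads = model.grad(loc_mean_t, loc_t, h_t, prob, pred, labels)
opt = tf.train.AdamOptimizer(1e-4)
train_op = opt.apply_gradients(grads, global_step=global_step)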
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    # training session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
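Typical usage is to call the helper on each layer's output as the graph is built (layer names hypothetical):

conv1 = tf.nn.relu(pre_activation, name='conv1')
self._activation_summary(conv1)  # emits histogram + sparsity summaries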
def train_model(self, train_anchor_batch, train_pos_batch, train_neg_batch,
                model_params, train_params):
    # get embedding for all batches.
    all_batch = tf.concat(
        0, [train_anchor_batch, train_pos_batch, train_neg_batch])
    with tf.variable_scope("matcher"):
        all_feats, _ = self.build_model(all_batch, model_params)
    anchor_feats, pos_feats, neg_feats = tf.split(0, 3, all_feats)
    # compute loss.
    triplet_loss = dm_losses.triplet_loss(
        anchor_feats,
        pos_feats,
        neg_feats,
        0.2,
        loss_type=commons.LossType.TRIPLET_L2)
    tf.scalar_summary("losses/triplet_loss", triplet_loss)
    # run training.
    base_model.train_model_given_loss(triplet_loss, None, train_params)
    # TODO (jiefeng): use proper evaluation for matcher and test.
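`dm_losses.triplet_loss` itself isn't shown here; for reference, a sketch of the standard L2 triplet hinge it presumably implements, with the same 0.2 margin:

def l2_triplet_loss(anchor, pos, neg, margin=0.2):
    # squared L2 distances between embeddings
    d_pos = tf.reduce_sum(tf.square(anchor - pos), 1)
    d_neg = tf.reduce_sum(tf.square(anchor - neg), 1)
    # hinge: positives must be closer than negatives by at least `margin`
    return tf.reduce_mean(tf.maximum(0., d_pos - d_neg + margin))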
# nerve_net.py (project: ultrasound-nerve-segmentation-in-tensorflow, author: loliverhennigh)
def loss_image(prediction, mask):
    """Calculate loss for a prediction against the true mask.

    Args:
        prediction: prediction image
        mask: true image
    Returns:
        loss: loss value (negative smoothed Dice coefficient)
    """
    print(prediction.get_shape())
    print(mask.get_shape())
    # mask = tf.flatten(mask)
    # prediction = tf.flatten(prediction)
    intersection = tf.reduce_sum(prediction * mask)
    # Negative soft Dice: -(2*|P intersect M| + 1) / (|P| + |M| + 1).
    loss = -(2. * intersection + 1.) / (tf.reduce_sum(mask) + tf.reduce_sum(prediction) + 1.)
    tf.scalar_summary('loss', loss)
    return loss
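A quick NumPy check of the same formula with made-up binary masks:

import numpy as np

prediction = np.array([1., 1., 0., 0.])
mask       = np.array([1., 0., 1., 0.])
inter = (prediction * mask).sum()  # 1.0
print(-(2. * inter + 1.) / (mask.sum() + prediction.sum() + 1.))  # -0.6; perfect overlap gives -1.0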
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    # training session. This helps the clarity of presentation on TensorBoard.
    # Error: these summaries cause high classifier error!!!
    # All inputs to node MergeSummary/MergeSummary must be from the same frame.
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def __init__(self, sess, env_name, model_dir, variables, max_update_per_step, max_to_keep=20):
    self.sess = sess
    self.env_name = env_name
    self.max_update_per_step = max_update_per_step

    self.reset()
    self.max_avg_r = None

    with tf.variable_scope('t'):
        self.t_op = tf.Variable(0, trainable=False, name='t')
        self.t_add_op = self.t_op.assign_add(1)

    self.model_dir = model_dir
    self.saver = tf.train.Saver(variables + [self.t_op], max_to_keep=max_to_keep)
    self.writer = tf.train.SummaryWriter('./logs/%s' % self.model_dir, self.sess.graph)

    with tf.variable_scope('summary'):
        scalar_summary_tags = ['total r', 'avg r', 'avg q', 'avg v', 'avg a', 'avg l']

        self.summary_placeholders = {}
        self.summary_ops = {}
        for tag in scalar_summary_tags:
            self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
            self.summary_ops[tag] = tf.scalar_summary('%s/%s' % (self.env_name, tag), self.summary_placeholders[tag])
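These placeholder-backed summary ops are usually driven by a helper that feeds Python-side statistics and writes the result, mirroring the common DQN logging pattern; a sketch (method name hypothetical):

def inject_summary(self, tag_dict, t):
    # run each summary op with its Python-side value and write it at step t
    for tag, value in tag_dict.items():
        summary_str = self.sess.run(self.summary_ops[tag],
                                    {self.summary_placeholders[tag]: value})
        self.writer.add_summary(summary_str, t)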
def add_conv_layer(self, scope_name, layer_input, filter_size, input_channels,
                   output_channels, padding='SAME', should_init_wb=True):
    with tf.variable_scope(scope_name):
        weights_shape = filter_size + [input_channels, output_channels]
        initial_weights, initial_bias = self.__get_init_params(scope_name, should_init_wb)
        self.total_weights += weights_shape[0] * weights_shape[1] * weights_shape[2] * weights_shape[3]

        self.logger.info('Weight shape:{} for scope:{}'.format(weights_shape, tf.get_variable_scope().name))
        conv_weights = self.__get_variable('weights', weights_shape, tf.float32,
                                           initializer=initial_weights)
        tf.scalar_summary(scope_name + '/weight_sparsity', tf.nn.zero_fraction(conv_weights))
        tf.histogram_summary(scope_name + '/weights', conv_weights)

        conv = tf.nn.conv2d(layer_input, conv_weights,
                            strides=[1, 1, 1, 1], padding=padding)
        conv_biases = self.__get_variable('biases', [output_channels], tf.float32,
                                          initializer=initial_bias)
        layer_output = tf.nn.relu(tf.nn.bias_add(conv, conv_biases))
        return layer_output
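A hypothetical call site, stacking two 3x3 conv layers; note `filter_size` must be a list, since it is concatenated with the channel dimensions:

h1 = self.add_conv_layer('conv1', images, [3, 3], 3, 64)
h2 = self.add_conv_layer('conv2', h1, [3, 3], 64, 128)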
def add_activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    # training session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def add_loss_summaries(total_loss):
    """
    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    # training session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op
def _add_loss_summaries(total_loss):
    """Add summaries for losses in deepSpeech model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss;
    # do the same for the averaged version of the losses.
    for each_loss in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average
        # version of the loss as the original loss name.
        tf.scalar_summary(each_loss.op.name + ' (raw)', each_loss)
        tf.scalar_summary(each_loss.op.name, loss_averages.average(each_loss))
    return loss_averages_op
def add_summaries(summaries, learning_rate, grads):
    """Add summary ops."""
    # Track quantities for TensorBoard display.
    summaries.append(tf.scalar_summary('learning_rate', learning_rate))

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(
                tf.histogram_summary(var.op.name + '/gradients', grad))

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.histogram_summary(var.op.name, var))

    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    return summary_op
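The merged op is then evaluated periodically during training and written to disk; a minimal sketch, assuming a session, step counter, and log directory already exist:

writer = tf.train.SummaryWriter(log_dir, sess.graph)
if step % 100 == 0:
    summary_str = sess.run(summary_op)
    writer.add_summary(summary_str, step)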
# model_cifar.py (project: deep_separation_contraction, author: edouardoyallon)
def loss(logits, labels, n_class, scope='loss'):
    with tf.variable_scope(scope):
        # entropy loss
        targets = one_hot_embedding(labels, n_class)
        entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits, targets),
            name='entropy_loss')
        tf.add_to_collection('losses', entropy_loss)
        # weight l2 decay loss
        weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
        weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
                                   name='weight_decay_loss')
        tf.add_to_collection('losses', weight_decay_loss)
        # add a scalar summary for every collected loss
        for var in tf.get_collection('losses'):
            tf.scalar_summary('losses/' + var.op.name, var)
        # total loss
        return tf.add_n(tf.get_collection('losses'), name='total_loss')
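The l2 term only covers variables registered in the 'weights' collection when the layers were built; a sketch of that registration (variable name, shape, and initializer are assumptions):

w = tf.get_variable('conv1/weights', [3, 3, 3, 64],
                    initializer=tf.truncated_normal_initializer(stddev=0.1))
tf.add_to_collection('weights', w)  # picked up by the l2 decay loss above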
# model_cifar_contract.py (project: deep_separation_contraction, author: edouardoyallon)
def loss(logits, labels, n_class, scope='loss'):
    with tf.variable_scope(scope):
        # entropy loss
        targets = one_hot_embedding(labels, n_class)
        entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits, targets),
            name='entropy_loss')
        tf.add_to_collection('losses', entropy_loss)
        # weight l2 decay loss
        weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
        weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
                                   name='weight_decay_loss')
        tf.add_to_collection('losses', weight_decay_loss)
        # add a scalar summary for every collected loss
        for var in tf.get_collection('losses'):
            tf.scalar_summary('losses/' + var.op.name, var)
        # total loss
        return tf.add_n(tf.get_collection('losses'), name='total_loss')
# model_utils.py (project: deep_separation_contraction, author: edouardoyallon)
def add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op