def conv_max_pool_2x2(x, conv_width, conv_height, in_depth, out_depth, name="conv"):
    with tf.name_scope(name) as scope:
        W_conv = weight_variable([conv_width, conv_height, in_depth, out_depth])
        b_conv = bias_variable([out_depth])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = max_pool_2x2(h_conv)
        with tf.name_scope("summaries") as scope:
            # TIP: to display the out_depth (e.g. 32) convolution filters,
            # rearrange the weights into out_depth images with a transposition.
            a = tf.reshape(W_conv, [conv_width * conv_height * in_depth, out_depth])
            b = tf.transpose(a)
            c = tf.reshape(b, [out_depth, conv_width, conv_height * in_depth, 1])
            conv_image = tf.image_summary(name + " filter", c, out_depth)
            # TIP: the weight histograms show whether the weights are
            # exploding or vanishing during training.
            W_conv_hist = tf.histogram_summary(name + " weights", W_conv)
            b_conv_hist = tf.histogram_summary(name + " biases", b_conv)
    return h_pool
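The reshape/transpose trick in the TIP comment is easiest to verify with concrete shapes. A minimal sketch, assuming 5x5 filters, 1 input channel, and the 32 output filters the comment mentions:

import tensorflow as tf

W = tf.zeros([5, 5, 1, 32])           # [conv_width, conv_height, in_depth, out_depth]
a = tf.reshape(W, [5 * 5 * 1, 32])    # one column per filter: (25, 32)
b = tf.transpose(a)                   # one row per filter: (32, 25)
c = tf.reshape(b, [32, 5, 5 * 1, 1])  # 32 grayscale 5x5 images: (32, 5, 5, 1)
print(c.get_shape())                  # (32, 5, 5, 1), a valid batch for tf.image_summary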
Python examples of tf.histogram_summary()
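All of the snippets below revolve around the pre-1.0 summary API (tf.histogram_summary, tf.merge_all_summaries, tf.train.SummaryWriter). A minimal end-to-end sketch of that workflow, with illustrative names, before the per-project examples:

import tensorflow as tf

w = tf.Variable(tf.random_normal([100, 100]), name="w")
tf.histogram_summary("w", w)                  # track the weight distribution
merged = tf.merge_all_summaries()             # collect every summary op in the graph
writer = tf.train.SummaryWriter("/tmp/logs")  # directory TensorBoard will read

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    summary_str = sess.run(merged)
    writer.add_summary(summary_str, global_step=0)
# inspect with: tensorboard --logdir=/tmp/logs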
def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder"):
        q_cell = tf.nn.rnn_cell.LSTMCell(self.embed_dim, self.vocab_size)
        a_cell = tf.nn.rnn_cell.LSTMCell(self.embed_dim, self.vocab_size)
        l1 = tf.nn.relu(tf.nn.rnn_cell.linear(tf.expand_dims(self.x, 0), self.embed_dim, bias=True, scope="l1"))
        l2 = tf.nn.relu(tf.nn.rnn_cell.linear(l1, self.embed_dim, bias=True, scope="l2"))
        self.mu = tf.nn.rnn_cell.linear(l2, self.h_dim, bias=True, scope="mu")
        self.log_sigma_sq = tf.nn.rnn_cell.linear(l2, self.h_dim, bias=True, scope="log_sigma_sq")
        eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
        sigma = tf.sqrt(tf.exp(self.log_sigma_sq))
        _ = tf.histogram_summary("mu", self.mu)
        _ = tf.histogram_summary("sigma", sigma)
        self.h = self.mu + sigma * eps
def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder"):
        self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim, bias=True, scope="l1")
        self.l1 = tf.nn.relu(self.l1_lin)
        self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
        self.l2 = tf.nn.relu(self.l2_lin)
        self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
        self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True, scope="log_sigma_sq")
        self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
        self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))
        self.h = tf.add(self.mu, tf.mul(self.sigma, self.eps))
        _ = tf.histogram_summary("mu", self.mu)
        _ = tf.histogram_summary("sigma", self.sigma)
        _ = tf.histogram_summary("h", self.h)
        _ = tf.histogram_summary("mu + sigma", self.mu + self.sigma)
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
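This snippet shows both API generations side by side, which makes it a handy migration reference. The TensorFlow 1.0 renames relevant to this page (the pre-1.0 calls were removed in 1.x):

# pre-1.0 API                          1.x replacement
# tf.histogram_summary(tag, values)    tf.summary.histogram(name, values)
# tf.scalar_summary(tag, value)        tf.summary.scalar(name, value)
# tf.image_summary(tag, tensor, n)     tf.summary.image(name, tensor, max_outputs=n)
# tf.merge_summary(inputs)             tf.summary.merge(inputs)
# tf.merge_all_summaries()             tf.summary.merge_all()
# tf.train.SummaryWriter(logdir)       tf.summary.FileWriter(logdir)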
def define_summaries(self):
    '''Helper function for init_opt'''
    all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
    for k, v in self.log_vars:
        if k.startswith('g'):
            all_sum['g'].append(tf.scalar_summary(k, v))
        elif k.startswith('d'):
            all_sum['d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_g'):
            all_sum['hr_g'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_d'):
            all_sum['hr_d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hist'):
            all_sum['hist'].append(tf.histogram_summary(k, v))
    self.g_sum = tf.merge_summary(all_sum['g'])
    self.d_sum = tf.merge_summary(all_sum['d'])
    self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
    self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
    self.hist_sum = tf.merge_summary(all_sum['hist'])
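A sketch of how the merged ops above are typically consumed in the training loop. The train ops, feed dict, and summary_writer are illustrative assumptions; the surrounding trainer is not shown on this page:

# Alternating GAN updates, logging each side's summaries separately:
_, g_summary = sess.run([g_train_op, model.g_sum], feed_dict)
summary_writer.add_summary(g_summary, step)
_, d_summary = sess.run([d_train_op, model.d_sum], feed_dict)
summary_writer.add_summary(d_summary, step)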
def add_layers(inputs, in_size, out_size, layer_name, keep_prob, activation_function=None):
    # add one more layer and return the output of this layer
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weights) + biases
    # apply dropout to wx_plus_b here; keep_prob is the probability of
    # keeping a unit and is fed in at sess.run time via feed_dict
    wx_plus_b = tf.nn.dropout(wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = wx_plus_b
    else:
        outputs = activation_function(wx_plus_b)
    tf.histogram_summary(layer_name + '/outputs', outputs)
    return outputs
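A usage sketch for add_layers, showing how keep_prob is fed at sess.run time; the placeholder names and layer sizes are illustrative:

xs = tf.placeholder(tf.float32, [None, 64])
keep_prob = tf.placeholder(tf.float32)  # probability of keeping a unit
hidden = add_layers(xs, 64, 50, 'layer1', keep_prob, activation_function=tf.nn.tanh)
prediction = add_layers(hidden, 50, 10, 'layer2', keep_prob, activation_function=tf.nn.softmax)
# Drop units during training, keep everything at evaluation time:
# sess.run(train_step, feed_dict={xs: x_batch, keep_prob: 0.5})
# sess.run(prediction, feed_dict={xs: x_test, keep_prob: 1.0})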
def nn_layer_(self, input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.
    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = self.weight_variable([input_dim, output_dim])
            self.variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            biases = self.bias_variable([output_dim])
            self.variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, 'activation')
        tf.histogram_summary(layer_name + '/activations', activations)
        return activations
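The self.variable_summaries helper is not shown on this page. A typical implementation, assuming the TensorBoard tutorial pattern this method follows and the same pre-1.0 API:

def variable_summaries(self, var, name):
    """Attach mean/stddev/min/max scalars and a histogram to a variable."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)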
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _setup_training(self):
    """
    Set up a data flow graph for fine tuning
    """
    layer_num = self.layer_num
    act_func = ACTIVATE_FUNC[self.activate_func]
    sigma = self.sigma
    lr = self.learning_rate
    weights = self.weights
    biases = self.biases
    data1, data2 = self.data1, self.data2
    batch_size = self.batch_size
    optimizer = OPTIMIZER[self.optimizer]
    with tf.name_scope("training"):
        s1 = self._obtain_score(data1, weights, biases, act_func, "1")
        s2 = self._obtain_score(data2, weights, biases, act_func, "2")
        with tf.name_scope("cost"):
            sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma * (s1 - s2))))
            self.cost = cost = sum_cost / batch_size
        self.optimize = optimizer(lr).minimize(cost)
    for n in range(layer_num - 1):
        tf.histogram_summary("weight" + str(n), weights[n])
        tf.histogram_summary("bias" + str(n), biases[n])
    tf.scalar_summary("cost", cost)
def _activation_summary(self, x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    # Error: these summaries cause high classifier error!!!
    # "All inputs to node MergeSummary/MergeSummary must be from the same frame."
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def Train(self,
          loss,
          learning_rate,
          clip_value_min,
          clip_value_max,
          name='training'):
    tf.scalar_summary(':'.join([name, loss.op.name]), loss)
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    clipped_grads_and_vars = [
        (tf.clip_by_value(g, clip_value_min, clip_value_max), v)
        for g, v in grads_and_vars
    ]
    for g, v in clipped_grads_and_vars:
        _ = tf.histogram_summary(':'.join([name, v.name]), v)
        _ = tf.histogram_summary('%s: gradient for %s' % (name, v.name), g)
    train_op = optimizer.apply_gradients(clipped_grads_and_vars)
    return train_op
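A usage sketch, assuming a scalar loss built elsewhere; the clip bounds are illustrative. Note that tf.clip_by_value assumes every variable receives a gradient, so variables whose gradient is None would need to be filtered out first:

train_op = model.Train(loss,
                       learning_rate=0.1,
                       clip_value_min=-1.0,  # element-wise gradient clipping
                       clip_value_max=1.0)
summary_op = tf.merge_all_summaries()  # picks up the histograms added in Train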
def _activation_summary(self, x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def fc_layer(self, bottom, name):
    with tf.variable_scope(name) as scope:
        shape = bottom.get_shape().as_list()
        dim = 1
        for d in shape[1:]:
            dim *= d
        x = tf.reshape(bottom, [-1, dim])
        with tf.device('/cpu:0'):
            weights = self.get_fc_weight(name)
            biases = self.get_fc_bias(name)
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
        # tf.histogram_summary('adascan/'+name+'_activations', fc)
        # tf.histogram_summary('adascan/'+name+'_weights', weights)
        scope.reuse_variables()
        return fc
def add_optimizer(self):
    self.global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.01, self.global_step, 50,
                                               0.1, staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    gradients = optimizer.compute_gradients(self.loss)
    self.apply_gradient_op = optimizer.apply_gradients(gradients,
                                                       global_step=self.global_step)
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    for grad, var in gradients:
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
    return self.apply_gradient_op
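With staircase=True, the schedule above multiplies the rate by 0.1 once every 50 steps, i.e. lr = 0.01 * 0.1 ** (step // 50). A quick check of the boundaries in plain Python:

for step in [0, 49, 50, 99, 100, 150]:
    lr = 0.01 * 0.1 ** (step // 50)
    print(step, lr)  # 0.01 for steps 0-49, 0.001 for 50-99, 0.0001 for 100-149, ...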
def add_conv_layer(self, scope_name, layer_input, filter_size, input_channels,
                   output_channels, padding='SAME', should_init_wb=True):
    with tf.variable_scope(scope_name):
        weights_shape = filter_size + [input_channels, output_channels]
        initial_weights, initial_bias = self.__get_init_params(scope_name, should_init_wb)
        self.total_weights += weights_shape[0] * weights_shape[1] * weights_shape[2] * weights_shape[3]
        self.logger.info('Weight shape:{} for scope:{}'.format(weights_shape, tf.get_variable_scope().name))
        conv_weights = self.__get_variable('weights', weights_shape, tf.float32,
                                           initializer=initial_weights)
        tf.scalar_summary(scope_name + '/weight_sparsity', tf.nn.zero_fraction(conv_weights))
        tf.histogram_summary(scope_name + '/weights', conv_weights)
        conv = tf.nn.conv2d(layer_input, conv_weights,
                            strides=[1, 1, 1, 1], padding=padding)
        conv_biases = self.__get_variable('biases', [output_channels], tf.float32,
                                          initializer=initial_bias)
        layer_output = tf.nn.relu(tf.nn.bias_add(conv, conv_biases))
        return layer_output
def add_activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def add_summaries(summaries, learning_rate, grads):
    """Add summary ops."""
    # Track quantities for TensorBoard display
    summaries.append(tf.scalar_summary('learning_rate', learning_rate))
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(
                tf.histogram_summary(var.op.name + '/gradients', grad))
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.histogram_summary(var.op.name, var))
    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    return summary_op
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = x.op.name
    # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
From model_deploy.py in project Embarrassingly-Parallel-Image-Classification (author: Azure):
def _add_gradients_summaries(grads_and_vars):
    """Add histogram summaries to gradients.
    Note: The summaries are also added to the SUMMARIES collection.
    Args:
        grads_and_vars: A list of gradient to variable pairs (tuples).
    Returns:
        The _list_ of the added summaries for grads_and_vars.
    """
    summaries = []
    for grad, var in grads_and_vars:
        if grad is not None:
            if isinstance(grad, tf.IndexedSlices):
                grad_values = grad.values
            else:
                grad_values = grad
            summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                                  grad_values))
            summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                                  tf.global_norm([grad_values])))
        else:
            tf.logging.info('Var %s has no gradient', var.op.name)
    return summaries
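A usage sketch; the IndexedSlices branch matters for sparse gradients such as those from embedding lookups. The optimizer choice and loss name are illustrative:

optimizer = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars = optimizer.compute_gradients(loss)
summaries = _add_gradients_summaries(grads_and_vars)  # histogram + norm per variable
summary_op = tf.merge_summary(summaries)
train_op = optimizer.apply_gradients(grads_and_vars)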
From region_proposal.py in project lstm-rcnn-pedestrian-detection (author: buffer51):
def create_train_summaries(learning_rate, clas_loss, reg_loss, rpn_loss,
                           clas_accuracy, clas_positive_percentage,
                           clas_positive_accuracy, VGG16D_activations,
                           clas_activations):
    with tf.name_scope('train'):
        learning_rate_summary = tf.scalar_summary('learning_rate', learning_rate)
        loss_clas_summary = tf.scalar_summary('loss/clas', clas_loss)
        loss_reg_summary = tf.scalar_summary('loss/reg', reg_loss)
        loss_rpn_summary = tf.scalar_summary('loss/rpn', rpn_loss)
        stat_accuracy_summary = tf.scalar_summary('stat/accuracy', clas_accuracy)
        stat_positive_percentage_summary = tf.scalar_summary('stat/positive_percentage', clas_positive_percentage)
        stat_positive_accuracy_summary = tf.scalar_summary('stat/positive_accuracy', clas_positive_accuracy)
        VGG16D_histogram = tf.histogram_summary('activations/VGG16D', VGG16D_activations)
        clas_histogram = tf.histogram_summary('activations/clas', clas_activations)
        return tf.merge_summary([learning_rate_summary, loss_clas_summary,
                                 loss_reg_summary, loss_rpn_summary,
                                 stat_accuracy_summary,
                                 stat_positive_percentage_summary,
                                 stat_positive_accuracy_summary,
                                 VGG16D_histogram, clas_histogram])