def calculate_loss_postprocess(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_postprocess"):
        float_labels = tf.cast(labels, tf.float32)
        # positives keep their scores; negatives are forced to 1.0 so the
        # reduce_min below ignores them (and vice versa for the max over negatives)
        predictions_pos = predictions * float_labels + (1 - float_labels)
        predictions_neg = predictions * (1 - float_labels)
        min_pos = tf.stop_gradient(tf.reduce_min(predictions_pos))
        max_neg = tf.stop_gradient(tf.reduce_max(predictions_neg))
        # penalize positives that fall below the strongest negative, with a small
        # (slope 0.01) bonus for clearing it; symmetric for negatives
        predictions_pos_mistake = tf.nn.relu(max_neg - predictions_pos) - 0.01 * tf.nn.relu(predictions_pos - max_neg)
        predictions_neg_mistake = tf.nn.relu(predictions_neg - min_pos) - 0.01 * tf.nn.relu(min_pos - predictions_neg)
        postprocess_loss = predictions_pos_mistake + predictions_neg_mistake
        return tf.reduce_mean(tf.reduce_sum(postprocess_loss, 1))
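A minimal sketch (toy values, not from the original project) of how the masking above isolates the weakest positive and the strongest negative score:

import tensorflow as tf  # TF 1.x

predictions = tf.constant([[0.9, 0.2, 0.6]])
labels = tf.constant([[1, 0, 1]])
float_labels = tf.cast(labels, tf.float32)
# positives keep their scores; negatives are forced to 1.0 so reduce_min skips them
predictions_pos = predictions * float_labels + (1 - float_labels)
# negatives keep their scores; positives are zeroed so reduce_max skips them
predictions_neg = predictions * (1 - float_labels)
with tf.Session() as sess:
    print(sess.run([tf.reduce_min(predictions_pos), tf.reduce_max(predictions_neg)]))
    # -> [0.6, 0.2]: the weakest positive and the strongest negative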
def calculate_loss(self, predictions, labels, margin=0.2, adaptive=3.0, origin=1.0, **unused_params):
    batch_size = FLAGS.batch_size
    num_classes = FLAGS.num_classes
    with tf.name_scope("loss_hinge"):
        # find negatives that score within `margin` of the weakest true label
        mask = tf.cast(labels, tf.float32)
        reverse_mask = 1.0 - mask
        min_true_pred = tf.reduce_min((predictions - 1.0) * mask, axis=1, keep_dims=True) + 1.0
        mask_wrong = tf.stop_gradient(tf.cast(predictions > (min_true_pred - margin), tf.float32) * reverse_mask)
        # get positive samples: one randomly chosen positive-class score per class slot
        int_labels = tf.cast(labels, tf.int32)
        sample_labels = tf.unstack(int_labels, num=batch_size, axis=0)
        sample_predictions = tf.unstack(predictions, num=batch_size, axis=0)
        positive_predictions = []
        for sample_label, sample_prediction in zip(sample_labels, sample_predictions):
            indices = tf.where(sample_label > 0)
            expanded_indices = tf.tile(indices[:, 0], [num_classes])[:num_classes]
            rand_arrange = tf.random_uniform([num_classes], minval=0, maxval=num_classes, dtype=tf.int32)
            positive_indices = tf.stop_gradient(tf.gather(expanded_indices, rand_arrange))
            positive_prediction = tf.gather(sample_prediction, positive_indices)
            positive_predictions.append(positive_prediction)
        positive_predictions = tf.stack(positive_predictions)
        # hinge loss against the sampled positives, weighted differently for
        # near-miss negatives (adaptive) and for all negatives (origin)
        hinge_loss = tf.maximum(predictions - positive_predictions + margin, 0.0)
        adaptive_loss = hinge_loss * mask_wrong
        adaptive_loss = tf.reduce_mean(tf.reduce_sum(adaptive_loss, axis=1))
        origin_loss = hinge_loss * reverse_mask
        origin_loss = tf.reduce_mean(tf.reduce_sum(origin_loss, axis=1))
        loss = adaptive * adaptive_loss + origin * origin_loss
        return loss
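The positive-sampling loop above is the subtle part: tf.tile stretches the positive-label indices out to num_classes entries, and a random gather then picks one positive per class slot. A standalone sketch (hypothetical toy label vector, TF 1.x):

import tensorflow as tf

num_classes = 4
sample_label = tf.constant([0, 1, 0, 1])              # positive classes: 1 and 3
indices = tf.where(sample_label > 0)                  # [[1], [3]]
expanded_indices = tf.tile(indices[:, 0], [num_classes])[:num_classes]  # [1, 3, 1, 3]
rand_arrange = tf.random_uniform([num_classes], minval=0, maxval=num_classes, dtype=tf.int32)
positive_indices = tf.gather(expanded_indices, rand_arrange)
with tf.Session() as sess:
    print(sess.run(positive_indices))                 # e.g. [3 1 1 3]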
def calculate_loss(self, predictions, labels, topk=20, **unused_params):
    with tf.name_scope("loss_xent_batch"):
        batch_agreement = FLAGS.batch_agreement
        epsilon = 10e-6
        float_batch_size = float(FLAGS.batch_size)
        # use the topk argument instead of a hard-coded k
        topk_predictions, _ = tf.nn.top_k(predictions, k=topk)
        min_topk_predictions = tf.reduce_min(topk_predictions, axis=1, keep_dims=True)
        topk_mask = tf.cast(predictions >= min_topk_predictions, dtype=tf.float32)
        float_labels = tf.cast(labels, tf.float32)
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        # minimum positive prediction within the top-k
        positive_predictions = (predictions * float_labels * topk_mask) + 1.0 - (float_labels * topk_mask)
        min_pp = tf.reduce_min(positive_predictions)
        # maximum negative prediction
        negative_predictions = predictions * (1.0 - float_labels)
        max_np = tf.reduce_max(negative_predictions)
        # 1s that fall under the top-k
        false_negatives = tf.cast(predictions < min_topk_predictions, tf.float32) * float_labels
        # 0s that grow over 1s in the top-k
        false_positives = tf.cast(predictions > min_pp, tf.float32) * (1.0 - float_labels) * topk_mask
        weight = (false_negatives + false_positives) * batch_agreement + 1.0
        weight = tf.stop_gradient(weight)
        print(weight)
        return tf.reduce_mean(tf.reduce_sum(weight * cross_entropy_loss, 1))
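A quick sketch (assumed toy batch) of the top-k mask built above, here with k=2:

import tensorflow as tf

predictions = tf.constant([[0.1, 0.7, 0.3, 0.9]])
topk_predictions, _ = tf.nn.top_k(predictions, k=2)
min_topk = tf.reduce_min(topk_predictions, axis=1, keep_dims=True)  # [[0.7]]
topk_mask = tf.cast(predictions >= min_topk, tf.float32)
with tf.Session() as sess:
    print(sess.run(topk_mask))   # [[0. 1. 0. 1.]]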
# Source: multi_view_cnn_deep_combine_chain_model.py (project: youtube-8m, author: wangheda)
def multiview(self, cnn_output, axis=1):
    # pool over the given axis three ways (the original hard-coded axis=1,
    # leaving the parameter unused) and concatenate along the feature axis
    max_view = tf.reduce_max(cnn_output, axis=axis)
    mean_view = tf.reduce_mean(cnn_output, axis=axis)
    min_view = tf.reduce_min(cnn_output, axis=axis)
    multi_view = tf.concat([max_view, mean_view, min_view], axis=1)
    return multi_view
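Usage sketch (hypothetical shapes and instance name): pooling a [batch, views, features] CNN output over the view axis yields a [batch, 3 * features] tensor.

cnn_output = tf.random_normal([8, 5, 128])    # 8 samples, 5 views, 128 features
pooled = model.multiview(cnn_output, axis=1)  # `model` is a hypothetical instance
# pooled.get_shape() -> (8, 384): max, mean, and min views concatenated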
def min(x, axis=None, keepdims=False):
    return tf.reduce_min(x, axis=None if axis is None else [axis], keep_dims=keepdims)
def add_weights_summary(weights, name=""):
    with tf.name_scope(name + "_summary"):
        mean = tf.reduce_mean(weights)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(weights - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(weights))
        tf.summary.scalar('min', tf.reduce_min(weights))
        tf.summary.histogram('histogram', weights)
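Hypothetical usage of the helper above: record distribution statistics for a layer's kernel so they show up in TensorBoard.

kernel = tf.get_variable('conv1_kernel', shape=[3, 3, 3, 64])
add_weights_summary(kernel, name='conv1_kernel')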
def summary(tensor, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
    """ Attach a lot of summaries to a Tensor. """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', tensor.name)
    tensor_name = re.sub(':', '-', tensor_name)
    with tf.name_scope('summary_' + tensor_name):
        summaries = []
        if tensor.get_shape().ndims == 0:  # public API instead of the private _shape
            summaries.append(tf.summary.scalar(tensor_name, tensor))
        else:
            if 'mean' in summary_type:
                mean = tf.reduce_mean(tensor)
                summaries.append(tf.summary.scalar(tensor_name + '/mean', mean))
            if 'stddev' in summary_type:
                mean = tf.reduce_mean(tensor)
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
                summaries.append(tf.summary.scalar(tensor_name + '/stddev', stddev))
            if 'max' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/max', tf.reduce_max(tensor)))
            if 'min' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/min', tf.reduce_min(tensor)))
            if 'sparsity' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(tensor)))
            if 'histogram' in summary_type:
                summaries.append(tf.summary.histogram(tensor_name, tensor))
        return tf.summary.merge(summaries)
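Hypothetical usage: attach a reduced set of summaries to a weight variable and keep the merged op for the summary writer.

weights = tf.get_variable('fc_weights', shape=[256, 10])
merged = summary(weights, summary_type=['mean', 'max', 'min'])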
def variable_summaries(var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def _convolutional_layer(self, input, patch_size, stride, input_channels, output_channels, bias_init_value, scope_name):
    with tf.variable_scope(scope_name) as scope:
        weights = tf.get_variable(name='weights',
                                  shape=[patch_size, patch_size, input_channels, output_channels],
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())
        biases = tf.Variable(name='biases', initial_value=tf.constant(value=bias_init_value, shape=[output_channels]))
        conv = tf.nn.conv2d(input, weights, [1, stride, stride, 1], padding='SAME')
        linear_rectification_bias = tf.nn.bias_add(conv, biases)
        output = tf.nn.relu(linear_rectification_bias, name=scope.name)
        grid_x = output_channels // 4
        grid_y = 4 * input_channels
        kernels_image_grid = self._create_kernels_image_grid(weights, (grid_x, grid_y))
        tf.image_summary(scope_name + '/features', kernels_image_grid, max_images=1)
        if "_conv1" in scope_name:
            # rescale the kernels to [0, 1] so they can be rendered as images
            x_min = tf.reduce_min(weights)
            x_max = tf.reduce_max(weights)
            weights_0_to_1 = (weights - x_min) / (x_max - x_min)
            weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)
            # to tf.image_summary format [batch_size, height, width, channels]
            weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
            # distinct tag so this does not collide with the grid summary above
            tf.image_summary(scope_name + '/first_layer_filters', weights_transposed[:, :, :, 0:1], max_images=32)
        return output
def variable_summaries(self, var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            # mean of squared deviations, not the sum, for a true stddev
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
        return
def print_tensor_summary(tensor, tag=None, n_print=21):
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    tensor_avg = tf.reduce_mean(tensor)
    tensor_zero_fraction = tf.nn.zero_fraction(tensor)
    tensor_shape = tf.shape(tensor)
    tag = tag or tensor.name
    tensor = tf.Print(tensor,
                      [tensor_min, tensor_max, tensor_avg, tensor_zero_fraction, tensor_shape, tensor],
                      message=(tag + ' Min, max, mean, sparsity, shape, value:'),
                      summarize=n_print)
    return tensor
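Hypothetical usage: wrapping a tensor this way prints its statistics every time the returned tensor is evaluated, which is handy for chasing NaNs or dead activations.

logits = tf.random_normal([4, 10])
logits = print_tensor_summary(logits, tag='logits')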
def min(x, axis=None, keepdims=False):
    '''Minimum value in a tensor.
    '''
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
def summarize(self, sess):
    mean_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.mean)), tf.reduce_min(self.mean), tf.reduce_max(self.mean)])
    var_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.var)), tf.reduce_min(self.var), tf.reduce_max(self.var)])
    prec_info = sess.run([tf.reduce_min(self.prec), tf.reduce_max(self.prec)])
    return ("l2(%.2e), prec(%.2e, %.2e) std(%.2e, %.2e)" %
            (mean_info[0], prec_info[0], prec_info[1], np.sqrt(var_info[1]), np.sqrt(var_info[2])))
def summarize(self, sess):
    mean_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.mean)), tf.reduce_min(self.mean), tf.reduce_max(self.mean)])
    var_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.var)), tf.reduce_min(self.var), tf.reduce_max(self.var)])
    # self.prec is assumed to be a NumPy array here, so it is not a valid
    # sess.run fetch; read its extrema directly
    prec_info = [self.prec.min(), self.prec.max()]
    return ("l2(%.2e), prec(%.2e, %.2e) std(%.2e, %.2e)" %
            (mean_info[0], prec_info[0], prec_info[1], np.sqrt(var_info[1]), np.sqrt(var_info[2])))
def batch_accuracy(a, b):
    "Each point of a is measured against the closest point on b. Distance differences are added together."
    # broadcast to [N, N, D] so that tiled_a[i, j] == a[i] and tiled_b[i, j] == b[j]
    # (assumes a and b have the same first dimension N)
    tiled_a = a
    tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])
    tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])
    tiled_b = b
    tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
    # tile by the batch size of a; the original tiled by the post-reshape
    # leading dim, which is always 1 (a no-op rescued only by broadcasting)
    tiled_b = tf.tile(tiled_b, [int(a.get_shape()[0]), 1, 1])
    difference = tf.abs(tiled_a - tiled_b)
    # per-coordinate minimum over the points of b, summed over coordinates and batch
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum(difference, axis=0)
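A worked sketch with toy point sets (assumed values). For each coordinate of each row of a, the closest value among all rows of b is found; the gaps are then summed over coordinates and over the batch.

import tensorflow as tf

a = tf.constant([[0.0, 1.0], [2.0, 3.0]])
b = tf.constant([[0.5, 1.5], [2.0, 2.0]])
with tf.Session() as sess:
    print(sess.run(batch_accuracy(a, b)))  # 2.0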
def accuracy(a, b):
    "Each point of a is measured against the closest point on b. Distance differences are added together."
    # note: the chained axis-1/axis-0 reductions below only type-check for
    # rank-4 inputs; lower-rank inputs run out of axes to reduce
    difference = tf.abs(a - b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum(tf.reduce_sum(difference, axis=0), axis=0)
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)
    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped,
                             [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'),
                              tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
    return input_mat_clipped
def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev/' + name, stddev)
        tf.summary.scalar('max/' + name, tf.reduce_max(var))
        tf.summary.scalar('min/' + name, tf.reduce_min(var))
        tf.summary.histogram(name, var)