def _add_cross_entropy(labels, logits, pref):
    """Compute average cross entropy and add to loss collection.

    Args:
        labels: Single dimension labels from distorted_inputs() or inputs().
        logits: Output map from inference().
        pref: Either 'c' or 's', for contours or segments, respectively.
    """
    with tf.variable_scope('{}_cross_entropy'.format(pref)) as scope:
        class_prop = C_CLASS_PROP if pref == 'c' else S_CLASS_PROP
        # Weight each pixel by its class: label 0 gets class_prop,
        # label 1 gets 1 - class_prop, to counter class imbalance.
        weight_per_label = \
            tf.scalar_mul(class_prop, tf.cast(tf.equal(labels, 0), tf.float32)) + \
            tf.scalar_mul(1.0 - class_prop, tf.cast(tf.equal(labels, 1), tf.float32))
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            labels=tf.squeeze(labels, axis=[3]), logits=logits)
        cross_entropy_weighted = tf.multiply(weight_per_label, cross_entropy)
        cross_entropy_mean = tf.reduce_mean(cross_entropy_weighted, name=scope.name)
        tf.add_to_collection('losses', cross_entropy_mean)
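A minimal usage sketch (an assumption, not part of the original file): C_CLASS_PROP and S_CLASS_PROP are taken to be module-level class-proportion constants, labels a [batch, height, width, 1] int32 map and logits a [batch, height, width, 2] score map.

C_CLASS_PROP = 0.9  # hypothetical contour-class proportion
S_CLASS_PROP = 0.8  # hypothetical segment-class proportion
labels = tf.placeholder(tf.int32, [None, 64, 64, 1])
logits = tf.placeholder(tf.float32, [None, 64, 64, 2])
_add_cross_entropy(labels, logits, 'c')
total_loss = tf.add_n(tf.get_collection('losses'))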
Python scalar_mul() example source code
def clip_norm(g, c, n):
    if c > 0:
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            import copy
            condition = n >= c
            then_expression = tf.scalar_mul(c / n, g)
            else_expression = g

            # Save the shape so it can be restored after tf.cond,
            # for both dense and sparse gradients.
            if hasattr(then_expression, 'get_shape'):
                g_shape = copy.copy(then_expression.get_shape())
            elif hasattr(then_expression, 'dense_shape'):
                g_shape = copy.copy(then_expression.dense_shape)
            if condition.dtype != tf.bool:
                condition = tf.cast(condition, 'bool')
            g = K.tensorflow_backend.control_flow_ops.cond(
                condition, lambda: then_expression, lambda: else_expression)
            if hasattr(then_expression, 'get_shape'):
                g.set_shape(g_shape)
            elif hasattr(then_expression, 'dense_shape'):
                g._dense_shape = g_shape
        else:
            g = K.switch(n >= c, g * c / n, g)
    return g
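A hedged usage sketch: computing the global gradient norm by hand and clipping each gradient with the helper above. loss and params are placeholders for graph pieces defined elsewhere.

grads = tf.gradients(loss, params)
norm = tf.sqrt(sum(tf.reduce_sum(tf.square(g)) for g in grads))
clipped_grads = [clip_norm(g, 1.0, norm) for g in grads]  # clip at global norm 1.0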
def build_graph(self, q_network, config):
    self.ph_reward = tf.placeholder(tf.float32, [None])
    self.ph_action = tf.placeholder(tf.int32, [None])
    self.ph_terminal = tf.placeholder(tf.int32, [None])
    self.ph_q_next_target = tf.placeholder(tf.float32, [None, config.output.action_size])
    self.ph_q_next = tf.placeholder(tf.float32, [None, config.output.action_size])

    action_one_hot = tf.one_hot(self.ph_action, config.output.action_size)
    q_action = tf.reduce_sum(tf.multiply(q_network.node, action_one_hot), axis=1)
    if config.double_dqn:
        # Double DQN: pick the argmax action with the online network,
        # but evaluate it with the target network.
        q_max = tf.reduce_sum(self.ph_q_next_target * tf.one_hot(tf.argmax(self.ph_q_next, axis=1),
                                                                 config.output.action_size), axis=1)
    else:
        q_max = tf.reduce_max(self.ph_q_next_target, axis=1)
    y = self.ph_reward + tf.cast(1 - self.ph_terminal, tf.float32) * tf.scalar_mul(config.rewards_gamma, q_max)
    return tf.losses.absolute_difference(q_action, y)
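For reference, the double_dqn branch implements the Double-DQN target: the online network (ph_q_next) chooses the action, the target network (ph_q_next_target) scores it. A small NumPy illustration of that selection (values are made up):

import numpy as np
q_next = np.array([[1.0, 3.0]])         # online net: argmax -> action 1
q_next_target = np.array([[2.0, 0.5]])  # target net evaluates action 1
a_star = q_next.argmax(axis=1)
q_max = q_next_target[np.arange(len(a_star)), a_star]  # [0.5], not the target net's own max
y = 1.0 + 0.99 * q_max                  # reward + gamma * q_max for a non-terminal step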
test_boundary_optimization.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list()
    s_shape = s.get_shape().as_list()
    # expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], 1])
    ss = tf.reshape(s, [s_shape[0], s_shape[1], 1])
    ss = tf.transpose(ss, perm=[0, 2, 1])
    cc = tf.tile(cc, [1, 1, s_shape[0]])
    ss = tf.tile(ss, [c_shape[0], 1, 1])
    # pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc, ss), axis=1))
    dist2 = tf.reduce_mean(dist2, axis=0)  # hack: get rid of batches here
    # softmin
    return tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0, "float32"), dist2)), dist2), axis=0)
def create_cost_soft_min_distance(c, s, k=2.0):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list()
    s_shape = s.get_shape().as_list()
    # expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], 1])
    ss = tf.reshape(s, [s_shape[0], s_shape[1], 1])
    ss = tf.transpose(ss, perm=[2, 1, 0])
    cc = tf.tile(cc, [1, 1, s_shape[0]])
    ss = tf.tile(ss, [c_shape[0], 1, 1])
    #cc = tf.transpose(cc, perm=[2, 1, 0])
    #cc = tf.tile(cc, [s_shape[0], 1, 1])
    #ss = tf.tile(ss, [1, 1, c_shape[0]])
    # pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc, ss), axis=1))
    # softmin
    softmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-k, "float32"), dist2)), dist2), axis=1)
    return tf.reduce_mean(softmin)
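In both variants the soft-min is sum_j softmax(-k * d)_j * d_j, a differentiable surrogate for min_j d_j whose sharpness grows with k. A quick NumPy check (illustrative values):

import numpy as np
d = np.array([0.2, 1.0, 3.0])
for k in (1.0, 10.0):
    w = np.exp(-k * d)
    w /= w.sum()
    print(k, (w * d).sum())  # k=1 -> ~0.55, k=10 -> ~0.20 (approaches the true min 0.2)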
def create_cost_soft_min_aligned_distance(x, y, nx, ny, k=2.0, gamma=1.0):
    d = create_pair_wise_distances(x, y)
    a = create_pair_wise_dots(nx, ny)
    a = tf.scalar_mul(-0.5, tf.add(a, -1.0))  # map dot products to [0,1], 0 = aligned
    return tf.reduce_mean(create_aligned_distance(d, a, k=k, gamma=gamma))
#def create_cost_spacing(c, length, normalized = True):
# c_shape = c.get_shape().as_list();
# c1 = tf.slice(c, [1,0], [-1,-1]);
# c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
# d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
# if normalized:
# return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
# else:
# return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
def create_cost_bending(tn):
    tn_shape = tn.get_shape().as_list()
    tn1 = tf.slice(tn, [1, 0], [-1, -1])
    tn2 = tf.slice(tn, [0, 0], [tn_shape[0] - 1, -1])
    dp = tf.reduce_sum(tf.multiply(tn1, tn2), axis=1)
    return tf.scalar_mul(-1.0, tf.reduce_mean(dp))
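create_cost_bending returns the negated mean dot product of consecutive unit tangents, so a straight curve scores -1.0 and turning raises the cost. A toy NumPy check:

import numpy as np
tn = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # unit tangents
dp = (tn[1:] * tn[:-1]).sum(axis=1)                  # [1.0, 0.0]
print(-dp.mean())                                    # -0.5; an all-straight curve gives -1.0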
#def create_cost_side(s, b, length = 1.0, weight_spacing = 1.0, weight_bending = 1.0):
# cost = create_cost_soft_min_distance(s, b);
# if weight_spacing != 0:
# cost_spacing = create_cost_spacing(s, length);
# cost = tf.add(cost, tf.mul(tf.constant(weight_spacing, "float32"), cost_spacing));
# if weight_bending != 0:
# cost_bending = create_cost_bending(s);
# cost = tf.add(cost, tf.mul(tf.constant(weight_bending, "float32"), cost_bending));
# return cost;
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list()
    s_shape = s.get_shape().as_list()
    # expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1])
    ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1])
    ss = tf.transpose(ss, perm=[0, 3, 2, 1])
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]])
    ss = tf.tile(ss, [1, c_shape[0], 1, 1])
    # pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc, ss), axis=2))
    dist2 = tf.reduce_mean(dist2, axis=0)  # hack: get rid of batches here
    # softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0, "float32"), dist2)), dist2), axis=1)
    return tf.reduce_mean(distmin)
def create_cost_soft_min_distance_valid(self, c, s, v):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list()
    s_shape = s.get_shape().as_list()
    # expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1])
    mm = tf.reduce_max(v)  # hack for batch size = 1
    ss = tf.slice(s, [0, 0, 0], [-1, mm, -1])
    ss = tf.reshape(ss, [s_shape[0], s_shape[1], s_shape[2], 1])
    ss = tf.transpose(ss, perm=[0, 3, 2, 1])
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]])
    ss = tf.tile(ss, [1, c_shape[0], 1, 1])
    # pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc, ss), axis=2))
    dist2 = tf.reduce_mean(dist2, axis=0)  # hack: get rid of batches here
    # softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0, "float32"), dist2)), dist2), axis=1)
    return tf.reduce_mean(distmin)
def get_masks(origin_images, height, width, channels=3):
    """Add horizontal color lines, blank out rows, and add noise."""
    quarty = tf.random_uniform([height // 4, 1])
    prop = tf.scalar_mul(tf.convert_to_tensor(0.2), tf.ones([height // 4, 1]))
    quarty = tf.round(tf.add(quarty, prop))
    y = tf.reshape(tf.stack([quarty, quarty, quarty, quarty], axis=1), [height, 1])
    mask = tf.matmul(y, tf.ones([1, width]))
    masks = tf.expand_dims(mask, 0)
    masks = tf.expand_dims(masks, -1)
    maskedimages = tf.multiply(origin_images, masks)
    # add noise
    scale = tf.random_uniform([channels, height, 1])
    y = tf.subtract(tf.ones([height, 1]), y)
    y = tf.expand_dims(y, 0)
    y = tf.scalar_mul(tf.convert_to_tensor(255.), tf.multiply(scale, y))
    noise = tf.add(mask, tf.matmul(y, tf.ones([channels, 1, width])))
    noise = tf.stack(tf.split(value=noise, num_or_size_splits=noise.get_shape()[0], axis=0), axis=3)
    maskedimages = tf.add(maskedimages, noise)
    return maskedimages
def __init__(self, dim_in, dim_hidden, learning_rate, gate=Relu(),
             initializer=tf.random_normal_initializer(), optimizer=None, name='BP'):
    dim_out = 1
    if optimizer is None:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    self.x = tf.placeholder(tf.float32, shape=(None, dim_in))
    self.target = tf.placeholder(tf.float32, shape=(None, dim_out))
    U, __, ___, phi = \
        fully_connected(name, 'fully_connected_layer1', self.x, dim_in, dim_hidden, initializer, gate.gate_fun)
    W, __, ___, y = \
        fully_connected(name, 'fully_connected_layer2', phi, dim_hidden, dim_out, initializer, tf.identity)
    self.loss = tf.scalar_mul(0.5, tf.reduce_mean(tf.squared_difference(y, self.target)))
    self.all_gradients = optimizer.compute_gradients(self.loss)
    self.train_op = optimizer.apply_gradients(self.all_gradients)
    self.outgoing_weight = W
    self.feature_matrix = U
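The 0.5 factor in the loss is conventional: it cancels the 2 from differentiating the square, so the gradient with respect to the prediction is just the residual. A one-line check:

y, target = 2.0, 1.5
loss = 0.5 * (y - target) ** 2   # 0.125
grad = y - target                # d(loss)/dy = 0.5, no stray factor of 2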
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
        float_labels = tf.cast(labels, tf.float32)
        all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
        all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
        sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
        hinge_loss = tf.maximum(
            all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
        return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
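calculate_loss maps {0, 1} labels to {-1, +1} signs and applies the margin-b hinge max(0, b - sign * prediction), summed over classes and averaged over the batch. A toy NumPy check with b = 1:

import numpy as np
labels = np.array([[1.0, 0.0]])
predictions = np.array([[0.3, -2.0]])
sign = 2.0 * labels - 1.0                          # [[ 1., -1.]]
hinge = np.maximum(0.0, 1.0 - sign * predictions)  # [[0.7, 0.0]]
print(hinge.sum(axis=1).mean())                    # 0.7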
def dsc_loss(scores, labels):
    scores = tf.sigmoid(scores)
    inter = tf.scalar_mul(2., tf.reduce_sum(tf.multiply(scores, labels), [1, 2, 3]))
    union = tf.add(tf.reduce_sum(scores, [1, 2, 3]), tf.reduce_sum(labels, [1, 2, 3]))
    dsc_loss = tf.reduce_mean(tf.subtract(1., tf.div(inter, union)))
    return dsc_loss
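dsc_loss is one minus the soft Dice coefficient 2|S∩L| / (|S| + |L|). As written the ratio is undefined when scores and labels are both all zero; a common variant (an assumption here, not part of the original) adds a smoothing term inside the function:

smooth = 1.0  # hypothetical smoothing constant
dice = tf.div(tf.add(inter, smooth), tf.add(union, smooth))
dsc_loss_smoothed = tf.reduce_mean(tf.subtract(1., dice))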
def create_left_right(c, w):
    # tangent vectors
    c_shape = c.get_shape().as_list()
    c1 = tf.slice(c, [1, 0], [-1, -1])
    c2 = tf.slice(c, [0, 0], [c_shape[0] - 1, -1])
    dc = tf.subtract(c1, c2)
    # normalized tangent vectors
    nr = tf.sqrt(tf.reduce_sum(tf.square(dc), axis=1))
    dcn = tf.transpose(tf.div(tf.transpose(dc), nr))
    # average tangent
    dc1 = tf.concat([tf.slice(dcn, [0, 0], [1, -1]), dcn], 0)
    dc2 = tf.concat([dcn, tf.slice(dcn, [c_shape[0] - 2, 0], [1, -1])], 0)
    av = tf.scalar_mul(0.5, tf.add(dc1, dc2))
    nr = tf.sqrt(tf.reduce_sum(tf.square(av), axis=1))
    av = tf.transpose(tf.div(tf.transpose(av), nr))
    # normal: rotate the averaged tangent by 90 degrees
    nrm = tf.multiply(av, tf.constant([-1, 1], "float32"))
    nrm = tf.reverse(nrm, [1])
    left = tf.transpose(tf.multiply(tf.transpose(nrm), w))
    right = tf.scalar_mul(-1.0, left)
    left = tf.add(left, c)
    right = tf.add(right, c)
    return left, right
def create_left_right(c, w, nrm):
    left = tf.transpose(tf.multiply(tf.transpose(nrm), w))
    right = tf.scalar_mul(-1.0, left)
    left = tf.add(left, c)
    right = tf.add(right, c)
    return left, right
#def create_dot(v1, v2):
# return tf.reduce_sum(tf.mul(v1, v2), reduction_indices = 1);
def create_soft_min(d, k=2.0):
    softmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(-k, d)), d), axis=1)
    return softmin
def create_cost(c, w, b, nb, length, weight_spacing=1.0, weight_bending=1.0, gamma=1.0, kappa=2.0):
    # tangents
    t = create_tangent(c)
    tn = create_normalize_tangent(t)
    nl = create_normal(tn)
    nr = tf.scalar_mul(-1.0, nl)
    l, r = create_left_right(c, w, nl)
    cost_left = create_cost_soft_min_aligned_distance(l, b, nl, nb, k=kappa, gamma=gamma)
    cost_right = create_cost_soft_min_aligned_distance(r, b, nr, nb, k=kappa, gamma=gamma)
    cost = tf.add(cost_left, cost_right)
    # spacing and bending
    if weight_spacing != 0:
        cost_spacing = tf.scalar_mul(weight_spacing, create_cost_spacing(t, length))
        cost = tf.add(cost, cost_spacing)
    else:
        cost_spacing = tf.constant(0)
    if weight_bending != 0:
        cost_bending = tf.scalar_mul(weight_bending, create_cost_bending(tn))
        cost = tf.add(cost, cost_bending)
    else:
        cost_bending = tf.constant(0)
    return (cost, cost_left, cost_right, cost_spacing, cost_bending, nl, l, r)
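A hedged sketch of minimizing this cost with plain gradient descent, assuming c is a tf.Variable of contour points and w, b, nb, length come from the surrounding pipeline (all placeholders here):

cost = create_cost(c, w, b, nb, length)[0]  # total cost is the first tuple element
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(cost, var_list=[c])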