def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    # Binary-case DeepFool: walk x along the gradient until the 0.5-thresholded
    # prediction flips or the epoch budget is exhausted.
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z * (1 + eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z * (1 + eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = -y * g / tf.norm(g)
        return i + 1, z + dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
Example source code for Python tf.clip_by_value()
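As a quick, self-contained reference before the project snippets below, here is a minimal sketch of the basic call (assuming a TF 1.x graph and session): values below the lower bound are raised to it, and values above the upper bound are lowered to it.

import tensorflow as tf

t = tf.constant([-2.0, 0.3, 0.9, 5.0])
clipped = tf.clip_by_value(t, clip_value_min=0.0, clip_value_max=1.0)

with tf.Session() as sess:
    print(sess.run(clipped))  # [0.  0.3 0.9 1. ]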
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0, tf.float32)
    misclassnum = tf.constant(0, tf.float32)
    class_num = tf.constant(class_nums, tf.float32)
    sublist = []
    for index in range(1, class_nums - 2):
        current_annotation = tf.cast(tf.equal(tf.ones_like(annotations) * index,
                                              annotations), tf.float32)
        current_prediction = tf.cast(tf.equal(tf.ones_like(decoded_predictions) * index,
                                              decoded_predictions), tf.float32)
        Overlap = tf.add(current_annotation, current_prediction)
        Common = tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap) * 2, Overlap),
                                       tf.float32), [0, 1, 2, 3])
        annotation_num = tf.reduce_sum(current_annotation, [0, 1, 2, 3])
        predict_num = tf.reduce_sum(current_prediction, [0, 1, 2, 3])
        all_num = tf.add(annotation_num, predict_num)
        # Clip the denominator away from zero so an absent class yields 0 rather than NaN.
        Sub_DiceRatio = Common * 2 / tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum = tf.cond(tf.equal(Sub_DiceRatio, 0.0),
                              lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio = DiceRatio + Sub_DiceRatio
    DiceRatio = DiceRatio / tf.clip_by_value(tf.cast(class_num - misclassnum - 3, tf.float32),
                                             1e-10, 1e+10)
    return DiceRatio, sublist
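A hedged, standalone illustration of the safe-division trick used above: clipping the denominator to at least 1e-10 turns a would-be 0/0 (class absent from both prediction and annotation) into 0 rather than NaN. The constants below are illustrative, not taken from the project.

import tensorflow as tf

common = tf.constant(0.0)   # no overlapping voxels
all_num = tf.constant(0.0)  # class absent from both tensors
dice = common * 2 / tf.clip_by_value(all_num, 1e-10, 1e+10)

with tf.Session() as sess:
    print(sess.run(dice))  # 0.0 instead of nan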
Source file: Dense_Transformer_Network.py (project: 3D_Dense_Transformer_Networks, author: JohnYC1995)
def _meshgrid(self):
    with tf.variable_scope('_meshgrid'):
        x_t = tf.matmul(tf.ones(shape=tf.stack([self.out_height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_height), 1),
                        tf.ones(shape=tf.stack([1, self.out_width])))
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        px, py = tf.stack([x_t_flat], axis=2), tf.stack([y_t_flat], axis=2)
        # Source control points
        x, y = tf.linspace(-1., 1., self.Column_controlP_number), tf.linspace(-1., 1., self.Row_controlP_number)
        x, y = tf.meshgrid(x, y)
        xs, ys = tf.transpose(tf.reshape(x, (-1, 1))), tf.transpose(tf.reshape(y, (-1, 1)))
        cpx = tf.transpose(tf.stack([xs], axis=2), perm=[1, 0, 2])
        cpy = tf.transpose(tf.stack([ys], axis=2), perm=[1, 0, 2])
        px, cpx = tf.meshgrid(px, cpx)
        py, cpy = tf.meshgrid(py, cpy)
        # Compute distance R and the thin-plate-spline kernel R * log(R)
        Rx, Ry = tf.square(tf.subtract(px, cpx)), tf.square(tf.subtract(py, cpy))
        R = tf.add(Rx, Ry)
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        # Source coordinates
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([ones, x_t_flat, y_t_flat, R], 0)
        grid = tf.reshape(grid, [-1])
        grid = tf.reshape(grid, [self.Column_controlP_number * self.Row_controlP_number + 3,
                                 self.out_height * self.out_width])
        return grid
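A small sketch of why R is clipped before the log in the snippet above: the thin-plate-spline kernel R * log(R) should evaluate to 0 at R = 0, but log(0) alone is -inf and would poison the product. Clipping R to at least 1e-10 keeps everything finite. The values below are illustrative only.

import tensorflow as tf

R = tf.constant([0.0, 0.25, 1.0])
U = R * tf.log(tf.clip_by_value(R, 1e-10, 1e+10))

with tf.Session() as sess:
    print(sess.run(U))  # approximately [ 0.     -0.3466  0.    ]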
def Train(self,
          loss,
          learning_rate,
          clip_value_min,
          clip_value_max,
          name='training'):
    tf.scalar_summary(':'.join([name, loss.op.name]), loss)
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    clipped_grads_and_vars = [
        (tf.clip_by_value(g, clip_value_min, clip_value_max), v)
        for g, v in grads_and_vars
    ]
    for g, v in clipped_grads_and_vars:
        _ = tf.histogram_summary(':'.join([name, v.name]), v)
        _ = tf.histogram_summary('%s: gradient for %s' % (name, v.name), g)
    train_op = optimizer.apply_gradients(clipped_grads_and_vars)
    return train_op
def augment_image_pair(self, left_image, right_image):
    # randomly shift gamma
    random_gamma = tf.random_uniform([], 0.8, 1.2)
    left_image_aug = left_image ** random_gamma
    right_image_aug = right_image ** random_gamma

    # randomly shift brightness
    random_brightness = tf.random_uniform([], 0.5, 2.0)
    left_image_aug = left_image_aug * random_brightness
    right_image_aug = right_image_aug * random_brightness

    # randomly shift color
    random_colors = tf.random_uniform([3], 0.8, 1.2)
    white = tf.ones([tf.shape(left_image)[0], tf.shape(left_image)[1]])
    color_image = tf.stack([white * random_colors[i] for i in range(3)], axis=2)
    left_image_aug *= color_image
    right_image_aug *= color_image

    # saturate
    left_image_aug = tf.clip_by_value(left_image_aug, 0, 1)
    right_image_aug = tf.clip_by_value(right_image_aug, 0, 1)

    return left_image_aug, right_image_aug
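A toy illustration (not from the project) of the final saturation step: brightness and colour scaling can push pixel values above 1.0, and tf.clip_by_value brings them back into [0, 1].

import tensorflow as tf

img = tf.constant([[0.2, 0.9], [0.5, 1.0]])
brightened = img * 1.6                           # values now reach 1.6
saturated = tf.clip_by_value(brightened, 0.0, 1.0)

with tf.Session() as sess:
    print(sess.run(saturated))  # [[0.32 1.  ] [0.8  1.  ]]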
def SSIM(self, x, y):
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2
    mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
    mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
    sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
    sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
    sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y
    SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
    SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
    SSIM = SSIM_n / SSIM_d
    return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
def clip_gradients_by_stddev(grads_and_vars, clip_factor=2.5):
    """Clip gradients to [-clip_factor*stddev, clip_factor*stddev]."""
    gradients, variables = zip(*grads_and_vars)
    clipped_gradients = []
    for gradient in gradients:
        if gradient is None:
            clipped_gradients.append(None)
            continue
        mean_gradient = tf.reduce_mean(gradient)
        stddev_gradient = tf.sqrt(tf.reduce_mean(tf.square(gradient - mean_gradient)))
        # clipped_gradient = tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient)
        clipped_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                                   lambda: gradient,
                                   lambda: tf.clip_by_value(gradient, -clip_factor * stddev_gradient,
                                                            clip_factor * stddev_gradient))
        clipped_gradients.append(clipped_gradient)
    return list(zip(clipped_gradients, variables))
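A standalone sketch of the per-tensor stddev clip, with the FLAGS-dependent size check dropped for clarity. The clip_factor of 2.5 matches the default above; the gradient values are made up.

import tensorflow as tf

g = tf.constant([0.0] * 9 + [100.0])  # one extreme outlier
mean = tf.reduce_mean(g)
std = tf.sqrt(tf.reduce_mean(tf.square(g - mean)))
clipped = tf.clip_by_value(g, -2.5 * std, 2.5 * std)

with tf.Session() as sess:
    print(sess.run(clipped))  # the outlier is clipped from 100 to 75, zeros unchanged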
def create_networks(self):
    self.mean = tf.get_variable("means", shape=(1, int(self.state_input.get_shape()[1])),
                                initializer=tf.constant_initializer(0),
                                trainable=False)
    self.std = tf.get_variable("stds", shape=(1, int(self.state_input.get_shape()[1])),
                               initializer=tf.constant_initializer(1),
                               trainable=False)
    mean_ph = tf.placeholder(tf.float32, shape=self.mean.get_shape())
    std_ph = tf.placeholder(tf.float32, shape=self.std.get_shape())
    self.norm_set_op = [self.mean.assign(mean_ph), self.std.assign(std_ph)]
    self.norm_phs = [mean_ph, std_ph]
    self.good_input = tf.clip_by_value((self.state_input - self.mean) / (self.std + 1e-5), -50, 50)
    self.good_next_input = tf.clip_by_value((self.next_state_input - self.mean) / (self.std + 1e-5), -50, 50)
    self.atom_probs, self.weights, self.weights_phs = self.create_network("network", self.good_input)
    self.target_atom_probs, self.target_weights, self.target_weights_phs = self.create_network("target",
                                                                                               self.good_next_input)
Source file: tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit.

    With default values, it returns element-wise `max(x, 0)`.

    # Arguments
        x: A tensor or variable.
        alpha: A scalar, slope of negative section (default=`0.`).
        max_value: Saturation threshold.

    # Returns
        A tensor.
    """
    if alpha != 0.:
        negative_part = tf.nn.relu(-x)
    x = tf.nn.relu(x)
    if max_value is not None:
        max_value = _to_tensor(max_value, x.dtype.base_dtype)
        zero = _to_tensor(0., x.dtype.base_dtype)
        x = tf.clip_by_value(x, zero, max_value)
    if alpha != 0.:
        alpha = _to_tensor(alpha, x.dtype.base_dtype)
        x -= alpha * negative_part
    return x
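A hedged example of the saturating branch in isolation: with a max_value of 6, the clip reproduces the familiar ReLU6 activation (written with plain TF 1.x ops rather than the Keras backend helpers).

import tensorflow as tf

x = tf.constant([-1.0, 3.0, 8.0])
relu6_like = tf.clip_by_value(tf.nn.relu(x), 0.0, 6.0)

with tf.Session() as sess:
    print(sess.run(relu6_like))  # [0. 3. 6.]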
Source file: tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def categorical_crossentropy(output, target, from_logits=False):
    """Categorical crossentropy between an output tensor
    and a target tensor, where the target is a tensor of the same
    shape as the output.
    """
    # Note: tf.nn.softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        # scale preds so that the class probas of each sample sum to 1
        output /= tf.reduce_sum(output,
                                reduction_indices=len(output.get_shape()) - 1,
                                keep_dims=True)
        # manual computation of crossentropy
        epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
        output = tf.clip_by_value(output, epsilon, 1. - epsilon)
        return - tf.reduce_sum(target * tf.log(output),
                               reduction_indices=len(output.get_shape()) - 1)
    else:
        try:
            return tf.nn.softmax_cross_entropy_with_logits(labels=target,
                                                           logits=output)
        except TypeError:
            return tf.nn.softmax_cross_entropy_with_logits(output, target)
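A minimal sketch of why the probabilities are clipped before the log: if the true class is predicted with probability exactly 0, the unclipped loss would be infinite. The epsilon of 1e-7 below stands in for _EPSILON and is an assumption, not the project's configured value.

import tensorflow as tf

target = tf.constant([[0.0, 1.0, 0.0]])
output = tf.constant([[0.3, 0.0, 0.7]])  # true class got probability 0
safe = tf.clip_by_value(output, 1e-7, 1.0 - 1e-7)
loss = -tf.reduce_sum(target * tf.log(safe), axis=-1)

with tf.Session() as sess:
    print(sess.run(loss))  # roughly [16.1] instead of [inf]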
Source file: tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def hard_sigmoid(x):
    """Segment-wise linear approximation of sigmoid.

    Faster than sigmoid.
    Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
    In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

    # Arguments
        x: A tensor or variable.

    # Returns
        A tensor.
    """
    x = (0.2 * x) + 0.5
    zero = _to_tensor(0., x.dtype.base_dtype)
    one = _to_tensor(1., x.dtype.base_dtype)
    x = tf.clip_by_value(x, zero, one)
    return x
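A self-contained equivalent of the hard sigmoid above, written with plain tf.clip_by_value and no Keras backend helpers (a sketch for illustration, assuming float32 inputs).

import tensorflow as tf

def hard_sigmoid_plain(x):
    # Linear segment 0.2*x + 0.5, saturated into [0, 1].
    return tf.clip_by_value(0.2 * x + 0.5, 0.0, 1.0)

with tf.Session() as sess:
    print(sess.run(hard_sigmoid_plain(tf.constant([-3.0, 0.0, 3.0]))))  # [0.  0.5 1. ]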
def build_network_rnn(self):
    self.states = tf.placeholder(tf.float32, [None] + list(self.env.observation_space.shape), name="states")  # Observation
    # self.n_states = tf.placeholder(tf.float32, shape=[None], name="n_states")  # Observation
    self.a_n = tf.placeholder(tf.float32, name="a_n")  # Discrete action
    self.adv_n = tf.placeholder(tf.float32, name="adv_n")  # Advantage
    n_states = tf.shape(self.states)[:1]
    states = tf.expand_dims(flatten(self.states), [0])
    enc_cell = tf.contrib.rnn.GRUCell(self.config["n_hidden_units"])
    L1, _ = tf.nn.dynamic_rnn(cell=enc_cell, inputs=states,
                              sequence_length=n_states, dtype=tf.float32)
    L1 = L1[0]
    mu, sigma = mu_sigma_layer(L1, 1)
    self.normal_dist = tf.contrib.distributions.Normal(mu, sigma)
    self.action = self.normal_dist.sample(1)
    self.action = tf.clip_by_value(self.action, self.env.action_space.low[0], self.env.action_space.high[0])
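A toy illustration of that final clip: samples from the policy's Normal distribution that fall outside the action-space bounds are saturated. The bounds of [-2, 2] below are only an example (a pendulum-style continuous control task), not taken from the project.

import tensorflow as tf

sampled = tf.constant([-3.5, 0.7, 2.9])
action = tf.clip_by_value(sampled, -2.0, 2.0)

with tf.Session() as sess:
    print(sess.run(action))  # [-2.   0.7  2. ]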
def create_dis_model(self):
    # Set up discriminator model parameters
    self.get_dis_params()
    # Set up discriminator graph inputs
    self.person_board_1 = tf.placeholder(tf.float32, [None, self.n_input])
    self.person_board_2 = tf.placeholder(tf.float32, [None, self.n_input])
    self.gen_board = tf.placeholder(tf.float32, [None, self.n_input])
    # Get discriminator outputs
    self.d_pred_real = self.d_predict(tf.concat(1, [self.person_board_1, self.person_board_2]), self.p_keep)
    self.d_pred_fake = self.d_predict(tf.concat(1, [self.person_board_1, self.gen_board]), self.p_keep)
    # Clamp weights
    self.weight_clamps = [tf.clip_by_value(self.d_weights[layer], -0.01, 0.01) for layer in self.d_weights]
    self.bias_clamps = [tf.clip_by_value(self.d_biases[layer], -0.01, 0.01) for layer in self.d_biases]
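A hedged sketch of this WGAN-style weight clamp in isolation. Note that the snippet above only builds clipped tensors; to actually enforce the constraint, the clipped value is normally assigned back to the variable, as below (w is a stand-in for one discriminator weight, not a name from the project).

import tensorflow as tf

w = tf.Variable([0.5, -0.3, 0.004])
clamp_op = w.assign(tf.clip_by_value(w, -0.01, 0.01))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(clamp_op))  # [ 0.01  -0.01   0.004]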
def get_train_op(loss,
                 var_list=None,
                 grad_clip=None,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999):
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate,
        beta1=beta1,
        beta2=beta2)
    if grad_clip is None:
        return optimizer.minimize(loss, var_list=var_list)
    else:
        gvs = optimizer.compute_gradients(loss, var_list=var_list)

        def clip(grad):
            if grad is None:
                return grad
            else:
                return tf.clip_by_value(grad, -grad_clip, grad_clip)

        capped_gvs = [(clip(grad), var) for grad, var in gvs]
        return optimizer.apply_gradients(capped_gvs)
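A hypothetical usage of get_train_op with element-wise gradient clipping to [-1, 1] before Adam applies the update; my_loss stands in for a scalar loss tensor defined elsewhere in the graph, and any placeholders it depends on would need to be fed.

train_op = get_train_op(my_loss, grad_clip=1.0, learning_rate=1e-4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)  # feed any required placeholders via feed_dict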