def __call__(self, X):
    """Predict the output from the previous layer and scale the result to [-1, 1].

    Uses a sigmoid activation.

    Args:
        X (tf.Tensor): the input
    Returns:
        tf.Tensor: the activate_and_scale operator
    """
    # TODO: Use tanh instead? tanh(x) = 2*sigmoid(2*x) - 1
    with tf.name_scope('activate_and_scale'):
        return tf.sub(tf.mul(2.0, tf.nn.sigmoid(X)), 1.0)  # x_i = 2*sigmoid(y_{i-1}) - 1
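The identity in the TODO holds: 2*sigmoid(X) - 1 == tanh(X / 2), so the same scaling can be done with a single op. A minimal sketch under the same TF 0.x API (an editor's alternative, not the original project's code):

# Equivalent formulation via tanh(x) = 2*sigmoid(2*x) - 1.
with tf.name_scope('activate_and_scale'):
    scaled = tf.tanh(tf.div(X, 2.0))  # same values as 2*sigmoid(X) - 1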
def stddev(x):
    # Population standard deviation; tf.abs is redundant before tf.square for real-valued x.
    x = tf.to_float(x)
    return tf.sqrt(tf.reduce_mean(tf.square(tf.abs(
        tf.sub(x, tf.fill(x.get_shape(), tf.reduce_mean(x)))))))
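The tf.fill is unnecessary, since subtracting a scalar broadcasts; a minimal equivalent sketch (same-era API, an editor's simplification rather than the original code):

def stddev_simple(x):
    x = tf.to_float(x)
    mean = tf.reduce_mean(x)
    return tf.sqrt(tf.reduce_mean(tf.square(tf.sub(x, mean))))  # population std dev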
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
    """Prepare one image for evaluation.

    If height and width are specified, the output image is resized to that size
    with resize_bilinear. If central_fraction is specified, the central fraction
    of the input image is cropped first.

    Args:
        image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
            [0, 1]; otherwise it is converted to tf.float32, assuming that the range
            is [0, MAX], where MAX is the largest positive representable number for
            the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
        height: integer
        width: integer
        central_fraction: Optional Float, fraction of the image to crop.
        scope: Optional scope for name_scope.
    Returns:
        3-D float Tensor of the prepared image.
    """
    with tf.name_scope(scope, 'eval_image', [image, height, width]):
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image, keeping 87.5% of it by default.
        if central_fraction:
            image = tf.image.central_crop(image, central_fraction=central_fraction)
        if height and width:
            # Resize the image to the specified height and width.
            image = tf.expand_dims(image, 0)
            image = tf.image.resize_bilinear(image, [height, width],
                                             align_corners=False)
            image = tf.squeeze(image, [0])
        # Rescale from [0, 1] to [-1, 1].
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        return image
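A minimal usage sketch, assuming a decoded JPEG and an Inception-style 299x299 evaluation size (both the input name and the size are assumptions, not given above):

# Hypothetical usage: jpeg_bytes is a scalar string tensor holding an encoded JPEG.
raw = tf.image.decode_jpeg(jpeg_bytes, channels=3)
processed = preprocess_for_eval(raw, 299, 299)  # float32, shape [299, 299, 3], range [-1, 1]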
def main(_):
    test_img_list = load_data(dir_test)
    mean_var = np.load('log/log_mycnn/mean_var_out.npz')
    x1 = tf.placeholder(tf.float32, [None, 128, 128, 2])  # data
    x2 = tf.placeholder(tf.float32, [None, 8])  # label
    x4 = tf.placeholder(tf.float32, [])  # dropout
    net = Mycnn(x1, x4, bn_in=mean_var.f.arr_0)
    fc2 = net.out
    loss = tf.reduce_sum(tf.square(tf.sub(fc2, x2))) / 2 / batch_size
    # GPU configuration
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # gpu_opinions = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    saver = tf.train.Saver(max_to_keep=None)
    with tf.Session(config=tf_config) as sess:
        saver.restore(sess, dir_load)
        test_model = DataSet(test_img_list)
        loss_total = []
        for i in range(iter_max):
            x_batch_test, y_batch_test, h1_test, img1, img2 = test_model.next_batch()
            np.savetxt((dir_save + '/h1_%d.txt') % i, h1_test)
            np.savetxt((dir_save + '/label_%d.txt') % i, y_batch_test)
            cv2.imwrite((dir_save + '/image_%d_1.jpg') % i, img1)
            cv2.imwrite((dir_save + '/image_%d_2.jpg') % i, img2)
            pre, average_loss = sess.run([fc2, loss],
                                         feed_dict={x1: x_batch_test, x2: y_batch_test, x4: 1.0})
            np.savetxt((dir_save + '/predict_%d.txt') % i, pre)
            loss_total.append(average_loss)
            print('iter %05d, test loss = %.5f' % ((i + 1), average_loss))
        np.savetxt(dir_save + '/loss.txt', loss_total)
def __pool(self):
    # Probabilistic max-pooling: avg_pool * pool_size^2 gives the block-wise sum of exp(signal).
    with tf.name_scope('pooling') as _:
        self.pooled_prob = tf.sub(1., tf.div(1., 1. + tf.mul(self.__avg_pool(tf.exp(self.signal)),
                                                             self.pool_size * self.pool_size)),
                                  name='pooled_prob')
        self.pooled = self.__sample(self.pooled_prob, 'pooled')
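Since avg_pool times pool_size^2 is a block-wise sum, pooled_prob computes P(pool unit on) = 1 - 1/(1 + sum_block exp(signal)), the probabilistic max-pooling rule for convolutional RBMs. A sketch with the sum made explicit, assuming an NHWC signal and square, non-overlapping windows:

# Editor's sketch: same pooling probability with an explicit block sum.
block_sum = tf.nn.avg_pool(tf.exp(signal),
                           ksize=[1, pool_size, pool_size, 1],
                           strides=[1, pool_size, pool_size, 1],
                           padding='VALID') * (pool_size * pool_size)
pooled_prob = 1. - 1. / (1. + block_sum)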
def __gradient_ascent(self):
    # Gradient ascent (contrastive-divergence update)
    with tf.name_scope('gradient') as _:
        self.grad_bias = tf.mul(tf.reduce_mean(self.hid_prob0 - self.hid_prob1, [0, 1, 2]),
                                self.learning_rate * self.batch_size, name='grad_bias')
        self.grad_cias = tf.mul(tf.reduce_mean(self.vis_0 - self.vis_1, [0, 1, 2]),
                                self.learning_rate * self.batch_size, name='grad_cias')
        # TODO: Is there any method to calculate batch-elementwise convolution?
        temp_grad_weights = tf.zeros(self.weight_shape)
        hid_filter0 = tf.reverse(self.hid_prob0, [False, True, True, False])
        hid_filter1 = tf.reverse(self.hid_prob1, [False, True, True, False])
        for idx in range(0, self.batch_size):
            hid0_ith = self.__get_ith_hid_4d(hid_filter0, idx)
            hid1_ith = self.__get_ith_hid_4d(hid_filter1, idx)
            positive = [0] * self.depth
            negative = [0] * self.depth
            one_ch_conv_shape = [self.width, self.height, 1, self.num_features]
            for jdx in range(0, self.depth):
                positive[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_0, idx, jdx), hid0_ith),
                                           one_ch_conv_shape)
                negative[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_1, idx, jdx), hid1_ith),
                                           one_ch_conv_shape)
            positive = tf.concat(2, positive)
            negative = tf.concat(2, negative)
            temp_grad_weights = tf.add(temp_grad_weights,
                                       tf.slice(tf.sub(positive, negative), [0, 0, 0, 0], self.weight_shape))
        self.grad_weights = tf.mul(temp_grad_weights, self.learning_rate / (self.width * self.height))
        self.gradient_ascent = [self.weights.assign_add(self.grad_weights),
                                self.bias.assign_add(self.grad_bias),
                                self.cias.assign_add(self.grad_cias)]
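The loop accumulates the usual contrastive-divergence statistics, convolving each visible channel with the flipped hidden maps because the era's API lacked batched elementwise convolution. For orientation, a dense (non-convolutional) CD-1 sketch of the same update, with assumed names and shapes:

# Editor's sketch: CD-1 weight update for a dense RBM.
# v0, h0: visible/hidden probabilities from the data; v1, h1: after one Gibbs step.
pos = tf.matmul(v0, h0, transpose_a=True)  # positive statistics <v h>_data
neg = tf.matmul(v1, h1, transpose_a=True)  # negative statistics <v h>_model
grad_w = tf.mul(tf.sub(pos, neg), learning_rate / batch_size)
update_w = weights.assign_add(grad_w)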
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
    """Decode and preprocess one image for evaluation or training.

    Args:
        image_buffer: JPEG-encoded string Tensor
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords],
            where each coordinate is in [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax].
        train: boolean
        thread_id: integer indicating the preprocessing thread
    Returns:
        3-D float Tensor containing an appropriately scaled image
    Raises:
        ValueError: if the user does not provide a bounding box
    """
    if bbox is None:
        raise ValueError('Please supply a bounding box.')
    image = decode_jpeg(image_buffer)
    height = FLAGS.input_size
    width = FLAGS.input_size
    if train:
        image = distort_image(image, height, width, bbox, thread_id)
    else:
        image = eval_image(image, height, width)
    # Finally, rescale to [-1, 1] instead of [0, 1).
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image
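tf.sub and tf.mul were renamed tf.subtract and tf.multiply in TensorFlow 1.0; the final rescaling in the newer spelling (rename only, identical behavior):

image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)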
operations.py — from the project TensorFlow-Machine-Learning-Cookbook by PacktPublishing
def custom_polynomial(x_val):
    # Return 3x^2 - x + 10
    return tf.sub(3 * tf.square(x_val), x_val) + 10
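A minimal usage sketch under a TF 0.x session; at x = 11 the polynomial evaluates to 3*121 - 11 + 10 = 362:

with tf.Session() as sess:
    print(sess.run(custom_polynomial(tf.constant(11.))))  # 362.0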
single_File_For_ColorizationModel_For_Not_OOP_Fan.py — from the project Deep-learning-Colorization-for-visual-media by OmarSayedMostafa
def Train():
    global AbColores_values
    global CurrentBatch_indx
    global GreyImages_Batch
    global EpochsNum
    global ExamplesNum
    global Batch_size
    Input_images = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 1], name="X_inputs")
    Ab_Labels_tensor = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 2], name="Labels_inputs")
    Prediction = TriainModel(Input_images)
    Colorization_MSE = tf.reduce_mean(Frobenius_Norm(tf.sub(Prediction, Ab_Labels_tensor)))
    Optmizer = tf.train.AdamOptimizer().minimize(Colorization_MSE)
    # sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver = tf.train.import_meta_graph('Model Directory/our_model.meta')
    saver.restore(sess, 'Model Directory/our_model')
    PrevLoss = 0
    for epoch in range(EpochsNum):
        epoch_loss = 0
        CurrentBatch_indx = 1
        for i in range(int(ExamplesNum / Batch_size)):  # over batches
            print("Batch Num ", i + 1)
            ReadNextBatch()
            a, c = sess.run([Optmizer, Colorization_MSE],
                            feed_dict={Input_images: GreyImages_Batch, Ab_Labels_tensor: AbColores_values})
            epoch_loss += c
        print("epoch: ", epoch + 1, ", Loss: ", epoch_loss, ", Diff:", PrevLoss - epoch_loss)
        PrevLoss = epoch_loss
        saver.save(sess, 'Model Directory/our_model', write_meta_graph=False)
def dsc_loss(scores, labels):
    # Soft Dice loss: 1 - 2*|S intersect L| / (|S| + |L|)
    scores = tf.sigmoid(scores)
    inter = tf.scalar_mul(2., tf.reduce_sum(tf.multiply(scores, labels), [1, 2, 3]))
    union = tf.add(tf.reduce_sum(scores, [1, 2, 3]), tf.reduce_sum(labels, [1, 2, 3]))
    dsc_loss = tf.reduce_mean(tf.sub(1., tf.div(inter, union)))
    return dsc_loss
def iou_loss(scores, labels):
    # Soft IoU (Jaccard) loss: 1 - |S intersect L| / (|S| + |L| - |S intersect L|)
    scores = tf.sigmoid(scores)
    inter = tf.reduce_sum(tf.multiply(scores, labels), [1, 2, 3])
    union = tf.add(tf.reduce_sum(scores, [1, 2, 3]), tf.reduce_sum(labels, [1, 2, 3]))
    union = tf.sub(union, inter)
    iou_loss = tf.reduce_mean(tf.sub(1., tf.div(inter, union)))
    return iou_loss
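Both losses divide by a quantity that can be zero for empty masks; a common guard (an editor's addition, not in the originals) adds a small smoothing term to numerator and denominator:

# Hypothetical smoothed variant; eps avoids division by zero on empty masks.
eps = 1e-6
dsc_loss = tf.reduce_mean(tf.sub(1., tf.div(inter + eps, union + eps)))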
def tf_model_eval_distance(sess, x, model1, model2, X_test):
    """
    Compute the L1 distance between predictions on the original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model1: model output for the original predictions
    :param model2: model output for the squeezed predictions
    :param X_test: numpy array with training inputs
    :return: a float vector with the distance values
    """
    # Define symbolic distance metrics between the two models' predictions.
    # acc_value = keras.metrics.categorical_accuracy(y, model)
    l2_diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(model1, model2)),
                                    axis=1))
    l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)  # only this one is evaluated below
    l1_dist_vec = np.zeros((len(X_test)))
    with sess.as_default():
        # Compute the number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test)
        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))
            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test), start + FLAGS.batch_size)
            cur_batch_size = end - start
            l1_dist_vec[start:end] = l1_diff.eval(
                feed_dict={x: X_test[start:end], keras.backend.learning_phase(): 0})
        assert end >= len(X_test)
    return l1_dist_vec
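A usage sketch, assuming preds_orig and preds_squeezed are the two models' prediction tensors fed from the same placeholder x (the names are hypothetical):

# Per-example L1 distance between the two models' predictions on X_test.
l1_dists = tf_model_eval_distance(sess, x, preds_orig, preds_squeezed, X_test)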
def spatial_batch_norm(input_layer, name='spatial_batch_norm'):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167.
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    num_channels = input_layer.get_shape().as_list()[3]  # TODO: Clean this up
    scale = tf.Variable(tf.random_uniform([num_channels]), name='scale')  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([num_channels]), name='offset')
    return_val = tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
    return return_val
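Later TensorFlow versions expose this computation as a single standard op; a minimal sketch (note the built-in adds the offset, whereas the code above subtracts it, so the sign convention differs):

normed = tf.nn.batch_normalization(input_layer, mean, variance,
                                   offset=None, scale=scale,
                                   variance_epsilon=variance_epsilon)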
def content_loss(self, layers):
    activations = [self.activations_for_layer(i) for i in layers]
    activation_diffs = [
        tf.sub(
            tf.tile(tf.slice(a, [self.num_style, 0, 0, 0], [self.num_content, -1, -1, -1]),
                    [self.num_synthesized - self.num_content + 1, 1, 1, 1]),
            tf.slice(a, [self.num_style + self.num_content, 0, 0, 0], [self.num_content, -1, -1, -1]))
        for a in activations]
    # This normalizer is in JCJohnson's paper, but not Gatys', I think?
    Ns = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] * a.get_shape().as_list()[3]
          for a in activations]
    content_loss = tf.div(tf.add_n([tf.div(tf.reduce_sum(tf.square(a)), n)
                                    for a, n in zip(activation_diffs, Ns)]), 2.0)
    return content_loss
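Restated on plain arrays: each layer contributes the squared activation difference normalized by layer size, and the total is halved. A NumPy sketch of one layer's term (editor's restatement, assuming 4-D [batch, h, w, c] activations):

import numpy as np

def layer_content_term(F, P):
    # F: synthesized activations, P: content activations, matching shapes.
    N = F.shape[1] * F.shape[2] * F.shape[3]
    return np.sum((F - P) ** 2) / N  # total loss = sum over layers, divided by 2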
def tf_normal(y, mu, sigma):
    # Per-dimension Gaussian density N(y; mu, sigma); 1e-8 guards against sigma == 0.
    oneDivSqrtTwoPI = 1 / math.sqrt(2 * math.pi)
    result = tf.sub(y, mu)
    result = tf.transpose(result, [2, 1, 0])
    result = tf.mul(result, tf.inv(sigma + 1e-8))
    result = -tf.square(result) / 2
    result = tf.mul(tf.exp(result), tf.inv(sigma + 1e-8)) * oneDivSqrtTwoPI
    result = tf.reduce_prod(result, reduction_indices=[0])  # product over dimensions
    return result
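tf.inv was renamed tf.reciprocal in TensorFlow 1.0. Each factor above is the univariate Gaussian density, and reduce_prod multiplies the per-dimension densities. A scalar cross-check of one factor (editor's sketch):

import math

def normal_density(y, mu, sigma):
    # N(y; mu, sigma) = exp(-(y - mu)^2 / (2*sigma^2)) / (sigma * sqrt(2*pi))
    return math.exp(-0.5 * ((y - mu) / sigma) ** 2) / (sigma * math.sqrt(2 * math.pi))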
def test_logging_trainable(self):
    with tf.Graph().as_default() as g, self.test_session(g):
        var = tf.Variable(tf.constant(42.0), name='foo')
        var.initializer.run()
        cof = tf.constant(1.0)
        loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
        tf.get_default_session().run(train_step)
        self._run_monitor(learn.monitors.LoggingTrainable('foo'))
        self.assertRegexpMatches(str(self.logged_message), var.name)
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()
    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])
    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])
    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]