def imagenet_preprocess_example(example, mode, resize_size=None):
    """Preprocessing used for Imagenet and similar problems."""
    if resize_size is None:
        resize_size = [299, 299]

    def preprocess(img):
        img = tf.image.resize_images(img, [360, 360])
        img = common_layers.image_augmentation(
            tf.to_float(img) / 255., crop_size=resize_size)
        return tf.to_int64(img * 255.)

    def resize(img):
        return tf.to_int64(tf.image.resize_images(img, resize_size))

    inputs = tf.cast(example["inputs"], tf.int64)
    if mode == tf.estimator.ModeKeys.TRAIN:
        example["inputs"] = tf.cond(  # Preprocess 90% of the time.
            tf.less(tf.random_uniform([]), 0.9),
            lambda img=inputs: preprocess(img),
            lambda img=inputs: resize(img))
    else:
        example["inputs"] = resize(inputs)
    return example
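The 90/10 branch above hinges on comparing a scalar uniform draw against a constant. A minimal self-contained sketch of the same pattern (assuming the TF 1.x API used throughout these snippets; the doubled tensor is a hypothetical stand-in for preprocess(), the identity for resize()):

import tensorflow as tf

x = tf.ones([2, 2])
maybe_augmented = tf.cond(
    tf.less(tf.random_uniform([]), 0.9),  # True ~90% of the time
    lambda: x * 2.0,  # stand-in for preprocess(img)
    lambda: x)        # stand-in for resize(img)

with tf.Session() as sess:
    print(sess.run(maybe_augmented))  # all 2s on roughly 9 of 10 runs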
def normal_ccdf(x, mu, sigma2):
    """Normal CCDF."""
    # Check for degenerate distributions when sigma2 == 0:
    #     if x >= mu, p = 0
    #     if x < mu,  p = 1
    # sigma2_le_0 = tf.less_equal(sigma2, 0.)
    # x_gte_mu = tf.greater_equal(x, mu)
    # x_lt_mu = tf.less(x, mu)
    # Never divide by zero; instead, the logic below would handle the
    # degenerate-distribution cases:
    # sigma2 = tf.cond(sigma2_le_0, lambda: tf.ones_like(sigma2), lambda: sigma2)
    p = 1. - 0.5 * (1. + tf.erf((x - mu) / tf.sqrt(2. * sigma2)))
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_gte_mu), lambda: tf.zeros_like(p), lambda: p)
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_lt_mu), lambda: tf.ones_like(p), lambda: p)
    return p
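A quick sanity check of the CCDF formula (a sketch, TF 1.x assumed): at x == mu the result is 0.5, and it decays toward 0 as x grows.

import tensorflow as tf

x = tf.constant([0.0, 1.0, 3.0])
p = normal_ccdf(x, mu=tf.constant(0.0), sigma2=tf.constant(1.0))
with tf.Session() as sess:
    print(sess.run(p))  # approximately [0.5, 0.1587, 0.0013]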
def bboxes_filter_center(labels, bboxes, margins=[0., 0., 0., 0.],
                         scope=None):
    """Filter out bounding boxes whose centers are not in the rectangle
    [0, 0, 1, 1] + margins. The margins can be used to tighten or loosen
    this condition.

    Return:
        labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        cy = (bboxes[:, 0] + bboxes[:, 2]) / 2.
        cx = (bboxes[:, 1] + bboxes[:, 3]) / 2.
        mask = tf.greater(cy, margins[0])
        mask = tf.logical_and(mask, tf.greater(cx, margins[1]))
        mask = tf.logical_and(mask, tf.less(cy, 1. + margins[2]))
        mask = tf.logical_and(mask, tf.less(cx, 1. + margins[3]))
        # Boolean masking...
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
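A toy run of the filter (a sketch, TF 1.x assumed, with hypothetical boxes in [ymin, xmin, ymax, xmax] order): the second box's center lies outside the unit square, so only the first survives.

import tensorflow as tf

labels = tf.constant([1, 2])
bboxes = tf.constant([[0.1, 0.1, 0.5, 0.5],   # center (0.3, 0.3): kept
                      [1.0, 1.0, 1.5, 1.5]])  # center (1.25, 1.25): dropped
kept_labels, kept_bboxes = bboxes_filter_center(labels, bboxes)
with tf.Session() as sess:
    print(sess.run(kept_labels))  # [1]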
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
        prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the (encoded) predicted locations of objects.
        target_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the regression targets.
        weights: A float tensor of shape [batch_size, num_anchors].

    Returns:
        loss: A (scalar) tensor representing the value of the loss function.
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
        return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm)
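The tf.where gate at the heart of this loss, in isolation (a minimal sketch, TF 1.x assumed): quadratic inside |x| < 1, linear outside.

import tensorflow as tf

abs_diff = tf.constant([0.2, 0.8, 2.0])
gated = tf.where(tf.less(abs_diff, 1.0),
                 0.5 * tf.square(abs_diff),  # quadratic branch
                 abs_diff - 0.5)             # linear branch
with tf.Session() as sess:
    print(sess.run(gated))  # [0.02, 0.32, 1.5]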
# From image_reader_segment.py in the dcsp_segmentation project (author: arslan-chaudhry).
def image_mirroring(img, label, seed):
    """Randomly mirrors the images.

    Args:
        img: Training image to mirror.
        label: Segmentation mask to mirror.
        seed: Random seed.
    """
    distort_left_right_random = tf.random_uniform(
        [1], 0, 1.0, dtype=tf.float32, seed=seed)[0]
    mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
    mirror = tf.boolean_mask([0, 1, 2], mirror)  # axes to flip
    img = tf.reverse(img, mirror)
    label = tf.reverse(label, mirror)
    return img, label
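Only axis 1 (width) can ever pass the tf.less test here, since 1.0 < 0.5 is always false for axes 0 and 2. A toy run (a sketch, TF 1.x assumed, with a hypothetical 1x2x3 image):

import tensorflow as tf

img = tf.reshape(tf.range(6, dtype=tf.float32), [1, 2, 3])
label = tf.zeros([1, 2, 1])
img_m, label_m = image_mirroring(img, label, seed=0)
with tf.Session() as sess:
    print(sess.run(img_m))  # width axis flipped roughly half the time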
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights,
                    bbox_outside_weights, sigma=1.0, dim=[1]):
    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = tf.abs(in_box_diff)
    # 1 where |x| < 1 / sigma^2 (quadratic region), 0 elsewhere.
    smoothL1_sign = tf.stop_gradient(
        tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
    in_loss_box = (tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign
                   + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign))
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = tf.reduce_mean(tf.reduce_sum(out_loss_box, axis=dim))
    return loss_box
def lesser(x, y):
    '''Element-wise truth value of (x < y).

    Returns a bool tensor.
    '''
    return tf.less(x, y)
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)
    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.),
                          tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped, [
                tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'),
                tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
    return input_mat_clipped
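detectMinVal depends on a clipoutNeg helper that is not shown in this snippet. A plausible stand-in (an assumption, not the original implementation) zeroes out entries at or below the threshold:

def clipoutNeg(vec, threshold=1e-6):
    # Hypothetical helper: keep only entries strictly above the threshold.
    mask = tf.cast(tf.greater(vec, threshold), vec.dtype)
    return mask * vec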
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
    factor = 1.5

    def loop_cond(step_size, last_acceptance_rate, cond):
        return cond

    def loop_body(step_size, last_acceptance_rate, cond):
        # Calculate the acceptance rate of a single leapfrog step.
        new_q, new_p = leapfrog_integrator(
            q, p, tf.constant(0.0), step_size / 2,
            get_gradient, mass)
        new_q, new_p = leapfrog_integrator(
            new_q, new_p, step_size, step_size / 2,
            get_gradient, mass)
        _, _, _, _, acceptance_rate = get_acceptance_rate(
            q, p, new_q, new_p,
            get_log_posterior, mass, self.data_axes)
        acceptance_rate = tf.reduce_mean(acceptance_rate)
        # Grow or shrink the step size, and stop once the acceptance rate
        # crosses the target (i.e. the "below target" flag flips, which the
        # XOR of the last and current comparisons detects).
        new_step_size = tf.cond(
            tf.less(acceptance_rate, self.target_acceptance_rate),
            lambda: step_size * (1.0 / factor),
            lambda: step_size * factor)
        cond = tf.logical_not(tf.logical_xor(
            tf.less(last_acceptance_rate, self.target_acceptance_rate),
            tf.less(acceptance_rate, self.target_acceptance_rate)))
        return [new_step_size, acceptance_rate, cond]

    new_step_size, _, _ = tf.while_loop(
        loop_cond,
        loop_body,
        [self.step_size, tf.constant(1.0), tf.constant(True)])
    return new_step_size
def __lt__(self, other):
    return tf.less(self, other)
def _sample(self, n_samples):
    p = tf.sigmoid(self.logits)
    shape = tf.concat([[n_samples], self.batch_shape], 0)
    alpha = tf.random_uniform(
        shape, minval=0, maxval=1, dtype=self.param_dtype)
    samples = tf.cast(tf.less(alpha, p), dtype=self.dtype)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).concatenate(
            self.get_batch_shape()))
    return samples
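The comparison trick in isolation (a sketch, TF 1.x assumed): applying tf.less between uniform noise and p yields Bernoulli(p) draws.

import tensorflow as tf

p = tf.constant([0.1, 0.5, 0.9])
u = tf.random_uniform(tf.shape(p))
draws = tf.cast(tf.less(u, p), tf.int32)
with tf.Session() as sess:
    print(sess.run(draws))  # e.g. [0, 1, 1]; each entry is 1 with probability p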
def smooth_l1_loss(bbox_prediction, bbox_target, sigma=1.0):
    """Return smooth L1 loss for bounding box prediction.

    Args:
        bbox_prediction: shape (1, H, W, num_anchors * 4)
        bbox_target: shape (1, H, W, num_anchors * 4)

    Smooth L1 loss is defined as:
        0.5 * x^2     if |x| < d
        abs(x) - 0.5  if |x| >= d
    where d = 1 / sigma^2 (so d = 1 for the default sigma = 1) and
    x = prediction - target.
    """
    sigma2 = sigma ** 2
    diff = bbox_prediction - bbox_target
    abs_diff = tf.abs(diff)
    abs_diff_lt_sigma2 = tf.less(abs_diff, 1.0 / sigma2)
    bbox_loss = tf.reduce_sum(
        tf.where(
            abs_diff_lt_sigma2, 0.5 * tf.square(abs_diff),
            abs_diff - 0.5
        ), [1]
    )
    return bbox_loss
def huber_loss(labels, predictions, delta=1.0):
    '''Huber loss: L2 before delta, L1 after delta.'''
    residual = tf.abs(predictions - labels)
    condition = tf.less(residual, delta)
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)
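With delta = 1 this Huber loss coincides with the smooth L1 losses above. A numeric check (a sketch, TF 1.x assumed):

import tensorflow as tf

labels = tf.constant([0.0, 0.0, 0.0])
preds = tf.constant([0.5, 1.0, 3.0])
with tf.Session() as sess:
    print(sess.run(huber_loss(labels, preds)))  # [0.125, 0.5, 2.5]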
def count_zeros():
    zeros_num = 0.
    all_num = 0.
    for v in tf.trainable_variables():
        # Count weights whose magnitude is (near) zero.
        zeros_num += tf.reduce_sum(
            tf.to_float(tf.less(tf.abs(v), tf.ones_like(v) * 0.0001)))
        all_num += tf.reduce_sum(tf.ones_like(v))
    return [zeros_num, all_num]
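Usage sketch (TF 1.x assumed; the variable below is a hypothetical toy): report the fraction of near-zero weights, e.g. to track sparsity under pruning.

import tensorflow as tf

_ = tf.get_variable("w", initializer=tf.constant([0.0, 1e-5, 0.5, -2.0]))
zeros_num, all_num = count_zeros()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    z, n = sess.run([zeros_num, all_num])
    print("sparsity: %.2f" % (z / n))  # 0.50 for this toy variable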
def exceedingAngleThreshold(pred, gt, ss, threshold, outputChannels=2):
    with tf.name_scope("angular_error"):
        pred = tf.reshape(pred, (-1, outputChannels))
        gt = tf.to_float(tf.reshape(gt, (-1, outputChannels)))
        ss = tf.to_float(tf.reshape(ss, (-1, 1)))
        # Scale by 0.999999 to keep the dot product strictly inside [-1, 1],
        # so tf.acos stays finite.
        pred = tf.nn.l2_normalize(pred, 1) * 0.999999
        gt = tf.nn.l2_normalize(gt, 1) * 0.999999
        errorAngles = tf.acos(
            tf.reduce_sum(pred * gt, reduction_indices=[1], keep_dims=True)) * ss
        exceedCount = tf.reduce_sum(
            tf.to_float(tf.less(threshold / 180. * 3.14159, errorAngles)))
        return exceedCount
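A toy check (a sketch, TF 1.x assumed, with hypothetical unit-vector targets): a 90-degree disagreement exceeds a 45-degree threshold, while an ~11-degree one does not, so the count is 1.

import tensorflow as tf

pred = tf.constant([[1.0, 0.0], [1.0, 0.0]])
gt = tf.constant([[0.0, 1.0], [1.0, 0.2]])
ss = tf.ones([2, 1])  # mask selecting both pixels
count = exceedingAngleThreshold(pred, gt, ss, threshold=45.0)
with tf.Session() as sess:
    print(sess.run(count))  # 1.0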
def testCreatePhasesWithLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    #     i = 0
    #     while i < 10:
    #         i += 1
    #         x -= 1
    #
    # To get an error in the case where apply_function is not called, we have
    # to call an analyzer first (see testCreatePhasesWithUnwrappedLoop), so
    # we also do so here.
    def preprocessing_fn(inputs):
        def _subtract_ten(x):
            i = tf.constant(0)
            c = lambda i, x: tf.less(i, 10)
            b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
            return tf.while_loop(c, b, [i, x])[1]
        scaled_to_0_1 = mappers.scale_to_0_1(
            api.apply_function(_subtract_ten, inputs['x']))
        return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    phases = impl_helper.create_phases(graph)
    self.assertEqual(len(phases), 1)
    self.assertEqual(len(phases[0].analyzers), 2)
def testCreatePhasesWithUnwrappedLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    #     i = 0
    #     while i < 10:
    #         i += 1
    #         x -= 1
    #
    # We need to call an analyzer after the loop because only the transitive
    # parents of analyzers are inspected by create_phases.
    def preprocessing_fn(inputs):
        def _subtract_ten(x):
            i = tf.constant(0)
            c = lambda i, x: tf.less(i, 10)
            b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
            return tf.while_loop(c, b, [i, x])[1]
        scaled_to_0_1 = mappers.scale_to_0_1(_subtract_ten(inputs['x']))
        return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    with self.assertRaisesRegexp(ValueError, 'Cycle detected'):
        _ = impl_helper.create_phases(graph)
def sequential_for(fn, begin, end):
    def _cond(i):
        return tf.less(i, end)

    def _body(i):
        ops = fn(i)
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(_cond, _body, [begin])
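Usage sketch (TF 1.x assumed; the accumulator variable is a hypothetical toy): the control_dependencies barrier forces fn's side effects to run once per iteration.

import tensorflow as tf

total = tf.get_variable("total", initializer=0.0)
loop = sequential_for(lambda i: [tf.assign_add(total, tf.to_float(i))],
                      tf.constant(0), tf.constant(5))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(loop)
    print(sess.run(total))  # 0 + 1 + 2 + 3 + 4 = 10.0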
def smooth_l1(x):
    l2 = 0.5 * (x ** 2.0)
    l1 = tf.abs(x) - 0.5
    condition = tf.less(tf.abs(x), 1.0)
    re = tf.where(condition, l2, l1)
    return re
def lt(self, x, y):
    return tf.less(x, y)