import tensorflow as tf


def crop_to_fixed_size(img_tensor, annotation_tensor, output_shape):
    """Randomly crop an image and its annotation to a fixed size.

    The output shape must be no larger than the input shape.
    :param img_tensor: image tensor of shape [height, width, depth]
    :param annotation_tensor: annotation tensor of shape [height, width, 1]
    :param output_shape: (crop_height, crop_width) of the crop
    :return: tensors of shape (output_shape[0], output_shape[1], 3) and
             (output_shape[0], output_shape[1], 1)
    """
    original_shape = tf.shape(img_tensor)
    crop_height, crop_width = output_shape[0], output_shape[1]
    image_height, image_width = original_shape[0], original_shape[1]
    img_cropped_shape = tf.stack([output_shape[0], output_shape[1], 3])
    annotate_cropped_shape = tf.stack([output_shape[0], output_shape[1], 1])
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    # Sample a random top-left corner for the crop.
    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])
    offset_height = tf.random_uniform(
        [], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform(
        [], maxval=max_offset_width, dtype=tf.int32)
    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
    annotation_tensor = tf.to_int32(annotation_tensor)
    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(img_tensor, offsets, img_cropped_shape)
        annotate = tf.slice(annotation_tensor, offsets, annotate_cropped_shape)
    return (tf.reshape(image, img_cropped_shape),
            tf.reshape(annotate, annotate_cropped_shape))
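
# Minimal usage sketch (not from the original post): build a small TF 1.x
# graph that decodes one image/annotation pair and crops both to 256x256.
# The file names, the crop size, and the Session-based run are assumptions
# made purely for illustration.
if __name__ == '__main__':
    img_raw = tf.read_file('example_image.jpg')        # hypothetical path
    ann_raw = tf.read_file('example_annotation.png')   # hypothetical path
    img = tf.image.decode_jpeg(img_raw, channels=3)    # [height, width, 3]
    ann = tf.image.decode_png(ann_raw, channels=1)     # [height, width, 1]
    cropped_img, cropped_ann = crop_to_fixed_size(img, ann, [256, 256])
    with tf.Session() as sess:
        img_out, ann_out = sess.run([cropped_img, cropped_ann])
        print(img_out.shape, ann_out.shape)  # (256, 256, 3) (256, 256, 1)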