def csv_inputs_test(self, csv_file_path):
    # Queue the CSV file and read it line by line (TF 1.x queue-based input pipeline)
    filename_queue = tf.train.string_input_producer([csv_file_path], shuffle=False)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    # each CSV row: RGB image path, depth annotation path, depth-in-meters path
    filename, depth_filename, depthMeters_filename = tf.decode_csv(
        serialized_example, [["path"], ["annotation"], ["meters"]])
    # input: RGB image
    rgb_png = tf.read_file(filename)
    image = tf.image.decode_png(rgb_png, channels=3)
    image = tf.cast(image, tf.float32)
    # target: depth map, normalized to [0, 1]
    depth_png = tf.read_file(depth_filename)
    depth = tf.image.decode_png(depth_png, channels=1)
    depth = tf.cast(depth, tf.float32)
    depth = tf.div(depth, 255.0)
    # resize to the network input / target resolutions
    image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
    # validity mask: 1 where depth is non-zero, 0 where depth is missing
    invalid_depth = tf.sign(depth)
    # generate batches with background threads
    images, depths, invalid_depths, filenames, depth_filenames = tf.train.batch(
        [image, depth, invalid_depth, filename, depth_filename],
        batch_size=self.batch_size,
        num_threads=4,
        capacity=50 + 3 * self.batch_size,
    )
    return images, depths, invalid_depths, filenames, depth_filenames
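Because this uses the TF 1.x queue-based input API, the returned tensors only start producing data once the queue runners are launched inside a session. Below is a minimal consumption sketch; the DataSet wrapper class and the CSV path are hypothetical, assuming only that the class holds batch_size and the csv_inputs_test method above.

# Sketch of how the pipeline might be consumed (TF 1.x; DataSet is a hypothetical wrapper)
import tensorflow as tf

dataset = DataSet(batch_size=8)  # hypothetical class exposing batch_size and csv_inputs_test
images, depths, invalid_depths, fnames, dnames = dataset.csv_inputs_test("test.csv")

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    # start_queue_runners fills the string_input_producer and batch queues in background threads
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        img_batch, depth_batch = sess.run([images, depths])
        # shapes: (8, IMAGE_HEIGHT, IMAGE_WIDTH, 3) and (8, TARGET_HEIGHT, TARGET_WIDTH, 1)
        print(img_batch.shape, depth_batch.shape)
    finally:
        coord.request_stop()
        coord.join(threads)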