def get_image(filepath, height, width, preprocess_fn, queue=None):
    png = filepath.lower().endswith('png')
    if queue is None:
        img_bytes = tf.read_file(filepath)
    else:
        reader = tf.WholeFileReader()
        _, img_bytes = reader.read(queue)
    image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)
    return preprocess_fn(image, height, width)
Example source code using the Python class tf.WholeFileReader()
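The snippets on this page all follow the same TensorFlow 1.x queue-based input pattern: a filename queue produced by tf.train.string_input_producer feeds tf.WholeFileReader(), whose read() op returns a (key, file_contents) pair, and the resulting tensors only yield data once queue runners have been started. A minimal, self-contained sketch of that pattern (the './images/*.jpg' glob is a placeholder, not taken from any of the projects below):

import glob

import tensorflow as tf

# filename queue -> WholeFileReader -> decode
paths = glob.glob('./images/*.jpg')
filename_queue = tf.train.string_input_producer(paths, num_epochs=1, shuffle=False)
_, contents = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_jpeg(contents, channels=3)

with tf.Session() as session:
    # num_epochs creates a local epoch counter, so local variables must be initialized
    session.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=session, coord=coord)
    print(session.run(image).shape)  # (height, width, 3)
    coord.request_stop()
    coord.join(threads)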
def load_target_image():
    """Load the target image, normalize it to [-1, 1] and return it as a constant tensor."""
    file_names = tf.train.string_input_producer([FLAGS.target_image_path])
    _, image = tf.WholeFileReader().read(file_names)
    # Decode byte data, no GIFs please.
    # NOTE: tf.image.decode_image can decode both JPEG and PNG. However, the
    #       shape (height and width) would then be unknown.
    image = tf.image.decode_png(image, channels=3)
    image = tf.cast(image, tf.float32)
    image = tf.image.resize_images(image, [FLAGS.image_size, FLAGS.image_size])
    image = tf.reshape(image, [1, FLAGS.image_size, FLAGS.image_size, 3])
    image = image / 127.5 - 1.0

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        image = session.run(image)

        coord.request_stop()
        coord.join(threads)

    return tf.constant(image, name='target_image')
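The NOTE in the snippet above refers to the fact that tf.image.decode_image may return a 4-D tensor (for GIFs), so its output has unknown static rank and tf.image.resize_images rejects it. A sketch of the usual workaround when the input is known to be a 3-channel JPEG or PNG ('example.png' is a placeholder):

contents = tf.read_file('example.png')
image = tf.image.decode_image(contents, channels=3)
image.set_shape([None, None, 3])  # assert rank 3 so resize_images accepts it
image = tf.image.resize_images(image, [256, 256])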
def build_dataset_reader():
    """Build a queue-backed reader that yields batches of preprocessed portrait images."""
    paths_png_wildcards = os.path.join(FLAGS.portraits_dir_path, '*.png')
    paths_png = glob.glob(paths_png_wildcards)
    file_name_queue = tf.train.string_input_producer(paths_png)
    reader = tf.WholeFileReader()
    reader_key, reader_val = reader.read(file_name_queue)
    image = tf.image.decode_png(reader_val, channels=3, dtype=tf.uint8)

    # assume the input images are either 128x128x3 or 64x64x3
    if FLAGS.crop_image:
        image = tf.image.crop_to_bounding_box(
            image,
            FLAGS.crop_image_offset_y,
            FLAGS.crop_image_offset_x,
            FLAGS.crop_image_size_m,
            FLAGS.crop_image_size_m)
        image = tf.random_crop(
            image, size=[FLAGS.crop_image_size_n, FLAGS.crop_image_size_n, 3])

    image = tf.image.resize_images(image, [FLAGS.image_size, FLAGS.image_size])
    image = tf.image.random_flip_left_right(image)
    image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0

    return tf.train.batch(
        tensors=[image],
        batch_size=FLAGS.batch_size,
        capacity=FLAGS.batch_size)
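A minimal usage sketch for build_dataset_reader (it assumes the FLAGS used above are already defined):

images = build_dataset_reader()

with tf.Session() as session:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=session, coord=coord)

    # one training batch, shaped [batch_size, image_size, image_size, 3], values in [-1, 1]
    batch = session.run(images)

    coord.request_stop()
    coord.join(threads)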
def readfile(filename):
    # `filename` is expected to be a filename queue (e.g. from tf.train.string_input_producer).
    try:
        reader = tf.WholeFileReader()
        key, value = reader.read(filename)
        image = tf.image.decode_jpeg(value, channels=3)
        image = tf.image.resize_images(image, [224, 224])
        float_image = tf.div(tf.cast(image, tf.float32), 255)
        return float_image
    except Exception:
        print(-1)
        return readfile(filename)
def input_pipeline(filenames, batch_size, num_epochs=None, image_size=142, crop_size=256):
    # NOTE: tf.random_crop requires image_size >= crop_size, so callers must pass
    # sizes that satisfy this (the defaults above do not).
    with tf.device('/cpu:0'):
        filenames = tf.train.match_filenames_once(filenames)
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
        reader = tf.WholeFileReader()
        filename, value = reader.read(filename_queue)
        image = tf.image.decode_jpeg(value, channels=3)
        processed = tf.image.resize_images(
            image,
            [image_size, image_size],
            tf.image.ResizeMethod.BILINEAR)
        processed = tf.image.random_flip_left_right(processed)
        processed = tf.random_crop(processed, [crop_size, crop_size, 3])
        # change to 'CHW' data format for faster GPU processing
        processed = tf.transpose(processed, [2, 0, 1])
        processed = (tf.cast(processed, tf.float32) - 128.0) / 128.0
        images = tf.train.batch(
            [processed],
            batch_size=batch_size,
            num_threads=NUM_THREADS,
            capacity=batch_size * 5)
        return images
def disk_image_batch(image_paths, batch_size, shape, preprocess_fn=None, shuffle=True, num_threads=16,
                     min_after_dequeue=100, allow_smaller_final_batch=False, scope=None):
    """
    This function is suitable for bmp, jpg, png and gif files

    image_paths: string list or 1-D tensor, each of which is an image path
    preprocess_fn: single image preprocessing function
    """
    with tf.name_scope(scope, 'disk_image_batch'):
        data_num = len(image_paths)

        # enqueue the whole file list, then dequeue a single image path and read the image bytes
        _, img = tf.WholeFileReader().read(
            tf.train.string_input_producer(image_paths, shuffle=shuffle, capacity=data_num))
        img = tf.image.decode_image(img)

        # preprocessing
        img.set_shape(shape)
        if preprocess_fn is not None:
            img = preprocess_fn(img)

        # batch data
        if shuffle:
            capacity = min_after_dequeue + (num_threads + 1) * batch_size
            img_batch = tf.train.shuffle_batch([img],
                                               batch_size=batch_size,
                                               capacity=capacity,
                                               min_after_dequeue=min_after_dequeue,
                                               num_threads=num_threads,
                                               allow_smaller_final_batch=allow_smaller_final_batch)
        else:
            img_batch = tf.train.batch([img],
                                       batch_size=batch_size,
                                       allow_smaller_final_batch=allow_smaller_final_batch)

        return img_batch, data_num
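A hedged usage sketch for disk_image_batch; the glob pattern and the normalization function are placeholders, not part of the original project:

import glob

def normalize(img):
    # example preprocess_fn: scale uint8 pixels into [-1, 1]
    return tf.cast(img, tf.float32) / 127.5 - 1.0

paths = glob.glob('./faces/*.jpg')
img_batch, data_num = disk_image_batch(paths, batch_size=64, shape=[128, 128, 3],
                                        preprocess_fn=normalize)
# img_batch only yields values after queue runners are started, as in the intro sketch above.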
def reader(self):
    """Return a reader for a single entry from the data set.

    See io_ops.py for details of Reader class.

    Returns:
      Reader object that reads the data set.
    """
    return tf.WholeFileReader()
def _file_reader(self, filename_queue):
    # read file from queue
    reader = tf.WholeFileReader()
    _, img_bytes = reader.read(filename_queue)
    # decode it
    image_data = tf.image.decode_jpeg(img_bytes, channels=3)
    # preprocess it and return
    return preprocess(image_data, self.config)
def get_image_batch(pattern, batch_size, image_size=143, crop_size=128, train=True):
    if train:
        random_flip = lambda x: tf.image.random_flip_left_right(x)
        crop = lambda x: tf.random_crop(x, [crop_size, crop_size, 3])
        queue = lambda: tf.train.string_input_producer(tf.train.match_filenames_once(pattern),
                                                       num_epochs=None, shuffle=True)
        batch = lambda f, x: tf.train.shuffle_batch([f, x],
                                                    batch_size=batch_size,
                                                    num_threads=NUM_THREADS,
                                                    capacity=batch_size * 5,
                                                    min_after_dequeue=batch_size * 3)
    else:
        random_flip = lambda x: tf.identity(x)
        crop = lambda x: tf.image.resize_image_with_crop_or_pad(x, crop_size, crop_size)
        queue = lambda: tf.train.string_input_producer(tf.train.match_filenames_once(pattern),
                                                       num_epochs=1, shuffle=False)
        batch = lambda f, x: tf.train.batch([f, x],
                                            batch_size=batch_size,
                                            num_threads=NUM_THREADS,
                                            allow_smaller_final_batch=False)

    def _preprocess(image):
        image = random_flip(image)
        image = crop(image)
        image = tf.transpose(image, [2, 0, 1])  # change to CHW format
        image = (tf.cast(image, tf.float32) - 128.0) / 128.0  # push into the [-1, 1] range
        return image

    with tf.device('/cpu:0'):
        filename_queue = queue()
        image_reader = tf.WholeFileReader()
        filename, image_file = image_reader.read(filename_queue)
        image = tf.image.decode_jpeg(image_file, 3)
        resized = tf.image.resize_images(image, [image_size, image_size], tf.image.ResizeMethod.BILINEAR)
        preprocessed = _preprocess(resized)
        filenames, images = batch(filename, preprocessed)
    return filenames, images
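A possible call site for get_image_batch (the glob patterns and batch size are placeholders; NUM_THREADS is assumed to be a module-level constant):

train_filenames, train_images = get_image_batch('./train/*.jpg', batch_size=16,
                                                image_size=143, crop_size=128, train=True)
val_filenames, val_images = get_image_batch('./val/*.jpg', batch_size=16, train=False)
# match_filenames_once (and num_epochs=1 in the eval queue) create local variables,
# so run tf.local_variables_initializer() before starting the queue runners.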
def read_image(filename_queue, shuffle):
    image_reader = tf.WholeFileReader()
    path, image_file = image_reader.read(filename_queue)
    # Preprocessing
    image = tf.image.decode_jpeg(image_file, 3)
    if shuffle:
        # image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
        # NOTE: the static shape of a freshly decoded JPEG is usually unknown, so this
        # guard only takes effect when the shape has been set elsewhere.
        if image.get_shape()[0] > IMAGE_SIZE['cropped'][0] and image.get_shape()[1] > IMAGE_SIZE['cropped'][1]:
            image = tf.random_crop(image, IMAGE_SIZE['cropped'])
    # image = tf.image.per_image_whitening(image)
    image = tf.image.resize_images(image, IMAGE_SIZE['resized'])
    image = image * (1. / 255) - 0.5
    return [image, path]
image_resizing.py, from project Handwritten_recognition_tensorflow (author: sanjanaramprasad)
def read_image(filename_queue):
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_png(value)
    return image
def load_image(path):
    """Load an image, normalize it to [-1, 1], pad it, and return it with its original shape."""
    file_names = tf.train.string_input_producer([path])
    _, image = tf.WholeFileReader().read(file_names)
    # Decode byte data, no GIFs please.
    # NOTE: tf.image.decode_image can decode both JPEG and PNG. However, the
    #       shape (height and width) would then be unknown.
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.cast(image, tf.float32)
    shape = tf.shape(image)[:2]
    image = tf.image.resize_images(image, [256, 256])
    image = tf.reshape(image, [1, 256, 256, 3])
    # for VggNet, subtract the mean color of its training data
    # image = tf.subtract(image, VggNet.mean_color_rgb())
    image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0
    # R/G/B to B/G/R
    image = tf.reverse(image, [3])
    padding = [FLAGS.padding, FLAGS.padding]
    image = tf.pad(
        tensor=image,
        paddings=[[0, 0], padding, padding, [0, 0]],
        mode='symmetric')

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        image, shape = session.run([image, shape])
        coord.request_stop()
        coord.join(threads)

    return image, shape
def load_image(path):
    """Load an image, normalize it for VggNet, and return it with its original shape."""
    file_names = tf.train.string_input_producer([path])
    _, image = tf.WholeFileReader().read(file_names)
    # Decode byte data, no GIFs please.
    # NOTE: tf.image.decode_image can decode both JPEG and PNG. However, the
    #       shape (height and width) would then be unknown.
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.cast(image, tf.float32)
    shape = tf.shape(image)[:2]
    image = tf.image.resize_images(image, [224, 224])
    image = tf.reshape(image, [1, 224, 224, 3])
    # for VggNet, subtract the mean color of its training data
    image = tf.subtract(image, VggNet.mean_color_rgb())
    # R/G/B to B/G/R
    image = tf.reverse(image, [3])

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        image, shape = session.run([image, shape])
        coord.request_stop()
        coord.join(threads)

    return image, shape
def process_data(sess, filenames):
    """
    Generate the input images (downsampled) and labels (the original images).
    """
    images_size = FLAGS.input_image_size
    reader = tf.WholeFileReader()
    filename_queue = tf.train.string_input_producer(filenames)
    _, value = reader.read(filename_queue)
    channels = FLAGS.image_channels
    image = tf.image.decode_jpeg(
        value, channels=channels, name="dataset_image")
    # add data augmentation here
    image.set_shape([None, None, channels])
    image = tf.reshape(image, [1, images_size, images_size, 3])
    image = tf.cast(image, tf.float32) / 255.0
    K = FLAGS.scale
    downsampled = tf.image.resize_area(
        image, [images_size // K, images_size // K])
    upsampled = tf.image.resize_area(downsampled, [images_size, images_size])
    feature = tf.reshape(upsampled, [images_size, images_size, 3])
    label = tf.reshape(image, [images_size, images_size, 3])
    features, labels = tf.train.shuffle_batch(
        [feature, label], batch_size=FLAGS.batch_size, num_threads=4,
        capacity=5000, min_after_dequeue=1000, name='labels_and_features')
    tf.train.start_queue_runners(sess=sess)
    print('tag31', features.eval(session=sess), labels.get_shape())
    return features, labels
train_multigpu.py, from project DAVIS-2016-Chanllege-Solution (author: tangyuhao)
def single_JPEGimage_reader(filename_queue):
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.to_float(tf.image.decode_jpeg(image_file, channels=3))
    image = tf.image.resize_images(image, [HEIGHT, WIDTH], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return image
def single_PNGimage_reader(filename_queue):
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.to_float(tf.image.decode_png(image_file, channels=1))
    image = tf.image.resize_images(image, [HEIGHT, WIDTH], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # pixel distribution ground truth
    return image