def read_audio_csv(filename_queue):
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    # each row holds WIDE*FEATURE_DIM feature values followed by OUT_DIM label values
    defaultVal = [[0.] for idx in range(WIDE*FEATURE_DIM + OUT_DIM)]
    fileData = tf.decode_csv(value, record_defaults=defaultVal)
    features = fileData[:WIDE*FEATURE_DIM]
    features = tf.reshape(features, [WIDE, FEATURE_DIM])
    labels = fileData[WIDE*FEATURE_DIM:]
    return features, labels
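All of the snippets on this page build TensorFlow 1.x queue-based input pipelines around tf.TextLineReader and tf.decode_csv, so the returned tensors only yield data once the queue runners are started. Below is a minimal sketch, not part of the original source, of how read_audio_csv might be driven in a session; the CSV file name and the WIDE, FEATURE_DIM, OUT_DIM constants are placeholder assumptions.

import tensorflow as tf

WIDE, FEATURE_DIM, OUT_DIM = 10, 20, 2  # assumed values, for illustration only

filename_queue = tf.train.string_input_producer(["audio_features.csv"])  # hypothetical file
features, labels = read_audio_csv(filename_queue)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        f, l = sess.run([features, labels])  # one parsed CSV row per run() call
    finally:
        coord.request_stop()
        coord.join(threads)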
def _load_samples(csv_name, image_type):
    filename_queue = tf.train.string_input_producer(
        [csv_name])
    reader = tf.TextLineReader()
    _, csv_filename = reader.read(filename_queue)
    # each CSV row holds two file paths
    record_defaults = [tf.constant([], dtype=tf.string),
                       tf.constant([], dtype=tf.string)]
    filename_i, filename_j = tf.decode_csv(
        csv_filename, record_defaults=record_defaults)
    file_contents_i = tf.read_file(filename_i)
    file_contents_j = tf.read_file(filename_j)
    if image_type == '.jpg':
        image_decoded_A = tf.image.decode_jpeg(
            file_contents_i, channels=model.IMG_CHANNELS)
        image_decoded_B = tf.image.decode_jpeg(
            file_contents_j, channels=model.IMG_CHANNELS)
    elif image_type == '.png':
        image_decoded_A = tf.image.decode_png(
            file_contents_i, channels=model.IMG_CHANNELS, dtype=tf.uint8)
        image_decoded_B = tf.image.decode_png(
            file_contents_j, channels=model.IMG_CHANNELS, dtype=tf.uint8)
    else:
        raise ValueError('Unsupported image type: %s' % image_type)
    return image_decoded_A, image_decoded_B
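The tensors returned by _load_samples are a single decoded image pair; downstream code typically batches them. The sketch below is not from the original project: the CSV name, target size, and batch parameters are assumptions, and it assumes model.IMG_CHANNELS is a fixed Python constant so that tf.train.shuffle_batch sees fully defined shapes after the resize.

image_a, image_b = _load_samples("train.csv", ".jpg")  # hypothetical CSV path
image_a = tf.image.resize_images(image_a, [256, 256])  # fixed size -> static shape for batching
image_b = tf.image.resize_images(image_b, [256, 256])
images_a, images_b = tf.train.shuffle_batch(
    [image_a, image_b],
    batch_size=16,
    num_threads=2,
    capacity=500,
    min_after_dequeue=100)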
def test_inputs(self, csv, batch_size, verbose=False):
    print("input csv file path: %s, batch size: %d" % (csv, batch_size))
    filename_queue = tf.train.string_input_producer([csv], shuffle=False)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    filename, label = tf.decode_csv(serialized_example, [["path"], [0]])
    label = tf.cast(label, tf.int32)
    jpg = tf.read_file(filename)
    image = tf.image.decode_jpeg(jpg, channels=3)
    image = tf.cast(image, tf.float32)
    if verbose:
        print("original image shape:")
        print(image.get_shape())
    # resize before cropping
    dist = tf.image.resize_images(image, (FLAGS.scale_h, FLAGS.scale_w))
    # center crop or pad to the input size
    dist = tf.image.resize_image_with_crop_or_pad(dist, FLAGS.input_h, FLAGS.input_w)
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(FLAGS.num_examples_per_epoch_for_train * min_fraction_of_examples_in_queue)
    print('filling queue with %d train images before starting to train. This will take a few minutes.' % min_queue_examples)
    return self._generate_image_and_label_batch(dist, label, min_queue_examples, batch_size, shuffle=False)
def csv_inputs(self, csv, batch_size, distorted=False, verbose=False):
    print("input csv file path: %s, batch size: %d" % (csv, batch_size))
    filename_queue = tf.train.string_input_producer([csv], shuffle=True)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    filename, label = tf.decode_csv(serialized_example, [["path"], [0]])
    label = tf.cast(label, tf.int32)
    jpg = tf.read_file(filename)
    image = tf.image.decode_jpeg(jpg, channels=3)
    image = tf.cast(image, tf.float32)
    if verbose:
        print("original image shape:")
        print(image.get_shape())
    if distorted:
        # resize before cropping
        dist = tf.image.resize_images(image, (FLAGS.scale_h, FLAGS.scale_w))
        # center crop or pad to the input size
        dist = tf.image.resize_image_with_crop_or_pad(dist, FLAGS.input_h, FLAGS.input_w)
        # random flip
        dist = tf.image.random_flip_left_right(dist)
        # color distortion (disabled)
        # dist = self.distort_color(dist)
    else:
        # resize directly to the input size
        dist = tf.image.resize_images(image, (FLAGS.input_h, FLAGS.input_w))
    if verbose:
        print("dist image shape:")
        print(dist.get_shape())
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(FLAGS.num_examples_per_epoch_for_train * min_fraction_of_examples_in_queue)
    print('filling queue with %d train images before starting to train. This will take a few minutes.' % min_queue_examples)
    return self._generate_image_and_label_batch(dist, label, min_queue_examples, batch_size)
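Both methods above end with a call to self._generate_image_and_label_batch, which is not shown on this page. A plausible sketch of such a helper, following the standard CIFAR-10 tutorial batching pattern, is given below; the thread count and queue capacities are assumptions, not the project's actual values.

def _generate_image_and_label_batch(self, image, label, min_queue_examples,
                                    batch_size, shuffle=True):
    # image and label need fully defined static shapes at this point.
    num_preprocess_threads = 4  # assumed
    if shuffle:
        images, labels = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, labels = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)
    return images, labels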
The next three functions are from image_processing.py in the single-image-depth-estimation project (author: liuhyCV).
def train_image(dataset, batch_size=None):
    filename_queue = tf.train.string_input_producer([dataset.file_name()], shuffle=True)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    rgb_filename, depth_filename = tf.decode_csv(serialized_example,
                                                 [["path"], ["meters"]])
    # input
    rgb_png = tf.read_file(rgb_filename)
    image = tf.image.decode_png(rgb_png, channels=3)
    image = tf.cast(image, tf.float32)
    # target
    depth_png = tf.read_file(depth_filename)
    depth = tf.image.decode_png(depth_png, channels=1)
    depth = tf.cast(depth, tf.float32)
    depth = tf.div(depth, [255.0])
    # depth = tf.cast(depth, tf.int64)
    # resize
    image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
    invalid_depth = tf.sign(depth)
    # generate batch; this is a module-level function, so use the batch_size
    # argument rather than self.batch_size
    images, depths, invalid_depths = tf.train.batch(
        [image, depth, invalid_depth],
        batch_size=batch_size,
        num_threads=4,
        capacity=50 + 3 * batch_size,
    )
    return images, depths, invalid_depths
def train_batch_inputs(dataset_csv_file_path, batch_size):
    with tf.name_scope('batch_processing'):
        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')
        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)
        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)
        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size=batch_size,
            num_threads=4,
            capacity=50 + 3 * batch_size
        )
        return images, depths, invalid_depths
def eval_batch_inputs(dataset_csv_file_path, batch_size):
    with tf.name_scope('eval_batch_processing'):
        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')
        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)
        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)
        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size=batch_size,
            num_threads=4,
            capacity=50 + 3 * batch_size
        )
        return images, depths, invalid_depths
def csv_inputs(self, csv_file_path):
    filename_queue = tf.train.string_input_producer([csv_file_path], shuffle=True)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    filename, depth_filename, depthMeters_filename = tf.decode_csv(
        serialized_example, [["path"], ["annotation"], ["meters"]])
    # input
    rgb_png = tf.read_file(filename)
    image = tf.image.decode_png(rgb_png, channels=3)
    image = tf.cast(image, tf.float32)
    # target
    depth_png = tf.read_file(depth_filename)
    depth = tf.image.decode_png(depth_png, channels=1)
    depth = tf.cast(depth, tf.float32)
    depth = tf.div(depth, [255.0])
    # depth = tf.cast(depth, tf.int64)
    # resize
    image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
    invalid_depth = tf.sign(depth)
    # generate batch
    images, depths, invalid_depths = tf.train.batch(
        [image, depth, invalid_depth],
        batch_size=self.batch_size,
        num_threads=4,
        capacity=50 + 3 * self.batch_size,
    )
    return images, depths, invalid_depths
def csv_inputs_test(self, csv_file_path):
    filename_queue = tf.train.string_input_producer([csv_file_path], shuffle=False)
    reader = tf.TextLineReader()
    _, serialized_example = reader.read(filename_queue)
    filename, depth_filename, depthMeters_filename = tf.decode_csv(
        serialized_example, [["path"], ["annotation"], ["meters"]])
    # input
    rgb_png = tf.read_file(filename)
    image = tf.image.decode_png(rgb_png, channels=3)
    image = tf.cast(image, tf.float32)
    # target
    depth_png = tf.read_file(depth_filename)
    depth = tf.image.decode_png(depth_png, channels=1)
    depth = tf.cast(depth, tf.float32)
    depth = tf.div(depth, [255.0])
    # resize
    image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
    invalid_depth = tf.sign(depth)
    # generate batch
    images, depths, invalid_depths, filenames, depth_filenames = tf.train.batch(
        [image, depth, invalid_depth, filename, depth_filename],
        batch_size=self.batch_size,
        num_threads=4,
        capacity=50 + 3 * self.batch_size,
    )
    return images, depths, invalid_depths, filenames, depth_filenames
def _get_image(self):
    _, records = self.reader.read(self.input_queue)
    file_names = tf.decode_csv(records,
                               [tf.constant([], tf.string), tf.constant([], tf.string)],
                               field_delim=None, name=None)
    im_raw = tf.read_file(self.base_folder + file_names[0])
    seg_raw = tf.read_file(self.base_folder + file_names[1])
    image = tf.reshape(tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
                       self.image_size, name='input_image')
    seg = tf.reshape(tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
                     self.image_size, name='input_seg')
    return image, seg, file_names[0]
def load_data(csv, batch_size, shuffle=True, distored=True):
    queue = tf.train.string_input_producer(csv, shuffle=shuffle)
    reader = tf.TextLineReader()
    key, value = reader.read(queue)
    filename, label = tf.decode_csv(value, [["path"], [1]], field_delim=" ")
    label = tf.cast(label, tf.int64)
    label = tf.one_hot(label, depth=get_count_member(), on_value=1.0, off_value=0.0, axis=-1)
    jpeg = tf.read_file(filename)
    image = tf.image.decode_jpeg(jpeg, channels=3)
    image = tf.cast(image, tf.float32)
    image.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
    if distored:
        # integer division so random.randint gets integer bounds
        cropsize = random.randint(INPUT_SIZE, INPUT_SIZE + (IMAGE_SIZE - INPUT_SIZE) // 2)
        framesize = INPUT_SIZE + (cropsize - INPUT_SIZE) * 2
        image = tf.image.resize_image_with_crop_or_pad(image, framesize, framesize)
        image = tf.random_crop(image, [cropsize, cropsize, 3])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_brightness(image, max_delta=0.8)
        image = tf.image.random_contrast(image, lower=0.8, upper=1.0)
        image = tf.image.random_hue(image, max_delta=0.04)
        image = tf.image.random_saturation(image, lower=0.6, upper=1.4)
    image = tf.image.resize_images(image, DST_INPUT_SIZE, DST_INPUT_SIZE)
    image = tf.image.per_image_whitening(image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    return _generate_image_and_label_batch(
        image,
        label,
        filename,
        min_queue_examples, batch_size,
        shuffle=shuffle)
def read_csv(batch_size, file_name):
    filename_queue = tf.train.string_input_producer([file_name])
    reader = tf.TextLineReader(skip_header_lines=0)
    key, value = reader.read(filename_queue)
    decoded = tf.decode_csv(value, field_delim=' ',
                            record_defaults=[[0] for i in range(nlp_segment.flags.max_sentence_len * 2)])
    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)
def read_csv(batch_size, file_name):
    filename_queue = tf.train.string_input_producer([file_name])
    reader = tf.TextLineReader(skip_header_lines=0)
    key, value = reader.read(filename_queue)
    decoded = tf.decode_csv(value, field_delim=' ',
                            record_defaults=[[0] for i in range(ner_tv.flags.sentence_length * 2)])
    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)
def read_my_file_format(filename):
    # first column is the image path, followed by ten float features
    record_defaults = [[""]] + [[1.0]] * 10
    components = tf.decode_csv(filename, record_defaults=record_defaults,
                               field_delim=" ")
    imgName = components[0]
    features = components[1:]
    img_contents = tf.read_file(imgName)
    img = tf.image.decode_jpeg(img_contents, channels=1)
    return img, features
def ImageProducer_imagenet(filename_queue, isotropic):
    line_reader = tf.TextLineReader()
    key, line = line_reader.read(filename_queue)
    # line_batch or line (depending on whether you want to batch)
    filename, label = tf.decode_csv(line,
                                    record_defaults=[tf.constant([], dtype=tf.string),
                                                     tf.constant([], dtype=tf.int32)],
                                    field_delim=' ')
    file_contents = tf.read_file(filename)
    example = tf.image.decode_jpeg(file_contents)
    processed_img = process_image(example, isotropic)
    # Convert from RGB channel ordering to BGR. This matches, for instance, how OpenCV orders channels.
    processed_img = tf.reverse(processed_img, [False, False, True])
    # processed_img.set_shape([224, 224, 3])
    return processed_img, label
def parse_csv(rows_string_tensor):
    """Takes the string input tensor and returns a dict of rank-2 tensors."""
    # Takes a rank-1 tensor and converts it into a rank-2 tensor.
    # For example, data like ['csv,line,1', 'csv,line,2', ...] becomes
    # [['csv,line,1'], ['csv,line,2']], which after parsing yields a
    # tuple of tensors: [['csv'], ['csv']], [['line'], ['line']], [[1], [2]].
    row_columns = tf.expand_dims(rows_string_tensor, -1)
    columns = tf.decode_csv(row_columns, record_defaults=CSV_COLUMN_DEFAULTS)
    features = dict(zip(CSV_COLUMNS, columns))
    # Remove unused columns
    for col in UNUSED_COLUMNS:
        features.pop(col)
    return features
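As a quick illustration of what parse_csv produces, the sketch below defines a toy schema (these constants are assumptions for the example, not the module's real CSV_COLUMNS, CSV_COLUMN_DEFAULTS, or UNUSED_COLUMNS) and parses two CSV lines.

# Assumed toy schema, for illustration only.
CSV_COLUMNS = ['age', 'workclass', 'label']
CSV_COLUMN_DEFAULTS = [[0], [''], [0]]
UNUSED_COLUMNS = ['workclass']

lines = tf.constant(['39,State-gov,0', '50,Private,1'])
features = parse_csv(lines)
# Once evaluated in a session: {'age': [[39], [50]], 'label': [[0], [1]]}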
def input_fn(batch_size, file_name):
    """
    :param batch_size:
    :param file_name:
    :return: features and label dict
    """
    examples_op = tf.contrib.learn.read_batch_examples(
        file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        num_epochs=1,
        parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string)] * len(COLUMNS), field_delim=","))
    examples_dict = {}
    for i, header in enumerate(COLUMNS):
        examples_dict[header] = examples_op[:, i]
    feature_cols = {k: tf.string_to_number(examples_dict[k], out_type=tf.float32)
                    for k in CONTINUOUS_COLUMNS}
    feature_cols.update({k: dense_to_sparse(examples_dict[k])
                         for k in CATEGORICAL_COLUMNS})
    label = tf.string_to_number(examples_dict[LABEL_COLUMN], out_type=tf.int32)
    return feature_cols, label
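dense_to_sparse is a helper referenced above but not shown on this page. One common way to write it, converting a dense string column into the SparseTensor that categorical feature columns expect, is sketched below; this is an assumption about its implementation, not the project's actual code.

def dense_to_sparse(dense_tensor):
    # Keep only non-empty strings; empty cells become missing entries in the SparseTensor.
    indices = tf.where(tf.not_equal(dense_tensor, ''))
    values = tf.gather_nd(dense_tensor, indices)
    return tf.SparseTensor(indices=indices,
                           values=values,
                           dense_shape=tf.shape(dense_tensor, out_type=tf.int64))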
The next function is from wide_deep_evaluate_predict.py in provectus-final-project (author: eds-uga).
def input_fn_eval(batch_size, file_name):
    """
    Input function to predict the test features
    :param batch_size:
    :param file_name:
    :return: features dict
    """
    examples_op = tf.contrib.learn.read_batch_examples(
        file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        randomize_input=False,
        read_batch_size=1,
        num_threads=5,
        num_epochs=1,
        parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string)] * len(COLUMNS), field_delim=","))
    examples_dict = {}
    for i, header in enumerate(COLUMNS):
        examples_dict[header] = examples_op[:, i]
    feature_cols = {k: tf.string_to_number(examples_dict[k], out_type=tf.float32)
                    for k in CONTINUOUS_COLUMNS}
    feature_cols.update({k: dense_to_sparse(examples_dict[k])
                         for k in CATEGORICAL_COLUMNS})
    return feature_cols
def _parse_example_proto(example_serialized):
    # parse the record, decode the jpeg, randomly select one caption and
    # convert it into integer token ids
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
        'image/coco-id': tf.FixedLenFeature([], dtype=tf.int64),
        'caption': tf.VarLenFeature(dtype=tf.string),
        # 'image/path': tf.FixedLenFeature([], dtype=tf.string),
    }
    features = tf.parse_single_example(example_serialized, feature_map)
    cocoid = features['image/coco-id']
    image = tf.image.decode_jpeg(
        features['image/encoded'],
        channels=3,
        try_recover_truncated=True)
    # the image COCO_train2014_000000167126.jpg was corrupted; it was replaced in
    # the train2014/ directory, but rather than re-encode everything,
    # try_recover_truncated keeps whatever part of a truncated image is readable
    # [0, 255) --> [0, 1)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # image_path = features['image/path']
    caption = tf.sparse_tensor_to_dense(features['caption'], default_value=".")
    caption = tf.random_shuffle(caption)[0]
    # the caption is stored as a comma-separated string of MAX_SEQ_LEN token ids
    record_defaults = [[PAD]] * MAX_SEQ_LEN
    caption_tids = tf.decode_csv(caption, record_defaults)
    caption_tids = tf.pack(caption_tids)
    return image, caption_tids, cocoid  # , image_path