def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec."""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}
    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = hyp.split(" ")
            ref = ref.split(" ")
            sess.run(update_op, {
                predictions["predicted_tokens"]: [hyp],
                labels["target_tokens"]: [ref]
            })
            scores.append(sess.run(value))
        for score, expected in zip(scores, expected_scores):
            np.testing.assert_almost_equal(score, expected, decimal=2)
Python examples: tf.local_variables_initializer()
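All of the snippets below share one TF 1.x detail: streaming metrics and epoch-limited input producers keep their state (accumulators, epoch counters) in *local* variables, which tf.global_variables_initializer() does not touch. A minimal sketch of the failure mode and the fix (tf.metrics.mean is just a stand-in for any streaming op):

import tensorflow as tf

values = tf.placeholder(tf.float32)
# The running total and count live in the LOCAL_VARIABLES collection.
mean, update_op = tf.metrics.mean(values)

with tf.Session() as sess:
    # Without this line, sess.run(update_op) fails with a
    # FailedPreconditionError about an uninitialized value.
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, feed_dict={values: [1.0, 2.0, 3.0]})
    print(sess.run(mean))  # 2.0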
def omniglot():
    sess = tf.InteractiveSession()
    """ def wrapper(v):
            return tf.Print(v, [v], message="Printing v")
        v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
        temp = wrapper(v)
        # with tf.control_dependencies([temp]):
        temp.eval()
        print('Hello')"""

def update_tensor(V, dim2, val):
    """Returns a copy of V with V[i, dim2[i]] replaced by val[i]."""
    val = tf.cast(val, V.dtype)

    def body(_, elems):
        # Python 3 removed tuple unpacking in function signatures.
        v, d2, chg = elems
        d2_int = tf.cast(d2, tf.int32)
        # tf.concat_v2 was renamed tf.concat in TF 1.0.
        return tf.slice(
            tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
            [0], [v.get_shape().as_list()[0]])

    Z = tf.scan(body, elems=(V, dim2, val),
                initializer=tf.constant(1, shape=V.get_shape().as_list()[1:],
                                        dtype=tf.float32),
                name="Scan_Update")
    return Z
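A toy check of update_tensor (a sketch; the values are illustrative): each row of V gets the element at column dim2[i] replaced by val[i].

V = tf.constant([[1., 2., 3.], [4., 5., 6.]])
dim2 = tf.constant([0, 2])   # column to overwrite in each row
val = tf.constant([9., 9.])
Z = update_tensor(V, dim2, val)
with tf.Session() as sess:
    print(sess.run(Z))  # [[9. 2. 3.] [4. 5. 9.]]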
def test_reading_without_targets(self):
    num_epochs = 50
    data_provider = make_parallel_data_provider(
        data_sources_source=[self.source_file.name],
        data_sources_target=None,
        num_epochs=num_epochs,
        shuffle=True)
    item_keys = list(data_provider.list_items())
    item_values = data_provider.get(item_keys)
    items_dict = dict(zip(item_keys, item_values))
    self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with tf.contrib.slim.queues.QueueRunners(sess):
            item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]
    for item_dict in item_dicts_:
        self.assertEqual(item_dict["source_len"], 2)
        item_dict["source_tokens"] = np.char.decode(
            item_dict["source_tokens"].astype("S"), "utf-8")
        self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
def predictPL(self):
    B = self.flags.batch_size
    W, H, C = self.flags.width, self.flags.height, self.flags.color
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, H, W, C])
    # with open(self.flags.pred_path, 'w') as f:
    #     pass
    self._build(inputs, resize=False)
    counter = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        for imgs, imgnames in self.DATA.test_generator():
            pred = sess.run(self.logit, feed_dict={inputs: imgs})
            np.save("%s/%d.npy" % (self.flags.pred_path, counter),
                    {"pred": pred, "name": imgnames})
            counter += len(imgs)
            # Integer division: "/" yields a float under Python 3.
            if counter // B % 10 == 0:
                print_mem_time("%d images predicted" % counter)
# predict with placeholders
def predict_from_placeholder(self, activation=None):
    self._build()
    self._get_summary()
    if activation is not None:
        self.logit = self._activate(self.logit, activation)
    with open(self.flags.pred_path, 'w') as f:  # truncate any previous predictions
        pass
    count = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if self.flags.log_path and self.flags.visualize is not None:
            summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
        for batch in self._batch_gen_test():
            x, _, epoch = batch
            if self.flags.log_path and self.flags.visualize is not None:
                summary, pred = sess.run([self.summ_op, self.logit],
                                         feed_dict={self.inputs: x, self.is_training: 0})
                summary_writer.add_summary(summary, count)
            else:
                pred = sess.run(self.logit, feed_dict={self.inputs: x, self.is_training: 0})
            count += 1
            if count % self.flags.verbosity == 0:
                print_mem_time("Epoch %d Batch %d " % (epoch, count))
            self.write_pred(pred)
def input_pipeline(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=num_epochs, shuffle=is_training)
    # When num_epochs is not None, string_input_producer keeps its epoch
    # counter in a *local* variable, so local variables must be initialized
    # or reading raises an uninitialized-variable error.
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, img_mask_batch, loss_mask_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, img_mask_batch, loss_mask_batch
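A usage sketch for the pipeline above ('train.tfrecords' is a placeholder path; read_my_file_format comes from the surrounding project). The epoch counter created by string_input_producer(num_epochs=...) is the local variable that forces the extra initializer:

with tf.Session() as sess:
    clips, img_masks, loss_masks = input_pipeline(
        ['train.tfrecords'], batch_size=8, num_epochs=1)
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            batch = sess.run(clips)
    except tf.errors.OutOfRangeError:
        pass  # num_epochs exhausted
    finally:
        coord.request_stop()
        coord.join(threads)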
def input_pipeline_dis(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=num_epochs, shuffle=is_training)
    # As above: the epoch counter is a local variable, so initialize local
    # variables before reading or an uninitialized-variable error is raised.
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format_dis(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, label_batch, text_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, label_batch, text_batch
def examine_batches(features_batch, targets_batch):
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            for it in range(5000):
                features, targets = sess.run([features_batch, targets_batch])
                if it % 100 == 0:
                    LOGGER.debug(it)
                    # logging expects a format string as the first argument
                    LOGGER.debug('%s %s %s',
                                 len(features),
                                 features[0].shape,
                                 np.max(features[0][0][7][:]))
                    LOGGER.debug(np.argmax(targets, axis=1))
        except tf.errors.OutOfRangeError:
            LOGGER.info('Training stopped - queue is empty.')
        except Exception as e:
            LOGGER.error(e)
        finally:
            coord.request_stop()
            coord.join(threads)
# test_conv1.py — from the project tensorflow-action-conditional-video-prediction (author: williamd4112)
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)
        # Configure session
        config = get_config(args)
        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)
        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
def evaluate():
    """Eval ocr for a number of steps."""
    with tf.Graph().as_default() as g:
        images, labels, seq_lengths = ocr.inputs()
        logits, timesteps = ocr.inference(images, FLAGS.eval_batch_size, train=True)
        ler = ocr.create_label_error_rate(logits, labels, timesteps)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        config = tf.ConfigProto(device_count={'GPU': 0})
        sess = tf.Session(config=config)
        sess.run(init_op)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
        while True:
            eval_once(saver, summary_writer, ler, summary_op)
            if FLAGS.run_once:
                break
            # print("Waiting for next evaluation for " + str(FLAGS.eval_interval_secs) + " sec")
            time.sleep(FLAGS.eval_interval_secs)
def test_batch(self):
    with self.test_session() as sess:
        df = pd.DataFrame(
            ['TQ2379_0_0_B TQ2379_0_0.jpg F 1776:520|1824:125'.split(),
             'TQ2379_0_0_B TQ2379_0_0.jpg F 1776:500|1824:125'.split(),
             ],
            columns=['id', 'image', 'class', 'detections'])
        df = extract_crops_sw(df, 250, False, 250)
        batch = create_batch(df, False)
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = sess.run(batch)
        self.assertListEqual(list(labels[0, 2, 3, :5]),
                             [1., 74., 125., 30., 30.])
        self.assertTrue(labels[0, 2, 3, 5 + 5])
        coord.request_stop()
        coord.join(threads)
def inputs_test(filename, batch_size, num_epochs, num_threads,
                imshape, num_examples_per_epoch=128):
    # NOTE: this only *creates* the init op; it has no effect unless the
    # returned op is actually run inside a session (see the sketch below).
    tf.local_variables_initializer()
    if not num_epochs:
        num_epochs = None
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs, name='string_input_producer')
        image, label = reader.read_and_decode_wholefile(filename_queue, imshape, normalize=True)
        images, sparse_labels = tf.train.batch([image, label], batch_size=batch_size)
    return images, sparse_labels
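A corrected usage sketch for inputs_test (the filename and shapes are illustrative); the initializer has to be run in the session before the queue runners start:

images, sparse_labels = inputs_test('test.tfrecords', batch_size=32,
                                    num_epochs=1, num_threads=4,
                                    imshape=(28, 28, 1))
with tf.Session() as sess:
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        imgs, labels = sess.run([images, sparse_labels])
    finally:
        coord.request_stop()
        coord.join(threads)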
def main():
    dqn = DQN(ENV_NAME, DOUBLE_DQN, DUELING_DQN, PER, TRAINING, RENDER)
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # try to restore a trained model and play!
        dqn.util.restore_graph(sess, forTrain=TRAINING)
        for ep in tqdm(range(MAX_EPISODES)):  # for episodes
            print("Episode no. {} :".format(ep))
            dqn.playing(sess)
            print('Episode %d: totalEpReward = %.2f , took: %.3f mins'
                  % (ep, dqn.totalReward, dqn.duration / 60.0))

# RUN...
def main():
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    dummy_reader = Dataset_reader_classification(filename=_DATASET_PATH_, num_classes=_CLASSES_)
    # dummy_reader.pre_process_image(writer_pre_proc)
    with tf.Session() as sess:
        init_op.run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = dummy_reader.next_batch(_BATCH_SIZE_)
        meanimage = sess.run([dummy_reader.mean_image])[0]
        print(meanimage)
        print(images[0])
        if _SHOW_IMAGES_:
            for image in images:
                cv2.imshow('Image', image)
            cv2.imshow('Meanimage', meanimage)
            cv2.waitKey(0)
        coord.request_stop()
        coord.join(threads)
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec."""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}
    value, update_op = metric_spec.create_metric_ops(
        None, labels, predictions)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = hyp.split(" ")
            ref = ref.split(" ")
            sess.run(update_op, {
                predictions["predicted_tokens"]: [hyp],
                labels["target_tokens"]: [ref]
            })
            scores.append(sess.run(value))
        for score, expected in zip(scores, expected_scores):
            self.assertNDArrayNear(score, expected, 0.01)
def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Create checkpoint and log directories.
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)
    # Save initialized variables to the checkpoint directory.
    saver = tf.train.Saver()
    with self.test_session() as sess:
        init_op.run()
        saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))
    # Now, run the evaluation loop:
    accuracy_value = slim.evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
    # First, save out the current model to a checkpoint:
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
    with self.test_session() as sess:
        sess.run(init_op)
        saver.save(sess, checkpoint_path)
    # Next, determine the metric to evaluate:
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    # Run the evaluation and verify the results:
    accuracy_value = slim.evaluation.evaluate_once(
        '',
        checkpoint_path,
        log_dir,
        eval_op=update_op,
        final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        mean, update_op = metrics.streaming_mean(values)
        sess.run(tf.local_variables_initializer())
        self.assertAlmostEqual(0.5, sess.run(update_op), 5)
        self.assertAlmostEqual(1.475, sess.run(update_op), 5)
        self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
        self.assertAlmostEqual(1.65, sess.run(update_op), 5)
        self.assertAlmostEqual(1.65, sess.run(mean), 5)
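This test pins down the contract the remaining tests rely on: each sess.run(update_op) folds the dequeued batch into the metric's local accumulators (a running total and count, in the case of streaming_mean) and returns the updated value, while evaluating the value tensor alone is a pure read that leaves the accumulators untouched, which is why the final sess.run(mean) matches the last update.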
def test1dWeightedValues(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        # Create the queue that populates the per-batch weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [1])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [1])
        weights = weights_queue.dequeue()
        mean, update_op = metrics.streaming_mean(values, weights)
        tf.local_variables_initializer().run()
        for _ in range(4):
            update_op.eval()
        # Only batches 1 and 4 carry weight, so the mean averages their four values.
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
        # Values are fed through a placeholder rather than a queue.
        feed_values = (
            (0, 1),
            (-4.2, 9.1),
            (6.5, 0),
            (-3.2, 4.0)
        )
        values = tf.placeholder(dtype=tf.float32)
        # Create the queue that populates the per-batch weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [1])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [1])
        weights = weights_queue.dequeue()
        mean, update_op = metrics.streaming_mean(values, weights)
        tf.local_variables_initializer().run()
        for i in range(4):
            update_op.eval(feed_dict={values: feed_values[i]})
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        # Create the queue that populates the per-element weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, weights_queue, [1, 1])
        _enqueue_vector(sess, weights_queue, [1, 0])
        _enqueue_vector(sess, weights_queue, [0, 1])
        _enqueue_vector(sess, weights_queue, [0, 0])
        weights = weights_queue.dequeue()
        mean, update_op = metrics.streaming_mean(values, weights)
        tf.local_variables_initializer().run()
        for _ in range(4):
            update_op.eval()
        # Four elements have nonzero weight: 0, 1, -4.2, and 0.
        self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def testMultiDimensional(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
        _enqueue_vector(sess,
                        values_queue,
                        [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                        shape=(2, 2, 2))
        _enqueue_vector(sess,
                        values_queue,
                        [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                        shape=(2, 2, 2))
        values = values_queue.dequeue()
        mean, update_op = metrics.streaming_mean_tensor(values)
        sess.run(tf.local_variables_initializer())
        for _ in range(2):
            sess.run(update_op)
        self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                            sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        mean, update_op = metrics.streaming_mean_tensor(values)
        sess.run(tf.local_variables_initializer())
        self.assertAllClose([[0, 1]], sess.run(update_op), 5)
        self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
        self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        # Create the queue that populates the weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [[1]])
        _enqueue_vector(sess, weights_queue, [[0]])
        _enqueue_vector(sess, weights_queue, [[1]])
        _enqueue_vector(sess, weights_queue, [[0]])
        weights = weights_queue.dequeue()
        mean, update_op = metrics.streaming_mean_tensor(values, weights)
        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()
        # Create the queue that populates the weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, weights_queue, [1, 1])
        _enqueue_vector(sess, weights_queue, [1, 0])
        _enqueue_vector(sess, weights_queue, [0, 1])
        _enqueue_vector(sess, weights_queue, [0, 0])
        weights = weights_queue.dequeue()
        mean, update_op = metrics.streaming_mean_tensor(values, weights)
        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    accuracy, update_op = metrics.streaming_accuracy(
        predictions, labels)
    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())
        # Run several updates.
        for _ in range(10):
            sess.run(update_op)
        # Then verify idempotency.
        initial_accuracy = accuracy.eval()
        for _ in range(10):
            self.assertEqual(initial_accuracy, accuracy.eval())