def test_lm(self):
  hps = get_test_hparams()

  with tf.variable_scope("model"):
    model = LM(hps)

  with self.test_session() as sess:
    tf.initialize_all_variables().run()
    tf.initialize_local_variables().run()

    loss = 1e5
    for i in range(50):
      x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
      loss, _ = sess.run([model.loss, model.train_op],
                         {model.x: x, model.y: y, model.w: w})
      print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
      if np.isnan(loss):
        print("NaN detected")
        break

    self.assertLess(loss, 1.0)
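Note: tf.initialize_all_variables() and tf.initialize_local_variables() are the pre-1.0 initializer names; later TensorFlow 1.x releases deprecate them in favor of tf.global_variables_initializer() and tf.local_variables_initializer(). A minimal sketch of the same initialization step under that assumption (not part of the original test):

# Sketch (assumption): the TF 1.x replacements for the deprecated calls above.
# tf.global_variables_initializer() supersedes tf.initialize_all_variables();
# tf.local_variables_initializer() supersedes tf.initialize_local_variables().
import tensorflow as tf

with tf.Session() as sess:
  sess.run(tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer()))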
def testFinalOpsOnEvaluationLoop(self):
  value_op, update_op = slim.metrics.streaming_accuracy(
      self._predictions, self._labels)
  init_op = tf.group(tf.initialize_all_variables(),
                     tf.initialize_local_variables())

  # Create Checkpoint and log directories
  chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
  gfile.MakeDirs(chkpt_dir)
  logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
  gfile.MakeDirs(logdir)

  # Save initialized variables to checkpoint directory
  saver = tf.train.Saver()
  with self.test_session() as sess:
    init_op.run()
    saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

  # Now, run the evaluation loop:
  accuracy_value = slim.evaluation.evaluation_loop(
      '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
      max_number_of_evaluations=1)
  self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testRestoredModelPerformance(self):
  checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
  log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

  # First, save out the current model to a checkpoint:
  init_op = tf.group(tf.initialize_all_variables(),
                     tf.initialize_local_variables())
  saver = tf.train.Saver()
  with self.test_session() as sess:
    sess.run(init_op)
    saver.save(sess, checkpoint_path)

  # Next, determine the metric to evaluate:
  value_op, update_op = slim.metrics.streaming_accuracy(
      self._predictions, self._labels)

  # Run the evaluation and verify the results:
  accuracy_value = slim.evaluation.evaluate_once(
      '',
      checkpoint_path,
      log_dir,
      eval_op=update_op,
      final_op=value_op)
  self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def test_read_text_lines(self):
  gfile.Glob = self._orig_glob
  filename = self._create_temp_file("ABC\nDEF\nGHK\n")

  batch_size = 1
  queue_capacity = 5
  name = "my_batch"

  with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
    inputs = tf.contrib.learn.io.read_batch_examples(
        filename, batch_size, reader=tf.TextLineReader,
        randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
        name=name)
    session.run(tf.initialize_local_variables())

    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(session, coord=coord)

    self.assertAllEqual(session.run(inputs), [b"ABC"])
    self.assertAllEqual(session.run(inputs), [b"DEF"])
    self.assertAllEqual(session.run(inputs), [b"GHK"])
    with self.assertRaises(errors.OutOfRangeError):
      session.run(inputs)

    coord.request_stop()
def test_read_text_lines_multifile(self):
  gfile.Glob = self._orig_glob
  filenames = self._create_sorted_temp_files(["ABC\n", "DEF\nGHK\n"])

  batch_size = 1
  queue_capacity = 5
  name = "my_batch"

  with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
    inputs = tf.contrib.learn.io.read_batch_examples(
        filenames, batch_size, reader=tf.TextLineReader,
        randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
        name=name)
    session.run(tf.initialize_local_variables())

    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(session, coord=coord)

    self.assertAllEqual(session.run(inputs), [b"ABC"])
    self.assertAllEqual(session.run(inputs), [b"DEF"])
    self.assertAllEqual(session.run(inputs), [b"GHK"])
    with self.assertRaises(errors.OutOfRangeError):
      session.run(inputs)

    coord.request_stop()
def test_batch_text_lines(self):
  gfile.Glob = self._orig_glob
  filename = self._create_temp_file("A\nB\nC\nD\nE\n")

  batch_size = 3
  queue_capacity = 10
  name = "my_batch"

  with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
    inputs = tf.contrib.learn.io.read_batch_examples(
        [filename], batch_size, reader=tf.TextLineReader,
        randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
        read_batch_size=10, name=name)
    session.run(tf.initialize_local_variables())

    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(session, coord=coord)

    self.assertAllEqual(session.run(inputs), [b"A", b"B", b"C"])
    self.assertAllEqual(session.run(inputs), [b"D", b"E"])
    with self.assertRaises(errors.OutOfRangeError):
      session.run(inputs)

    coord.request_stop()
def testUpdateOpsReturnsCurrentValue(self):
  with self.test_session() as sess:
    values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, values_queue, [0, 1])
    _enqueue_vector(sess, values_queue, [-4.2, 9.1])
    _enqueue_vector(sess, values_queue, [6.5, 0])
    _enqueue_vector(sess, values_queue, [-3.2, 4.0])
    values = values_queue.dequeue()

    mean, update_op = metrics.streaming_mean(values)

    sess.run(tf.initialize_local_variables())

    self.assertAlmostEqual(0.5, sess.run(update_op), 5)
    self.assertAlmostEqual(1.475, sess.run(update_op), 5)
    self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
    self.assertAlmostEqual(1.65, sess.run(update_op), 5)

    self.assertAlmostEqual(1.65, sess.run(mean), 5)
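Every streaming metric in these tests follows the same contract: the call returns a value tensor plus an update_op, and the running accumulators behind them are stored as local variables, which is why each test runs tf.initialize_local_variables() before the first update. A minimal standalone sketch of that pattern, assuming a TF 1.x build where tf.contrib.metrics is importable:

# Sketch of the value/update_op pattern used throughout the tests above.
# The accumulators (total, count) live in local variables, so they must be
# initialized before update_op is run for the first time.
import tensorflow as tf

values = tf.placeholder(tf.float32)
mean, update_op = tf.contrib.metrics.streaming_mean(values)

with tf.Session() as sess:
  sess.run(tf.initialize_local_variables())
  for batch in ([0.0, 1.0], [2.0, 3.0]):
    sess.run(update_op, feed_dict={values: batch})
  print(sess.run(mean))  # running mean over all fed values: 1.5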
def test1dWeightedValues_placeholders(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    feed_values = (
        (0, 1),
        (-4.2, 9.1),
        (6.5, 0),
        (-3.2, 4.0)
    )
    values = tf.placeholder(dtype=tf.float32)

    # Create the queue that populates the weighted labels.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
    _enqueue_vector(sess, weights_queue, [1])
    _enqueue_vector(sess, weights_queue, [0])
    _enqueue_vector(sess, weights_queue, [0])
    _enqueue_vector(sess, weights_queue, [1])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean(values, weights)

    tf.initialize_local_variables().run()
    for i in range(4):
      update_op.eval(feed_dict={values: feed_values[i]})
    self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, values_queue, [0, 1])
    _enqueue_vector(sess, values_queue, [-4.2, 9.1])
    _enqueue_vector(sess, values_queue, [6.5, 0])
    _enqueue_vector(sess, values_queue, [-3.2, 4.0])
    values = values_queue.dequeue()

    # Create the queue that populates the weighted labels.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, weights_queue, [1, 1])
    _enqueue_vector(sess, weights_queue, [1, 0])
    _enqueue_vector(sess, weights_queue, [0, 1])
    _enqueue_vector(sess, weights_queue, [0, 0])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean(values, weights)

    tf.initialize_local_variables().run()
    for _ in range(4):
      update_op.eval()
    self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    feed_values = (
        (0, 1),
        (-4.2, 9.1),
        (6.5, 0),
        (-3.2, 4.0)
    )
    values = tf.placeholder(dtype=tf.float32)

    # Create the queue that populates the weighted labels.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, weights_queue, [1, 1])
    _enqueue_vector(sess, weights_queue, [1, 0])
    _enqueue_vector(sess, weights_queue, [0, 1])
    _enqueue_vector(sess, weights_queue, [0, 0])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean(values, weights)

    tf.initialize_local_variables().run()
    for i in range(4):
      update_op.eval(feed_dict={values: feed_values[i]})
    self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def testMultiDimensional(self):
  with self.test_session() as sess:
    values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
    _enqueue_vector(sess,
                    values_queue,
                    [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                    shape=(2, 2, 2))
    _enqueue_vector(sess,
                    values_queue,
                    [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                    shape=(2, 2, 2))
    values = values_queue.dequeue()

    mean, update_op = metrics.streaming_mean_tensor(values)

    sess.run(tf.initialize_local_variables())
    for _ in range(2):
      sess.run(update_op)
    self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                        sess.run(mean))
def testWeighted1d(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, values_queue, [0, 1])
    _enqueue_vector(sess, values_queue, [-4.2, 9.1])
    _enqueue_vector(sess, values_queue, [6.5, 0])
    _enqueue_vector(sess, values_queue, [-3.2, 4.0])
    values = values_queue.dequeue()

    # Create the queue that populates the weights.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
    _enqueue_vector(sess, weights_queue, [[1]])
    _enqueue_vector(sess, weights_queue, [[0]])
    _enqueue_vector(sess, weights_queue, [[1]])
    _enqueue_vector(sess, weights_queue, [[0]])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean_tensor(values, weights)

    sess.run(tf.initialize_local_variables())
    for _ in range(4):
      sess.run(update_op)
    self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, values_queue, [0, 1])
    _enqueue_vector(sess, values_queue, [-4.2, 9.1])
    _enqueue_vector(sess, values_queue, [6.5, 0])
    _enqueue_vector(sess, values_queue, [-3.2, 4.0])
    values = values_queue.dequeue()

    # Create the queue that populates the weights.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, weights_queue, [1, 1])
    _enqueue_vector(sess, weights_queue, [1, 0])
    _enqueue_vector(sess, weights_queue, [0, 1])
    _enqueue_vector(sess, weights_queue, [0, 0])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean_tensor(values, weights)

    sess.run(tf.initialize_local_variables())
    for _ in range(4):
      sess.run(update_op)
    self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
  with self.test_session() as sess:
    # Create the queue that populates the values.
    values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, values_queue, [0, 1])
    _enqueue_vector(sess, values_queue, [-4.2, 9.1])
    _enqueue_vector(sess, values_queue, [6.5, 0])
    _enqueue_vector(sess, values_queue, [-3.2, 4.0])
    values = values_queue.dequeue()

    # Create the queue that populates the weights.
    weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
    _enqueue_vector(sess, weights_queue, [0, 1])
    _enqueue_vector(sess, weights_queue, [0, 0])
    _enqueue_vector(sess, weights_queue, [0, 1])
    _enqueue_vector(sess, weights_queue, [0, 0])
    weights = weights_queue.dequeue()

    mean, update_op = metrics.streaming_mean_tensor(values, weights)

    sess.run(tf.initialize_local_variables())
    for _ in range(4):
      sess.run(update_op)
    self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
  labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
  accuracy, update_op = metrics.streaming_accuracy(
      predictions, labels)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_accuracy = accuracy.eval()
    for _ in range(10):
      self.assertEqual(initial_accuracy, accuracy.eval())
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
  labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
  precision, update_op = metrics.streaming_precision(
      predictions, labels)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_precision = precision.eval()
    for _ in range(10):
      self.assertEqual(initial_precision, precision.eval())
def testWeighted1d_placeholders(self):
  predictions = tf.placeholder(dtype=tf.float32)
  labels = tf.placeholder(dtype=tf.float32)
  feed_dict = {
      predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
      labels: ((0, 1, 1, 0), (1, 0, 0, 1))
  }
  precision, update_op = metrics.streaming_precision(
      predictions, labels, weights=tf.constant([[2], [5]]))

  with self.test_session():
    tf.initialize_local_variables().run()
    weighted_tp = 2.0 + 5.0
    weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
    expected_precision = weighted_tp / weighted_positives
    self.assertAlmostEqual(
        expected_precision, update_op.eval(feed_dict=feed_dict))
    self.assertAlmostEqual(
        expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d_placeholders(self):
  predictions = tf.placeholder(dtype=tf.float32)
  labels = tf.placeholder(dtype=tf.float32)
  feed_dict = {
      predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
      labels: ((0, 1, 1, 0), (1, 0, 0, 1))
  }
  precision, update_op = metrics.streaming_precision(
      predictions, labels, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

  with self.test_session():
    tf.initialize_local_variables().run()
    weighted_tp = 3.0 + 4.0
    weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
    expected_precision = weighted_tp / weighted_positives
    self.assertAlmostEqual(
        expected_precision, update_op.eval(feed_dict=feed_dict))
    self.assertAlmostEqual(
        expected_precision, precision.eval(feed_dict=feed_dict))
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
  labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
  auc, update_op = metrics.streaming_auc(
      predictions, labels)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_auc = auc.eval()
    for _ in range(10):
      self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
  labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
  specificity, update_op = metrics.streaming_specificity_at_sensitivity(
      predictions, labels, sensitivity=0.7)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_specificity = specificity.eval()
    for _ in range(10):
      self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testWeighted2d(self):
  predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
                        0.1, 0.2, 0.2, 0.26, 0.26]
  labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
  weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

  predictions = tf.constant(predictions_values, dtype=tf.float32)
  labels = tf.constant(labels_values)
  weights = tf.constant(weights_values)
  specificity, update_op = metrics.streaming_specificity_at_sensitivity(
      predictions, labels, weights=weights, sensitivity=0.4)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
    self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
  labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
  sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
      predictions, labels, specificity=0.7)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_sensitivity = sensitivity.eval()
    for _ in range(10):
      self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testValueTensorIsIdempotent(self):
  predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
  labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
  thresholds = [0, 0.5, 1.0]
  prec, prec_op = metrics.streaming_precision_at_thresholds(
      predictions, labels, thresholds)
  rec, rec_op = metrics.streaming_recall_at_thresholds(
      predictions, labels, thresholds)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates, then verify idempotency.
    sess.run([prec_op, rec_op])
    initial_prec = prec.eval()
    initial_rec = rec.eval()
    for _ in range(10):
      sess.run([prec_op, rec_op])
      self.assertAllClose(initial_prec, prec.eval())
      self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
  inputs = np.random.randint(0, 2, size=(100, 1))

  with self.test_session() as sess:
    predictions = tf.constant(inputs, dtype=tf.float32)
    labels = tf.constant(inputs)
    thresholds = [0.5]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    sess.run(tf.initialize_local_variables())
    sess.run([prec_op, rec_op])

    self.assertEqual(1, prec.eval())
    self.assertEqual(1, rec.eval())
def testAllIncorrect(self):
  inputs = np.random.randint(0, 2, size=(100, 1))

  with self.test_session() as sess:
    predictions = tf.constant(inputs, dtype=tf.float32)
    labels = tf.constant(1 - inputs, dtype=tf.float32)
    thresholds = [0.5]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    sess.run(tf.initialize_local_variables())
    sess.run([prec_op, rec_op])

    self.assertAlmostEqual(0, prec.eval())
    self.assertAlmostEqual(0, rec.eval())
def testExtremeThresholds(self):
  with self.test_session() as sess:
    predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
    labels = tf.constant([0, 1, 1, 1], shape=(1, 4))
    thresholds = [-1.0, 2.0]  # lower/higher than any values
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    [prec_low, prec_high] = tf.split(0, 2, prec)
    [rec_low, rec_high] = tf.split(0, 2, rec)

    sess.run(tf.initialize_local_variables())
    sess.run([prec_op, rec_op])

    self.assertAlmostEqual(0.75, prec_low.eval())
    self.assertAlmostEqual(0.0, prec_high.eval())
    self.assertAlmostEqual(1.0, rec_low.eval())
    self.assertAlmostEqual(0.0, rec_high.eval())
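A portability note on testExtremeThresholds: tf.split(0, 2, prec) uses the pre-1.0 argument order tf.split(split_dim, num_split, value). From TensorFlow 1.0 onward the signature is tf.split(value, num_or_size_splits, axis), so on newer builds the same two lines would read roughly as follows (a sketch under that assumption, reusing prec and rec from the test above):

# TF 1.0+ equivalent of the legacy tf.split calls in the test above;
# prec and rec are the threshold-indexed metric tensors defined there.
prec_low, prec_high = tf.split(prec, num_or_size_splits=2, axis=0)
rec_low, rec_high = tf.split(rec, num_or_size_splits=2, axis=0)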
def testValueTensorIsIdempotent(self):
  predictions = tf.random_normal((10, 3), seed=1)
  labels = tf.random_normal((10, 3), seed=2)
  error, update_op = metrics.streaming_mean_absolute_error(
      predictions, labels)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_error = error.eval()
    for _ in range(10):
      self.assertEqual(initial_error, error.eval())
def testValueTensorIsIdempotent(self):
  predictions = tf.random_normal((10, 3), seed=1)
  labels = tf.random_normal((10, 3), seed=2)
  normalizer = tf.random_normal((10, 3), seed=3)
  error, update_op = metrics.streaming_mean_relative_error(
      predictions, labels, normalizer)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())

    # Run several updates.
    for _ in range(10):
      sess.run(update_op)

    # Then verify idempotency.
    initial_error = error.eval()
    for _ in range(10):
      self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
  np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
  np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
  expected_error = np.mean(
      np.divide(np.absolute(np_predictions - np_labels),
                np_labels))

  predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
  labels = tf.constant(np_labels, shape=(1, 4))

  error, update_op = metrics.streaming_mean_relative_error(
      predictions, labels, normalizer=labels)

  with self.test_session() as sess:
    sess.run(tf.initialize_local_variables())
    self.assertEqual(expected_error, sess.run(update_op))
    self.assertEqual(expected_error, error.eval())