def testTensorLearningRate(self):
  for dtype in [tf.complex64]:
    with self.test_session(force_gpu=True):
      # Complex-valued initial values, gradients, and learning rate.
      v0 = [1.0 + 2.0j, 2.0 + 1.0j]
      v1 = [3.0 - 4.0j, 4.0 - 3.0j]
      g0 = [0.1 + 0.1j, 0.1 - 0.1j]
      g1 = [0.01 - 0.01j, 0.01 + 0.01j]
      lr = 3.0 - 1.5j
      var0 = tf.Variable(v0, dtype=dtype)
      var1 = tf.Variable(v1, dtype=dtype)
      grads0 = tf.constant(g0, dtype=dtype)
      grads1 = tf.constant(g1, dtype=dtype)
      # Keep the learning-rate tensor in the same complex dtype as the
      # variables to avoid a complex128/complex64 mismatch.
      lrate = tf.constant(lr, dtype=dtype)
      sgd_op = ctf.train.CplxGradientDescentOptimizer(
          lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.global_variables_initializer().run()
      # Fetch params to validate initial values
      self.assertAllCloseAccordingToType(v0, var0.eval())
      self.assertAllCloseAccordingToType(v1, var1.eval())
      # Run 1 step of sgd
      sgd_op.run()
      # Validate updated params
      self.assertAllCloseAccordingToType(
          [v0[0] - lr * g0[0], v0[1] - lr * g0[1]], var0.eval())
      self.assertAllCloseAccordingToType(
          [v1[0] - lr * g1[0], v1[1] - lr * g1[1]], var1.eval())
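The expected values asserted above follow the ordinary SGD update var <- var - lr * grad, just carried out with complex arithmetic. The NumPy sketch below reproduces the var0 assertion by hand; it is an illustration of that update rule under my assumption about what CplxGradientDescentOptimizer computes, not code from the ctf library itself.

import numpy as np

# Hypothetical check of the var0 assertion: one complex SGD step.
v0 = np.array([1.0 + 2.0j, 2.0 + 1.0j], dtype=np.complex64)
g0 = np.array([0.1 + 0.1j, 0.1 - 0.1j], dtype=np.complex64)
lr = np.complex64(3.0 - 1.5j)

expected_var0 = v0 - lr * g0
# lr * g0[0] = (3 - 1.5j) * (0.1 + 0.1j) = 0.45 + 0.15j,
# so expected_var0[0] = (1 + 2j) - (0.45 + 0.15j) = 0.55 + 1.85j.
print(expected_var0)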