cplx_adam_test.py: file source code (Python)

Project: complex_tf / Author: woodshop
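
This test checks that tf.train.AdamOptimizer accepts a learning rate given as a tensor: it runs three Adam steps on two complex64 variables (on GPU, where complex_tf supplies the kernels) and compares each update, along with the beta1/beta2 power accumulators, against a NumPy reference implementation.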
import numpy as np
import tensorflow as tf


# Note: the enclosing TestCase class name is assumed; the original excerpt
# showed only the method.
class CplxAdamTest(tf.test.TestCase):

  def testTensorLearningRate(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        # Initialize variables for the NumPy reference implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0-1.0j, 2.0-2.0j], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1+0.1j, 0.1-0.1j], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0+3.0j, 4.0-4.0j], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01-0.01j, 0.01+0.01j],
                             dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        # The learning rate is passed as a tensor rather than a Python float;
        # this is the behavior the test exercises.
        opt = tf.train.AdamOptimizer(tf.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0-1.0j, 2.0-2.0j], var0.eval())
        self.assertAllClose([3.0+3.0j, 4.0-4.0j], var1.eval())

        # Private accessor for the beta1**t and beta2**t accumulators, used
        # the same way in TensorFlow's own Adam tests.
        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
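
The loop above calls adam_update_numpy, a helper defined elsewhere in the same test file. The sketch below is a guess at what it computes, modeled on the reference helper in TensorFlow's own adam_test.py; the defaults match the AdamOptimizer values asserted above (beta1=0.9, beta2=0.999), but whether complex_tf's version squares the gradient or multiplies it by its conjugate in the second moment is an assumption to verify against the original file.

def adam_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9,
                      beta2=0.999, epsilon=1e-8):
  # Bias-corrected step size at step t (t counts from 1).
  alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
  # Moment estimates. For complex gradients the second moment may instead
  # be g_t * np.conj(g_t); g_t * g_t follows the real-valued TF reference
  # (assumption).
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t

Each call returns the updated parameter together with the new moment estimates, which the test threads back in as m0, v0 (and m1, v1) on the next step.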