Python complex64() examples (source code)

variable_ops_test.py (project: complex_tf, author: woodshop)
def testTemporaryVariableNoLeak(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="bar")
      final = tf.identity(var)
      final.eval()
variable_ops_test.py (project: complex_tf, author: woodshop)
def testTwoTemporaryVariablesNoLeaks(self):
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="var1")
      var2 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="var2")
      final = var1 + var2
      final.eval()
variable_ops_test.py (project: complex_tf, author: woodshop)
def testAssignDependencyAcrossDevices(self):
    with self.test_session(use_gpu=True):
      # The variable and an op to increment it are on the GPU.
      var = state_ops.variable_op([1], tf.complex64)
      tf.assign(var, [1.0+2.0j]).eval()
      increment = tf.assign_add(var, [2.0+3.0j])
      with tf.control_dependencies([increment]):
        with tf.device("/cpu:0"):
          # This mul op is pinned to the CPU, but reads the variable from the
          # GPU. The test ensures that the dependency on 'increment' is still
          # honored, i.e., the Send and Recv from GPU to CPU should take place
          # only after the increment.
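          # After the increment, var holds 3+5j, and (3+5j)**2 == -16+30j.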
          result = tf.multiply(var, var)
      self.assertAllClose([-16.0+30.0j], result.eval())
variable_ops_test.py (project: complex_tf, author: woodshop)
def testIsVariableInitialized(self):
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu):
        v0 = state_ops.variable_op([1, 2], tf.complex64)
        self.assertEqual(False, tf.is_variable_initialized(v0).eval())
        tf.assign(v0, [[2.0+3.0j, 3.0+2.0j]]).eval()
        self.assertEqual(True, tf.is_variable_initialized(v0).eval())
fft.py (project: Mendelssohn, author: diggerdu)
def istft(spec, overlap=4):
    assert spec.shape[0] > 1
    S = tf.placeholder(dtype=tf.complex64, shape=spec.shape)
    # Inverse-FFT each frame and concatenate along time. Note the pre-1.0
    # tf.concat(axis, values) argument order and the since-removed
    # tf.complex_abs: this targets an old TensorFlow release.
    X = tf.complex_abs(tf.concat(0, [tf.ifft(frame)
                                     for frame in tf.unstack(S)]))
    sess = tf.Session()
    return sess.run(X, feed_dict={S: spec})
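A hypothetical usage sketch (not from the project), assuming only NumPy alongside the helper above: FFT a few toy frames into a complex64 spectrogram, then recover the concatenated per-frame magnitudes.

import numpy as np

frames = np.random.randn(4, 256).astype(np.float32)  # 4 frames of 256 samples
spec = np.fft.fft(frames).astype(np.complex64)       # toy complex64 spectrogram
audio = istft(spec)                                  # shape (1024,): |ifft| per frame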
neural.py (project: neural-decoder, author: Krastanov)
def s_binary_crossentropy(self, y_true, y_pred):
    if self.p:
        y_pred = undo_normcentererr(y_pred, self.p)
        y_true = undo_normcentererr(y_true, self.p)
    s_true = K.dot(y_true, K.transpose(self.H)) % 2
    # Soft syndrome: P(s_i = 1) = (1 - prod_j (2*p_j - 1)^H_ij) / 2.
    # The product is taken in log space; the cast to complex64 keeps
    # K.log well-defined where 2*p - 1 is negative.
    twopminusone = 2 * y_pred - 1
    s_pred = (1 - tf.real(K.exp(K.dot(
        K.log(tf.cast(twopminusone, tf.complex64)),
        tf.cast(K.transpose(self.H), tf.complex64))))) / 2
    return K.mean(K.binary_crossentropy(s_pred, s_true), axis=-1)
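A hypothetical NumPy check (not part of the project) of the complex-log trick: the direct product form of the soft syndrome and the log-space form computed above agree.

import numpy as np

H = np.array([[1, 1, 0],
              [0, 1, 1]])                  # toy parity-check matrix
p = np.array([0.1, 0.2, 0.3])              # per-bit flip probabilities
direct = (1 - np.prod(np.where(H == 1, 2 * p - 1, 1.0), axis=1)) / 2
via_log = (1 - np.real(np.exp(np.log((2 * p - 1).astype(np.complex64)) @ H.T))) / 2
print(np.allclose(direct, via_log, atol=1e-6))  # True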
sequential_batch_fft_ops.py (project: tensorflow_compact_bilinear_pooling, author: ronghanghu)
def _SequentialBatchFFTGrad(op, grad):
    # Backprop through the FFT is the adjoint of the DFT, which equals
    # N * IFFT applied to the incoming gradient (N = transform length).
    if grad.dtype == tf.complex64:
        size = tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, 0.))
    else:
        size = tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, tf.zeros([], tf.float64)))
sequential_batch_fft_ops.py (project: tensorflow_compact_bilinear_pooling, author: ronghanghu)
def _SequentialBatchIFFTGrad(op, grad):
    # The IFFT gradient mirrors the FFT gradient with the reciprocal
    # scale: (1/N) * FFT applied to the incoming gradient.
    if grad.dtype == tf.complex64:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, 0.))
    else:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, tf.zeros([], tf.float64)))
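Both gradients scale an inverse transform by the FFT length N (or 1/N). A hypothetical NumPy check, not from the project, of the identity they rely on: for the DFT matrix F, the adjoint F^H g equals N * ifft(g), because F is symmetric and the inverse DFT applies conj(F) / N.

import numpy as np

N = 8
F = np.fft.fft(np.eye(N))                 # DFT matrix: F @ x == np.fft.fft(x)
g = np.random.randn(N) + 1j * np.random.randn(N)
print(np.allclose(F.conj().T @ g, N * np.fft.ifft(g)))  # True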
audio.py (project: tacotron, author: keithito)
def _griffin_lim_tensorflow(S):
  '''TensorFlow implementation of Griffin-Lim
  Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
  '''
  with tf.variable_scope('griffinlim'):
    # TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
    S = tf.expand_dims(S, 0)
    S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
    y = _istft_tensorflow(S_complex)
    for i in range(hparams.griffin_lim_iters):
      est = _stft_tensorflow(y)
      angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
      y = _istft_tensorflow(S_complex * angles)
    return tf.squeeze(y, 0)
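The _stft_tensorflow and _istft_tensorflow helpers are defined elsewhere in audio.py. A minimal sketch of what they might look like on top of TF 1.x's tf.contrib.signal; the frame length, hop, and FFT size here are placeholder assumptions (the real project derives them from hparams):

def _stft_tensorflow(signals, frame_length=1024, frame_step=256, fft_length=1024):
  # Assumed parameters; returns a complex64 batch of spectrogram frames.
  return tf.contrib.signal.stft(signals, frame_length, frame_step, fft_length)

def _istft_tensorflow(stfts, frame_length=1024, frame_step=256, fft_length=1024):
  return tf.contrib.signal.inverse_stft(stfts, frame_length, frame_step, fft_length)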
cplx_momentum_test.py (project: complex_tf, author: woodshop)
def testBasic(self):
    for dtype in [tf.complex64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in tf.trainable_variables())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0, so we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
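The arithmetic those assertions encode is the plain momentum recurrence, accum = momentum * accum + grad followed by var -= lr * accum. A small NumPy sketch reproducing the expected var0 values:

import numpy as np

lr, momentum = 2.0, 0.9
var, accum = np.array([1.0, 2.0]), np.zeros(2)
for _ in range(2):
    accum = momentum * accum + 0.1   # accumulator: 0.1, then 0.9*0.1 + 0.1
    var = var - lr * accum
print(var)                           # [0.42, 1.42], matching the step-2 check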
cplx_momentum_test.py (project: complex_tf, author: woodshop)
def testSharing(self):
    for dtype in [tf.complex64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update1 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        mom_update2 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0, so we should see a normal
        # update: v -= grad * learning_rate
        mom_update1.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the second momentum accumulators contain the previous update.
        mom_update2.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
cplx_gradient_descent_test.py (project: complex_tf, author: woodshop)
def testWithGlobalStep(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        with tf.device('/cpu'):
          global_step = tf.Variable(0, trainable=False)
        v0 = [1.0+2.0j, 2.0+1.0j]
        v1 = [3.0-4.0j, 4.0-3.0j]
        g0 = [0.1+0.1j, 0.1-0.1j]
        g1 = [0.01-0.01j, 0.01+0.01j]
        lr = 3.0-1.5j
        var0 = tf.Variable(v0, dtype=dtype)
        var1 = tf.Variable(v1, dtype=dtype)
        grads0 = tf.constant(g0, dtype=dtype)
        grads1 = tf.constant(g1, dtype=dtype)
        sgd_op = ctf.train.CplxGradientDescentOptimizer(lr).apply_gradients(
            zip([grads0, grads1], [var0, var1]),
            global_step=global_step)
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType(v0, var0.eval())
        self.assertAllCloseAccordingToType(v1, var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params and global_step
        self.assertAllCloseAccordingToType(
            [v0[0] - lr * g0[0],
             v0[1] - lr * g0[1]], var0.eval())
        self.assertAllCloseAccordingToType(
            [v1[0] - lr * g1[0],
             v1[1] - lr * g1[1]], var1.eval())
        self.assertAllCloseAccordingToType(1, global_step.eval())
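The expected values follow from ordinary complex multiplication in the SGD update, var <- var - lr * grad. A quick NumPy sketch for var0:

import numpy as np

lr = 3.0 - 1.5j
v0 = np.array([1.0 + 2.0j, 2.0 + 1.0j], dtype=np.complex64)
g0 = np.array([0.1 + 0.1j, 0.1 - 0.1j], dtype=np.complex64)
print(v0 - lr * g0)   # [0.55+1.85j, 1.85+1.45j]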

  ### Currently no support for sparse complex tensors
  # def testSparseBasic(self):
  #   for dtype in [tf.complex64]:
  #     with self.test_session(force_gpu=True):
  #       var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
  #       var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
  #       grads0 = tf.IndexedSlices(
  #           tf.constant([0.1], shape=[1, 1], dtype=dtype),
  #           tf.constant([0]),
  #           tf.constant([2, 1]))
  #       grads1 = tf.IndexedSlices(
  #           tf.constant([0.01], shape=[1, 1], dtype=dtype),
  #           tf.constant([1]),
  #           tf.constant([2, 1]))
  #       sgd_op = ctf.train.CplxGradientDescentOptimizer(3.0).apply_gradients(
  #           zip([grads0, grads1], [var0, var1]))
  #       tf.initialize_all_variables().run()
  #       # Fetch params to validate initial values
  #       self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
  #       self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
  #       # Run 1 step of sgd
  #       sgd_op.run()
  #       # Validate updated params
  #       self.assertAllCloseAccordingToType(
  #           [[1.0 - 3.0 * 0.1], [2.0]], var0.eval())
  #       self.assertAllCloseAccordingToType(
  #           [[3.0], [4.0 - 3.0 * 0.01]], var1.eval())
cplx_adam_test.py (project: complex_tf, author: woodshop)
def testSharing(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0-1.0j, 2.0-2.0j], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1+0.1j, 0.1-0.1j], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0+3.0j, 4.0-4.0j], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01-0.01j, 0.01+0.01j],
                             dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Fetch params to validate initial values
        self.assertAllClose([1.0-1.0j, 2.0-2.0j], var0.eval())
        self.assertAllClose([3.0+3.0j, 4.0-4.0j], var1.eval())

        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
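The adam_update_numpy reference helper is not shown in the snippet. A sketch following the standard Adam recurrences used by TensorFlow's own adam_test; the complex_tf variant may differ, for example in how the second moment handles complex gradients:

import numpy as np

def adam_update_numpy(param, g_t, t, m, v,
                      alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    # Bias-corrected step size, then the usual first/second moment updates.
    # NOTE: g_t * g_t is an assumption here; a complex-aware Adam might use
    # g_t * conj(g_t) for the second moment instead.
    alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t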

