def __call__(self, inputs, state, scope=None):
    zero_initer = tf.constant_initializer(0.)
    with tf.variable_scope(scope or type(self).__name__):
        # Two real weight matrices map the real-valued inputs into the complex
        # state and back out again; this could be extended to LSTM-style
        # configurations as well.
        mat_in = tf.get_variable('W_in', [self.input_size, self.state_size * 2])
        mat_out = tf.get_variable('W_out', [self.state_size * 2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        # First half of the projection becomes the real part, second half the
        # imaginary part.
        in_proj_c = tf.complex(in_proj[:, :self.state_size], in_proj[:, self.state_size:])
        out_state = modrelu_c(in_proj_c + ulinear_c(state, transform=self.transform),
                              tf.get_variable(name='B', dtype=tf.float32,
                                              shape=[self.state_size],
                                              initializer=zero_initer))

        out_bias = tf.get_variable(name='B_out', dtype=tf.float32,
                                   shape=[self.output_size], initializer=zero_initer)
        out = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias
    return out, out_state
Python tf.complex() usage examples

Source file: unitary_rnn_cell_modern.py
Project: tensorflow_with_latest_papers (author: NickShahML)
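The core trick in the cell above is packing a real projection of width 2*state_size into a complex vector of width state_size, then unpacking it for the output layer. A minimal NumPy sketch of that round trip, with made-up sizes rather than anything from the original project:

import numpy as np

batch, state_size = 2, 4  # hypothetical sizes
in_proj = np.random.rand(batch, 2 * state_size).astype(np.float32)

# First half becomes the real part, second half the imaginary part,
# mirroring tf.complex(in_proj[:, :state_size], in_proj[:, state_size:]).
in_proj_c = in_proj[:, :state_size] + 1j * in_proj[:, state_size:]

# Unpacking for the output layer mirrors tf.concat(1, [tf.real(.), tf.imag(.)]).
unpacked = np.concatenate([in_proj_c.real, in_proj_c.imag], axis=1)
assert np.allclose(unpacked, in_proj)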
def transform_spec_from_raw(raw):
    '''
    Read raw features from TFRecords and shape them into spectrograms.
    '''
    spec = tf.decode_raw(raw, tf.float32)
    spec.set_shape([EmbeddingConfig.num_time_frames * EmbeddingConfig.num_freq_bins * 2])
    spec = tf.reshape(spec, [-1, EmbeddingConfig.num_freq_bins * 2])
    # Each row stores the real parts in its first half and the imaginary
    # parts in its second half.
    real, imag = tf.split(spec, [EmbeddingConfig.num_freq_bins, EmbeddingConfig.num_freq_bins], axis=1)
    orig_spec = tf.complex(real, imag)
    # orig_spec = librosa.feature.melspectrogram(S=orig_spec, n_mels=150)
    return orig_spec  # shape: [num_time_frames, num_freq_bins]
def transform_spec_from_raw(raw):
    '''
    Read raw features from TFRecords and shape them into spectrograms.
    '''
    spec = tf.decode_raw(raw, tf.float32)
    spec.set_shape([Config.num_time_frames * Config.num_freq_bins * 2])
    spec = tf.reshape(spec, [-1, Config.num_freq_bins * 2])
    real, imag = tf.split(spec, [Config.num_freq_bins, Config.num_freq_bins], axis=1)
    orig_spec = tf.complex(real, imag)
    return orig_spec  # shape: [num_time_frames, num_freq_bins]
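Both variants above decode the same layout. A hedged NumPy sketch with made-up frame and bin counts, showing how the flat real buffer becomes a complex spectrogram:

import numpy as np

num_time_frames, num_freq_bins = 5, 8  # hypothetical config values

# A flat float32 buffer, as tf.decode_raw would produce it.
raw = np.random.rand(num_time_frames * num_freq_bins * 2).astype(np.float32)

spec = raw.reshape(-1, num_freq_bins * 2)               # [time_frames, 2 * bins]
real, imag = spec[:, :num_freq_bins], spec[:, num_freq_bins:]
orig_spec = real + 1j * imag                            # [time_frames, bins]
assert orig_spec.shape == (num_time_frames, num_freq_bins)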
def __init__(self, name, num_units):
    # Diagonal unitary matrix parameterized by per-unit phases w: diag(e^{iw}).
    init_w = tf.random_uniform([num_units], minval=-np.pi, maxval=np.pi)
    self.w = tf.Variable(init_w, name=name)
    self.vec = tf.complex(tf.cos(self.w), tf.sin(self.w))  # [num_units]
# [batch_sz, num_units]
def __init__(self, name, num_units):
    # Householder-style reflection parameterized by a complex vector v.
    self.num_units = num_units
    self.re = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1), name=name + "_re")
    self.im = tf.Variable(tf.random_uniform([num_units], minval=-1, maxval=1), name=name + "_im")
    self.v = tf.complex(self.re, self.im)  # [num_units]
    # self.v = normalize(self.v)
    self.vstar = tf.conj(self.v)  # [num_units]
# [batch_sz, num_units]
def mul(self, z):
    # Apply the reflection z -> z (I - 2 conj(v) v^T / ||v||^2) to each row.
    v = tf.expand_dims(self.v, 1)                 # [num_units, 1]
    vstar = tf.conj(v)                            # [num_units, 1]
    vstar_z = tf.matmul(z, vstar)                 # [batch_size, 1]
    sq_norm = tf.reduce_sum(tf.abs(self.v) ** 2)  # [1]
    factor = 2 / tf.complex(sq_norm, 0.0)
    return z - factor * tf.matmul(vstar_z, tf.transpose(v))
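The map above is the complex Householder-style reflection z -> z(I - 2*conj(v)*v^T / ||v||^2), which is unitary and therefore norm-preserving. A NumPy check with arbitrary sizes:

import numpy as np

num_units, batch = 4, 3  # arbitrary sizes
v = np.random.rand(num_units) + 1j * np.random.rand(num_units)
z = np.random.rand(batch, num_units) + 1j * np.random.rand(batch, num_units)

# Matches the TensorFlow code: z - (2 / ||v||^2) * outer(z @ conj(v), v).
factor = 2.0 / np.sum(np.abs(v) ** 2)
refl = z - factor * np.outer(z @ v.conj(), v)

# A unitary map preserves the L2 norm of every row.
assert np.allclose(np.linalg.norm(refl, axis=1), np.linalg.norm(z, axis=1))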
# Permutation unitary matrix
def mul(self, z):
    # Permute the unit dimension of z according to the index vector self.P.
    return tf.transpose(tf.gather(tf.transpose(z), self.P))
# FFTs
# z: complex[batch_sz, num_units]
def normalize(z):
    # Scale z to (approximately) unit L2 norm across all entries.
    norm = tf.sqrt(tf.reduce_sum(tf.abs(z) ** 2))
    factor = norm + 1e-6
    return tf.complex(tf.real(z) / factor, tf.imag(z) / factor)
# z: complex[batch_sz, num_units]
# bias: real[num_units]
def complex_mod_of_real(x):
    xshp = x.get_shape().as_list()
    assert xshp[1] % 2 == 0
    # Integer division: plain / would yield a float index under Python 3.
    xcplx = tf.complex(x[:, 0:xshp[1] // 2], x[:, xshp[1] // 2:])
    return tf.complex_abs(xcplx)  # tf.complex_abs was renamed tf.abs in TF >= 1.0
def bound(x):
    # Clamp each complex entry to the closed unit disk: entries with modulus
    # above 1 are rescaled to modulus 1, smaller entries pass through.
    # (tf.mul was renamed tf.multiply in TF >= 1.0.)
    bound = tf.maximum(tf.sqrt(tf.mul(tf.real(x), tf.real(x))
                               + tf.mul(tf.imag(x), tf.imag(x))),
                       1.0)
    return tf.complex(tf.real(x) / bound, tf.imag(x) / bound)
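A quick NumPy equivalent of the projection onto the unit disk:

import numpy as np

x = np.array([0.3 + 0.4j, 3.0 + 4.0j])            # moduli 0.5 and 5.0
b = np.maximum(np.sqrt(x.real ** 2 + x.imag ** 2), 1.0)
bounded = x.real / b + 1j * (x.imag / b)
print(np.abs(bounded))                             # [0.5, 1.0]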
def _normalize(self, keys):
    # Normalize our keys to mod 1 if specified.
    if self.complex_normalize:
        print('normalizing via complex abs..')
        keys = HolographicMemory.normalize_real_by_complex_abs(keys)

    # Normalize our keys using the l2 norm.
    if self.l2_normalize:
        print('normalizing via l2..')
        keys = tf.nn.l2_normalize(keys, 1)

    return keys
def split_to_complex(x, xshp=None):
    xshp = x.get_shape().as_list() if xshp is None else xshp
    if len(xshp) == 2:
        assert xshp[1] % 2 == 0, \
            "Vector is not evenly divisible into complex: %d" % xshp[1]
        mid = xshp[1] // 2  # integer division; / would yield a float in Python 3
        return tf.complex(x[:, 0:mid], x[:, mid:])
    else:
        assert xshp[0] % 2 == 0, \
            "Vector is not evenly divisible into complex: %d" % xshp[0]
        mid = xshp[0] // 2
        return tf.complex(x[0:mid], x[mid:])
def unsplit_from_complex_ir(x):
    # return tf.concat(1, [tf.imag(x), tf.abs(tf.real(x))])
    return tf.abs(tf.concat(1, [tf.imag(x), tf.real(x)]))

    # Alternative formulations kept from the original source:
    # mag = tf.maximum(1.0, tf.complex_abs(x))
    # x = tf.complex(tf.real(x) / (mag + 1e-10), tf.imag(x) / (mag + 1e-10))
    # real = tf.concat(1, [tf.imag(x), tf.real(x)])
    # return tf.abs(HolographicMemory.normalize_real_by_complex_abs([real])[0])
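Note that unsplit_from_complex_ir is not the inverse of split_to_complex: it puts the imaginary half first and takes an elementwise absolute value. A small NumPy sketch making that asymmetry explicit:

import numpy as np

x = np.array([[1.0, -2.0, 3.0, -4.0]])             # [batch, 4]
mid = x.shape[1] // 2
z = x[:, :mid] + 1j * x[:, mid:]                   # split_to_complex

# unsplit_from_complex_ir: concat([imag, real]) then elementwise abs.
unsplit = np.abs(np.concatenate([z.imag, z.real], axis=1))
print(unsplit)                                      # [[3. 4. 1. 2.]], not x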
def complex_mul_real(z, r):
    # Multiply a complex tensor elementwise by a real tensor.
    return tf.complex(tf.real(z) * r, tf.imag(z) * r)
def refl_c(in_, normal_):
    # Householder-style reflection; assumes `normal_` has unit norm, since the
    # usual 2 / ||normal||^2 factor is folded into the constant 2.
    normal_rk2 = tf.expand_dims(normal_, 1)
    scale = 2 * tf.matmul(in_, tf.conj(normal_rk2))
    return in_ - tf.matmul(scale, tf.transpose(normal_rk2))
# get complex variable
def get_variable_c(name, shape, initializer=None):
    re = tf.get_variable(name + '_re', shape=shape, initializer=initializer)
    im = tf.get_variable(name + '_im', shape=shape, initializer=initializer)
    return tf.complex(re, im, name=name)
# get unit complex numbers in polar form
def get_unit_variable_c(name, scope, shape):
    # `pi` is assumed imported in the original module (e.g. from math import pi).
    theta = tf.get_variable(name, shape=shape,
                            initializer=tf.random_uniform_initializer(-pi, pi))
    return tf.complex(tf.cos(theta), tf.sin(theta))
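Since cos(theta) + i*sin(theta) = e^{i*theta}, every entry of this variable has modulus exactly 1 regardless of theta; a one-line NumPy check:

import numpy as np

theta = np.random.uniform(-np.pi, np.pi, size=8)
assert np.allclose(np.abs(np.cos(theta) + 1j * np.sin(theta)), 1.0)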
def modrelu_c(in_c, bias):
    if not in_c.dtype.is_complex:
        raise ValueError('modrelu_c: Argument in_c must be complex type')
    if bias.dtype.is_complex:
        raise ValueError('modrelu_c: Argument bias must be real type')
    n = tf.complex_abs(in_c)  # renamed tf.abs in TF >= 1.0
    scale = 1. / (n + 1e-5)
    return complex_mul_real(in_c, tf.nn.relu(n + bias) * scale)
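modReLU rescales each entry by ReLU(|z| + b) / |z|, gating on the modulus while preserving the phase. A NumPy sketch, assuming a scalar bias for brevity:

import numpy as np

def modrelu(z, bias, eps=1e-5):
    n = np.abs(z)
    return z * (np.maximum(n + bias, 0.0) / (n + eps))

z = np.array([1.0 + 1.0j, 0.1 + 0.1j])
out = modrelu(z, bias=-0.5)
# The large entry keeps its phase with a shrunk modulus; the small one is zeroed.
print(np.abs(out), np.angle(out))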
Source file: unitary_rnn_cell_modern.py
Project: tensorflow_with_latest_papers (author: NickShahML)
def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
        unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)
        mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size * 2])
        mat_out = tf.get_variable('mat_out', [self.state_size * 2, self.output_size])

        in_proj = tf.matmul(inputs, mat_in)
        # tf.complex takes separate real/imaginary tensors, so unpack the split.
        in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
        # get_variable needs a static shape, not the tf.shape() tensor used originally.
        out_state = modReLU(in_proj_c + ulinear(unitary_hidden_state, self.state_size),
                            tf.get_variable(name='bias', dtype=tf.float32, shape=[self.state_size],
                                            initializer=tf.constant_initializer(0.)),
                            scope=scope)

    with tf.variable_scope('unitary_output'):
        # Data linear + unitary linear summation -- TODO (original): should be complex output.
        # The original omitted the output size; self.output_size is a plausible reconstruction.
        unitary_linear_output_real = linear.linear(
            [tf.real(out_state), tf.imag(out_state), inputs], self.output_size, True, 0.0)

    with tf.variable_scope('scale_nonlinearity'):
        # `hidden_bias` is undefined in the original; reconstructed as a zero variable.
        # `rescale` is computed but never consumed, as in the original.
        hidden_bias = tf.get_variable(name='hidden_bias', dtype=tf.float32,
                                      shape=[self.output_size], initializer=tf.constant_initializer(0.))
        modulus = tf.abs(unitary_linear_output_real)
        rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

    # Transition to data shortcut connection; `out_bias` reconstructed likewise.
    out_bias = tf.get_variable(name='out_bias', dtype=tf.float32,
                               shape=[self.output_size], initializer=tf.constant_initializer(0.))
    out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

    # Hidden state is complex but the returned output is completely real.
    return out_, out_state
Source file: sequential_batch_fft_ops.py
Project: tensorflow_compact_bilinear_pooling (author: ronghanghu)
def _SequentialBatchFFTGrad(op, grad):
    # The adjoint of the unnormalized FFT is N times the normalized IFFT,
    # so the gradient is N * ifft(grad) in the matching complex dtype.
    if grad.dtype == tf.complex64:
        size = tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, 0.))
    else:
        size = tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
                * tf.complex(size, tf.zeros([], tf.float64)))
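The factor comes from the identity that the adjoint of the unnormalized DFT is N times the normalized inverse DFT. A NumPy check of the vector-Jacobian product, using np.fft conventions:

import numpy as np

n = 8
x = np.random.rand(n) + 1j * np.random.rand(n)
g = np.random.rand(n) + 1j * np.random.rand(n)     # upstream gradient

# <g, FFT(x)> == <n * IFFT(g), x> for the unnormalized forward FFT.
assert np.allclose(np.vdot(g, np.fft.fft(x)), np.vdot(n * np.fft.ifft(g), x))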
Source file: sequential_batch_fft_ops.py
Project: tensorflow_compact_bilinear_pooling (author: ronghanghu)
def _SequentialBatchIFFTGrad(op, grad):
    # The adjoint of the normalized IFFT is (1/N) times the FFT,
    # so the gradient is (1/N) * fft(grad) in the matching complex dtype.
    if grad.dtype == tf.complex64:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, 0.))
    else:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
                * tf.complex(rsize, tf.zeros([], tf.float64)))
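The inverse case mirrors the FFT gradient with the reciprocal factor; the same NumPy check, adapted:

import numpy as np

n = 8
x = np.random.rand(n) + 1j * np.random.rand(n)
g = np.random.rand(n) + 1j * np.random.rand(n)

# <g, IFFT(x)> == <(1/n) * FFT(g), x>, mirroring the rsize factor above.
assert np.allclose(np.vdot(g, np.fft.ifft(x)), np.vdot(np.fft.fft(g) / n, x))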
def random(self, *shapes, **kwargs):
    # Returns one random array per shape; with complex=True, a complex64 array.
    if all(isinstance(i, int) for i in shapes):
        if kwargs.get("complex", False):
            return (self.random(*shapes) + 1j * self.random(*shapes)).astype(np.complex64)
        else:
            return np.random.rand(*shapes)
    else:
        return tuple(self.random(*shape) for shape in shapes)
def test_Svd(self):
    t = tf.svd(self.random(4, 5, 3, 2).astype("float32"))
    self.check(t, ndigits=4, abs=True)
#
# complex number ops
#
def test_Complex(self):
    t = tf.complex(*self.random((3, 4), (3, 4)))
    self.check(t)

def test_Conj(self):
    t = tf.conj(self.random(3, 4, complex=True))
    self.check(t)

def test_Imag(self):
    t = tf.imag(tf.Variable(self.random(3, 4, complex=True)))
    self.check(t)

def test_FFT2D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.fft2d(self.random(3, 4, complex=True))
        self.check(t)

def test_IFFT2D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.ifft2d(self.random(3, 4, complex=True))
        self.check(t)

def test_FFT3D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.fft3d(self.random(3, 4, 5, complex=True))
        self.check(t)

def test_IFFT3D(self):
    # only defined for gpu
    if DEVICE == GPU:
        t = tf.ifft3d(self.random(3, 4, 5, complex=True))
        self.check(t)
#
# reduction
#