def test_Rsqrt(self):
    t = tf.rsqrt(self.random(4, 3))
    self.check(t)
Example source code for Python rsqrt()
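tf.rsqrt computes the element-wise reciprocal square root, 1 / sqrt(x); the snippets below all use it to divide by a standard deviation or norm. A quick sanity check with illustrative values:

import tensorflow as tf

x = tf.constant([1.0, 4.0, 9.0])
with tf.Session() as sess:
    print(sess.run(tf.rsqrt(x)))        # [1.0, 0.5, 0.3333...]
    print(sess.run(1.0 / tf.sqrt(x)))   # same values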
def corr(a, b):
    return cov(a, b) * tf.rsqrt(cov(a, a)) * tf.rsqrt(cov(b, b))
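corr above is the Pearson correlation, cov(a, b) / sqrt(var(a) * var(b)), written with tf.rsqrt. The cov() helper is not shown in the original, so the one below is an assumption, sketched in the same TF1 style:

def cov(a, b):
    # hypothetical helper: E[(a - E[a]) * (b - E[b])]
    return tf.reduce_mean((a - tf.reduce_mean(a)) * (b - tf.reduce_mean(b)))

a = tf.constant([1.0, 2.0, 3.0, 4.0])
b = tf.constant([2.0, 4.0, 6.0, 8.0])
r = corr(a, b)   # -> 1.0 for perfectly linearly related inputs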
def instance_normalization(x, index):
    with tf.variable_scope("instance_norm"):
        depth = x.get_shape()[3]
        scale = tf.get_variable("scale" + str(index), [depth],
                                initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02, dtype=tf.float32))
        offset = tf.get_variable("offset" + str(index), [depth],
                                 initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
        mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        epsilon = 1e-5
        inv = tf.rsqrt(variance + epsilon)
        normalized = (x - mean) * inv
        return scale * normalized + offset
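A minimal usage sketch for the instance normalization above; the NHWC shape and the index argument value are illustrative assumptions:

x = tf.placeholder(tf.float32, [None, 64, 64, 3])   # assumed NHWC input
y = instance_normalization(x, index=0)               # normalizes each image over its H and W axes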
def __l1_normalize(x, dim, epsilon=1e-12, name=None):
    # scale x along `dim` by the reciprocal square root of its (clipped) absolute-value sum
    abs_sum = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
    x_inv_norm = tf.rsqrt(tf.maximum(abs_sum, epsilon))
    return tf.multiply(x, x_inv_norm, name=name)
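A usage sketch for the helper above (the 2-D shape and axis are illustrative); note that, as written, it scales by the reciprocal square root of the absolute-value sum rather than by its reciprocal:

v = tf.constant([[3.0, -4.0], [1.0, 1.0]])
v_scaled = __l1_normalize(v, dim=1)   # each row scaled by rsqrt of its absolute-value sum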
def _apply(self, X, noise=0):
    ndim = X.get_shape().ndims
    # if training, normalize input by its own mean and std
    mean, var = tf.nn.moments(X, axes=self.axes)
    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(ndim - len(self.axes)))
    pattern = ['x' if input_axis in self.axes else next(param_axes)
               for input_axis in range(ndim)]
    # apply dimshuffle pattern to all parameters
    beta = 0 if self.beta_init is None else \
        K.dimshuffle(self.get('beta'), pattern)
    gamma = 1 if self.gamma_init is None else \
        K.dimshuffle(self.get('gamma'), pattern)
    # ====== if training: use local mean and var ====== #
    def training_fn():
        running_mean = ((1 - self.alpha) * self.get('mean') +
                        self.alpha * mean)
        running_var = ((1 - self.alpha) * self.get('var') +
                       self.alpha * var)
        with tf.control_dependencies([
                tf.assign(self.get('mean'), running_mean),
                tf.assign(self.get('var'), running_var)]):
            return tf.identity(mean), tf.identity(var)
    # ====== if inference: use global mean and var ====== #
    def infer_fn():
        return self.get('mean'), self.get('var')
    mean, var = tf.cond(K.is_training(), training_fn, infer_fn)
    inv_std = tf.rsqrt(var + self.epsilon)
    normalized = (X - K.dimshuffle(mean, pattern)) * \
        (gamma * K.dimshuffle(inv_std, pattern))
    # ====== applying noise if required ====== #
    if self.noise_level is not None:
        normalized = K.rand.apply_noise(normalized,
                                        level=self.noise_level,
                                        noise_dims=self.noise_dims,
                                        noise_type='gaussian')
    # add beta
    normalized = normalized + beta
    # activated output
    return self.activation(normalized)
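The training branch above maintains running statistics with an exponential moving average before normalizing with tf.rsqrt. Stripped of the framework wrapper, that update looks roughly like the sketch below; the variable names and the alpha value are illustrative (alpha plays the role of self.alpha):

alpha = 0.01                                                 # assumed smoothing factor
running_mean = tf.Variable(tf.zeros([3]), trainable=False)
batch = tf.random_normal([32, 3])                            # illustrative mini-batch
batch_mean = tf.reduce_mean(batch, axis=0)
# exponential moving average, as in training_fn above
update_mean = tf.assign(running_mean,
                        (1 - alpha) * running_mean + alpha * batch_mean)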
def batch_norm(x,
               phase,
               shift=True,
               scale=True,
               momentum=0.99,
               eps=1e-3,
               internal_update=False,
               scope=None,
               reuse=None):
    C = x.shape.as_list()[-1]
    ndim = len(x.shape)
    var_shape = [1] * (ndim - 1) + [C]

    with tf.variable_scope(scope, 'batch_norm', reuse=reuse):
        def training():
            m, v = tf.nn.moments(x, list(range(ndim - 1)), keep_dims=True)
            update_m = _assign_moving_average(moving_m, m, momentum, 'update_mean')
            update_v = _assign_moving_average(moving_v, v, momentum, 'update_var')
            tf.add_to_collection('update_ops', update_m)
            tf.add_to_collection('update_ops', update_v)
            if internal_update:
                with tf.control_dependencies([update_m, update_v]):
                    output = (x - m) * tf.rsqrt(v + eps)
            else:
                output = (x - m) * tf.rsqrt(v + eps)
            return output

        def testing():
            m, v = moving_m, moving_v
            output = (x - m) * tf.rsqrt(v + eps)
            return output

        # Create moving statistics, then normalize the input according to phase
        moving_m = tf.get_variable('mean', var_shape, initializer=tf.zeros_initializer, trainable=False)
        moving_v = tf.get_variable('var', var_shape, initializer=tf.ones_initializer, trainable=False)
        if isinstance(phase, bool):
            output = training() if phase else testing()
        else:
            output = tf.cond(phase, training, testing)

        if scale:
            output *= tf.get_variable('gamma', var_shape, initializer=tf.ones_initializer)
        if shift:
            output += tf.get_variable('beta', var_shape, initializer=tf.zeros_initializer)
        return output
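A minimal usage sketch for batch_norm above; the placeholder shapes and the phase flag are illustrative assumptions, and _assign_moving_average is assumed to be defined elsewhere in the same module:

x = tf.placeholder(tf.float32, [None, 28, 28, 16])
is_training = tf.placeholder(tf.bool, [])
h = batch_norm(x, phase=is_training, scope='bn1')
# with internal_update=False, run the collected update ops alongside the train step
update_ops = tf.get_collection('update_ops')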