Python rsqrt() example source code
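
tf.rsqrt(x) computes the element-wise reciprocal square root, 1 / sqrt(x). It appears frequently in normalization code because multiplying by rsqrt(variance + epsilon) replaces a division by a standard deviation. The snippets below use the TensorFlow 1.x API and assume import tensorflow as tf. A minimal sanity check, not taken from any of the projects below:

import tensorflow as tf

x = tf.constant([1.0, 4.0, 16.0])
y = tf.rsqrt(x)  # element-wise 1 / sqrt(x) -> [1.0, 0.5, 0.25]

with tf.Session() as sess:
    print(sess.run(y))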

ops.py (project: tfdeploy, author: riga)
def test_Rsqrt(self):
    # self.random and self.check are helpers from the surrounding test
    # harness: build a random input, then verify the op's output
    t = tf.rsqrt(self.random(4, 3))
    self.check(t)
utils.py (project: relax, author: duvenaud)
def corr(a, b):
  # Pearson correlation: cov(a, b) / (std(a) * std(b)), written with rsqrt
  return cov(a, b) * tf.rsqrt(cov(a, a)) * tf.rsqrt(cov(b, b))
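
corr depends on a cov helper defined elsewhere in relax's utils.py. A compatible sketch, assuming cov returns the scalar covariance of two 1-D tensors (this definition is an assumption, not the project's actual code):

import tensorflow as tf

def cov(a, b):
    # E[(a - E[a]) * (b - E[b])] for 1-D tensors (hypothetical helper)
    return tf.reduce_mean((a - tf.reduce_mean(a)) * (b - tf.reduce_mean(b)))

With this definition, corr(a, b) is the Pearson correlation coefficient, since rsqrt(cov(a, a)) is 1 / std(a).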
model_block.py (project: gy_mlcamp17, author: gylee1103)
def instance_normalization(x, index):
  with tf.variable_scope("instance_norm"):
    depth = x.get_shape()[3]  # number of channels (NHWC input)
    scale = tf.get_variable("scale" + str(index), [depth],
        initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02, dtype=tf.float32))
    offset = tf.get_variable("offset" + str(index), [depth],
        initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
    # per-sample, per-channel statistics over the spatial axes
    mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
    epsilon = 1e-5
    inv = tf.rsqrt(variance + epsilon)
    normalized = (x - mean) * inv
    return scale * normalized + offset
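
A usage sketch for the function above, assuming an NHWC activation tensor (the placeholder shape and names are hypothetical):

import tensorflow as tf

# normalizes each sample and channel independently over the spatial axes
images = tf.placeholder(tf.float32, [None, 64, 64, 32])  # NHWC
normalized = instance_normalization(images, index=0)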
ProjE_softmax_noweight.py (project: ProjE, author: bxshi)
def __l1_normalize(x, dim, epsilon=1e-12, name=None):
    # sums absolute values along dim, then scales by rsqrt of that sum
    square_sum = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
    x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))
    # tf.mul was renamed tf.multiply in TensorFlow 1.0
    return tf.mul(x, x_inv_norm, name=name)
ProjE_softmax.py and ProjE_sigmoid.py (project: ProjE, author: bxshi) contain the same __l1_normalize helper verbatim.
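
Despite its name, this helper scales by the reciprocal square root of the L1 norm (rsqrt of the summed absolute values), not by the L1 norm itself; square_sum is a misleading variable name. A quick NumPy check of what it actually computes (hypothetical values):

import numpy as np

x = np.array([3.0, -4.0])
l1 = np.abs(x).sum()        # 7.0
print(x / np.sqrt(l1))      # what __l1_normalize returns: x * rsqrt(l1)
print(x / l1)               # a true L1 normalization, for comparison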
normalization.py (project: odin, author: imito)
def _apply(self, X, noise=0):
    ndim = X.get_shape().ndims
    # when training, normalize the input by its own batch mean and std
    mean, var = tf.nn.moments(X, axes=self.axes)
    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(ndim - len(self.axes)))
    pattern = ['x' if input_axis in self.axes else next(param_axes)
               for input_axis in range(ndim)]
    # apply dimshuffle pattern to all parameters
    beta = 0 if self.beta_init is None else \
        K.dimshuffle(self.get('beta'), pattern)
    gamma = 1 if self.gamma_init is None else \
        K.dimshuffle(self.get('gamma'), pattern)

    # ====== if training: use local mean and var ====== #
    def training_fn():
      running_mean = ((1 - self.alpha) * self.get('mean') +
                      self.alpha * mean)
      running_var = ((1 - self.alpha) * self.get('var') +
                     self.alpha * var)
      with tf.control_dependencies([
              tf.assign(self.get('mean'), running_mean),
              tf.assign(self.get('var'), running_var)]):
        return tf.identity(mean), tf.identity(var)

    # ====== if inference: use global mean and var ====== #
    def infer_fn():
      return self.get('mean'), self.get('var')

    mean, var = tf.cond(K.is_training(), training_fn, infer_fn)
    inv_std = tf.rsqrt(var + self.epsilon)
    normalized = (X - K.dimshuffle(mean, pattern)) * \
        (gamma * K.dimshuffle(inv_std, pattern))
    # ====== applying noise if required ====== #
    if self.noise_level is not None:
      normalized = K.rand.apply_noise(normalized,
          level=self.noise_level, noise_dims=self.noise_dims,
          noise_type='gaussian')
    # add beta
    normalized = normalized + beta
    # activated output
    return self.activation(normalized)
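
The training_fn branch maintains exponential moving averages of the batch statistics before returning them. A standalone sketch of that update rule under hypothetical names (alpha is the update rate, matching self.alpha above):

import tensorflow as tf

def update_running(running, batch_value, alpha):
    # running <- (1 - alpha) * running + alpha * batch statistic
    return tf.assign(running, (1 - alpha) * running + alpha * batch_value)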
normalization.py (project: tensorbayes, author: RuiShu)
def batch_norm(x,
               phase,
               shift=True,
               scale=True,
               momentum=0.99,
               eps=1e-3,
               internal_update=False,
               scope=None,
               reuse=None):

    C = x.shape.as_list()[-1]  # number of channels (last axis)
    ndim = len(x.shape)
    var_shape = [1] * (ndim - 1) + [C]  # broadcast over all axes but channels

    with tf.variable_scope(scope, 'batch_norm', reuse=reuse):
        def training():
            # batch statistics over all axes except channels (list() for Python 3)
            m, v = tf.nn.moments(x, list(range(ndim - 1)), keep_dims=True)
            update_m = _assign_moving_average(moving_m, m, momentum, 'update_mean')
            update_v = _assign_moving_average(moving_v, v, momentum, 'update_var')
            tf.add_to_collection('update_ops', update_m)
            tf.add_to_collection('update_ops', update_v)

            if internal_update:
                with tf.control_dependencies([update_m, update_v]):
                    output = (x - m) * tf.rsqrt(v + eps)
            else:
                output = (x - m) * tf.rsqrt(v + eps)
            return output

        def testing():
            # at test time, use the stored moving averages
            m, v = moving_m, moving_v
            output = (x - m) * tf.rsqrt(v + eps)
            return output

        # moving-average statistics, updated in training and used at test time
        moving_m = tf.get_variable('mean', var_shape, initializer=tf.zeros_initializer, trainable=False)
        moving_v = tf.get_variable('var', var_shape, initializer=tf.ones_initializer, trainable=False)

        if isinstance(phase, bool):
            output = training() if phase else testing()
        else:
            output = tf.cond(phase, training, testing)

        if scale:
            output *= tf.get_variable('gamma', var_shape, initializer=tf.ones_initializer)

        if shift:
            output += tf.get_variable('beta', var_shape, initializer=tf.zeros_initializer)

    return output
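
batch_norm calls _assign_moving_average, which is defined elsewhere in tensorbayes; a minimal compatible sketch (an assumption about its behavior, not the library's actual implementation):

import tensorflow as tf

def _assign_moving_average(variable, value, momentum, name=None):
    # moving <- momentum * moving + (1 - momentum) * batch statistic
    with tf.name_scope(name, 'assign_moving_average'):
        delta = (1 - momentum) * (value - variable)
        return tf.assign_add(variable, delta)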

