Example source code for Python's assign_moving_average()
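
The snippets below are collected from open-source TensorFlow 1.x projects and all use `assign_moving_average` from `tensorflow.python.training.moving_averages`. The op nudges a (typically non-trainable) variable toward a new value, `variable -= (1 - decay) * (variable - value)`, and returns the updated value; with the default `zero_debias=True` the estimate is additionally bias-corrected, which is why several snippets pass `zero_debias=False` to get the plain update. A minimal, self-contained sketch:

import tensorflow as tf
from tensorflow.python.training import moving_averages

ema = tf.get_variable('ema', shape=[], initializer=tf.zeros_initializer(),
                      trainable=False)
value = tf.placeholder(tf.float32, shape=[])
# Each run performs: ema <- ema - (1 - decay) * (ema - value)
update = moving_averages.assign_moving_average(ema, value, decay=0.9,
                                               zero_debias=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for v in (1.0, 1.0, 1.0):
        print(sess.run(update, feed_dict={value: v}))  # 0.1, 0.19, 0.271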

restnet_tensorflow.py (project: mlAlgorithms, author: gu-yan)
def batch_norm(data, name):
    shape_param = data.get_shape()[-1]
    beta = tf.get_variable(name=name+'_beta', shape=shape_param, dtype=tf.float32,
                           initializer=tf.constant_initializer(0.0, tf.float32))
    gamma = tf.get_variable(name=name+'_gamma', shape=shape_param, dtype=tf.float32,
                            initializer=tf.constant_initializer(1.0, tf.float32))
    if FLAGS.train_mode:
        mean_param, variance_param = tf.nn.moments(x=data, axes=[0, 1, 2], name=name+'_moments')
        moving_mean = tf.get_variable(name=name+'_moving_mean', shape=shape_param, dtype=tf.float32,
                                      initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
        moving_variance = tf.get_variable(name=name+'_moving_variance', shape=shape_param, dtype=tf.float32,
                                          initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
        # assign_moving_average returns the updated variable value, so the
        # normalization below uses the freshly updated moving statistics
        # rather than the raw batch moments.
        mean = moving_averages.assign_moving_average(variable=moving_mean, value=mean_param, decay=0.9)
        variance = moving_averages.assign_moving_average(variable=moving_variance, value=variance_param, decay=0.9)
    else:
        mean = tf.get_variable(name=name+'_moving_mean', shape=shape_param, dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
        variance = tf.get_variable(name=name+'_moving_variance', shape=shape_param, dtype=tf.float32,
                                   initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
        # `mean` and `variance` are per-channel vectors, so summarize them as
        # histograms (tf.summary.scalar only accepts scalar tensors).
        tf.summary.histogram(mean.op.name, mean)
        tf.summary.histogram(variance.op.name, variance)
    b_norm = tf.nn.batch_normalization(x=data, mean=mean, variance=variance,
                                       offset=beta, scale=gamma, variance_epsilon=0.001, name=name)
    return b_norm
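
For comparison, the more common arrangement normalizes with the raw batch moments and runs the moving-average updates only as a side effect; a sketch reusing the names from `batch_norm` above:

update_mean = moving_averages.assign_moving_average(moving_mean, mean_param, decay=0.9)
update_variance = moving_averages.assign_moving_average(moving_variance, variance_param, decay=0.9)
with tf.control_dependencies([update_mean, update_variance]):
    b_norm = tf.nn.batch_normalization(x=data, mean=mean_param, variance=variance_param,
                                       offset=beta, scale=gamma, variance_epsilon=0.001)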
ops.py (project: attend_infer_repeat, author: akosiorek)
def make_moving_average(name, value, init, decay, log=True):
    """Creates an exp-moving average of `value` and an update op, which is added to UPDATE_OPS collection.

    :param name: string, name of the created moving average tf.Variable
    :param value: tf.Tensor, the value to be averaged
    :param init: float, an initial value for the moving average
    :param decay: float between 0 and 1, exponential decay of the moving average
    :param log: bool, add a summary op if True
    :return: tf.Tensor, the moving average
    """
    var = tf.get_variable(name, shape=value.get_shape(),
                          initializer=tf.constant_initializer(init), trainable=False)

    update = moving_averages.assign_moving_average(var, value, decay, zero_debias=False)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update)
    if log:
        tf.summary.scalar(name, var)

    return var
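
The update op only lands in `tf.GraphKeys.UPDATE_OPS`, so it still has to be run explicitly. A hypothetical training setup, assuming a scalar `loss` tensor defined elsewhere:

loss_ema = make_moving_average('loss_ema', loss, init=0., decay=0.99)
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(train_step, *update_ops)  # refreshes the average each step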
layers.py (project: third_person_im, author: bstadie)
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
layers.py (project: rllabplusplus, author: shaneshixiang)
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def moving_average_update(x, value, momentum):
      """Compute the moving average of a variable.

      Arguments:
          x: A Variable.
          value: A tensor with the same shape as `x`.
          momentum: The moving average momentum.

      Returns:
          An Operation to update the variable.
      """
      return moving_averages.assign_moving_average(
          x, value, momentum, zero_debias=False)


    # LINEAR ALGEBRA
optimizers.py (project: lsdc, author: febert)
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean
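
The `n / (n + 1.)` cap makes the effective decay ramp up from zero, so the averages track the first values almost exactly before the nominal decay takes over. Illustratively:

decay = 0.99
for n in (0., 1., 9., 99.):
    print(n, min(decay, n / (n + 1.)))
# 0.0 -> 0.0   (the average jumps straight to the first value)
# 1.0 -> 0.5
# 9.0 -> 0.9
# 99.0 -> 0.99 (nominal decay from here on)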
layers.py (project: gail-driver, author: sisl)
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(
                input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
model.py (project: tensorflow-cnn-finetune, author: dgurkaynak)
def bn(x, is_training):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]

    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta', params_shape, initializer=tf.zeros_initializer())
    gamma = _get_variable('gamma', params_shape, initializer=tf.ones_initializer())

    moving_mean = _get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = _get_variable('moving_variance', params_shape, initializer=tf.ones_initializer(), trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        is_training, lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
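
Since the updates only go into a collection, the caller still has to run them. `UPDATE_OPS_COLLECTION` is this project's own collection key, so a hypothetical training wiring might look like:

batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
with tf.control_dependencies(batchnorm_updates):
    train_op = optimizer.minimize(loss)  # `optimizer` and `loss` assumed defined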
layers.py (project: rllab, author: rll)
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
layers.py (project: maml_rl, author: cbfinn)
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
convnet_builder.py (project: benchmarks, author: tensorflow)
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    # We make this function as similar as possible to the
    # tf.contrib.layers.batch_norm, to minimize the differences between using
    # layers and not using layers.
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                             initializer=tf.zeros_initializer())
    if use_scale:
      gamma = self.get_variable('gamma', [num_channels], tf.float32,
                                tf.float32, initializer=tf.ones_initializer())
    else:
      gamma = tf.constant(1.0, tf.float32, [num_channels])
    # For moving variables, we use tf.get_variable instead of self.get_variable,
    # since self.get_variable returns the result of tf.cast which we cannot
    # assign to.
    moving_mean = tf.get_variable('moving_mean', [num_channels],
                                  tf.float32,
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels],
                                      tf.float32,
                                      initializer=tf.ones_initializer(),
                                      trainable=False)
    if self.phase_train:
      bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, epsilon=epsilon,
          data_format=self.data_format, is_training=True)
      mean_update = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay=decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          moving_variance, batch_variance, decay=decay, zero_debias=False)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
      bn, _, _ = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, mean=moving_mean,
          variance=moving_variance, epsilon=epsilon,
          data_format=self.data_format, is_training=False)
    return bn
custom_ops.py (project: how_to_convert_text_to_images, author: llSourcell)
def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
                 in_dim=None, phase=Phase.train):
        shape = input_layer.shape
        shp = in_dim or shape[-1]
        with tf.variable_scope(name) as scope:
            self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
            self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)

            self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
            self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))

            if phase == Phase.train:
                mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
                mean.set_shape((shp,))
                variance.set_shape((shp,))

                update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)

                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    normalized_x = tf.nn.batch_norm_with_global_normalization(
                        input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                        scale_after_normalization=True)
            else:
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.mean, self.variance,
                    self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
            return input_layer.with_tensor(normalized_x, parameters=self.vars)
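
`tf.nn.batch_norm_with_global_normalization` is deprecated; with `scale_after_normalization=True` it computes the same transform as `tf.nn.batch_normalization`, so the training branch above could equivalently end with (same names as above):

normalized_x = tf.nn.batch_normalization(input_layer.tensor, mean, variance,
                                         self.beta, self.gamma, epsilon)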
tensorflow_backend.py (project: keras, author: GeekLiB)
def moving_average_update(variable, value, momentum):
    return moving_averages.assign_moving_average(
        variable, value, momentum)


# LINEAR ALGEBRA
custom_ops.py (project: StackGAN, author: hanzhanggit)
def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
                 in_dim=None, phase=Phase.train):
        shape = input_layer.shape
        shp = in_dim or shape[-1]
        with tf.variable_scope(name) as scope:
            self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
            self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)

            self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
            self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))

            if phase == Phase.train:
                mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
                mean.set_shape((shp,))
                variance.set_shape((shp,))

                update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)

                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    normalized_x = tf.nn.batch_norm_with_global_normalization(
                        input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                        scale_after_normalization=True)
            else:
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.mean, self.variance,
                    self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
            return input_layer.with_tensor(normalized_x, parameters=self.vars)
network.py (project: GC-Net, author: Jiankai-Sun)
def bn(x, c):
  x_shape = x.get_shape()
  params_shape = x_shape[-1:]

  axis = list(range(len(x_shape) - 1))

  beta = _get_variable('beta',
                       params_shape,
                       initializer=tf.zeros_initializer())
                       #tf.constant_initializer(0.00, dtype='float')
  gamma = _get_variable('gamma',
                        params_shape,
                        initializer=tf.ones_initializer())

  moving_mean = _get_variable('moving_mean',
                              params_shape,
                              initializer=tf.zeros_initializer(),
                              trainable=False)
  moving_variance = _get_variable('moving_variance',
                                  params_shape,
                                  initializer=tf.ones_initializer(),
                                  trainable=False)

  # These ops will only be performed when training.
  mean, variance = tf.nn.moments(x, axis)
  update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                             mean, BN_DECAY)
  update_moving_variance = moving_averages.assign_moving_average(
                                        moving_variance, variance, BN_DECAY)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

  mean, variance = control_flow_ops.cond(
    c['is_training'], lambda: (mean, variance),
    lambda: (moving_mean, moving_variance))

  x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)

  return x

# wrapper for get_variable op
resnet.py (project: Skeleton-key, author: feiyu1990)
def _bn(self, x, params_init, is_training):
        x_shape = x.get_shape()
        axis = list(range(len(x_shape) - 1))

        beta = self._get_variable_const('beta', initializer=tf.constant(params_init['bias']))
        gamma = self._get_variable_const('gamma', initializer=tf.constant(params_init['weight']))
        moving_mean = self._get_variable_const('moving_mean',
                                               initializer=tf.constant(params_init['running_mean']), trainable=False)
        moving_variance = self._get_variable_const('moving_variance',
                                                   initializer=tf.constant(params_init['running_var']), trainable=False)
        # mean, variance = tf.nn.moments(x, axis)
        # update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
        # update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
        # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
        # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
        #
        # if ~is_training:
        #     mean = moving_mean
        #     variance = moving_variance
        # else:
        #     ema = tf.train.ExponentialMovingAverage(decay=BN_DECAY)
        #
        #     def mean_var_with_update():
        #         ema_apply_op = ema.apply([mean, variance])
        #         with tf.control_dependencies([ema_apply_op]):
        #             return tf.identity(mean), tf.identity(variance)
        #     mean, variance = mean_var_with_update()

        # mean, variance = control_flow_ops.cond(is_training, lambda: (mean, variance),
        #                                        lambda: (moving_mean, moving_variance))
        # x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
        x = tf.layers.batch_normalization(x, momentum=BN_DECAY, epsilon=BN_EPSILON, beta_initializer=tf.constant_initializer(params_init['bias']),
                                          gamma_initializer=tf.constant_initializer(params_init['weight']),
                                          moving_mean_initializer=tf.constant_initializer(params_init['running_mean']),
                                          moving_variance_initializer=tf.constant_initializer(params_init['running_var']),
                                          training=is_training)
        return x
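
Unlike the hand-rolled versions elsewhere on this page, `tf.layers.batch_normalization` registers its moving-average updates in `tf.GraphKeys.UPDATE_OPS`, and they must be run explicitly; assuming an `optimizer` and `loss` defined elsewhere:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)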
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def moving_average_update(variable, value, momentum):
    try:
        return moving_averages.assign_moving_average(
            variable, value, momentum, zero_debias=False)
    except TypeError:
        return moving_averages.assign_moving_average(
            variable, value, momentum)


# LINEAR ALGEBRA
base.py (project: tefla, author: openAGI)
def _adaptive_max_norm(self, norm, std_factor, decay, global_step, epsilon, name):
        """Find max_norm given norm and previous average."""
        with tf.variable_scope(name, "AdaptiveMaxNorm", [norm]):
            log_norm = tf.log(norm + epsilon)

            def moving_average(name, value, decay):
                moving_average_variable = tf.get_variable(name,
                                                          shape=value.get_shape(),
                                                          dtype=value.dtype,
                                                          initializer=tf.zeros_initializer(),
                                                          trainable=False)
                return moving_averages.assign_moving_average(moving_average_variable, value, decay, zero_debias=False)

            # quicker adaptation at the beginning
            if global_step is not None:
                n = tf.to_float(global_step)
                decay = tf.minimum(decay, n / (n + 1.))

            # update averages
            mean = moving_average("mean", log_norm, decay)
            sq_mean = moving_average(
                "sq_mean", tf.square(log_norm), decay)

            variance = sq_mean - tf.square(mean)
            std = tf.sqrt(tf.maximum(epsilon, variance))
            max_norms = tf.exp(mean + std_factor * std)
            return max_norms, mean
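
Working in log space means the threshold tracks the geometric mean of recent norms. With illustrative (made-up) numbers for the moving statistics of `log(norm)`:

import numpy as np

mean_log, std_log = 0.0, 0.05                # norms hovering around 1.0
max_norm = np.exp(mean_log + 2.0 * std_log)  # std_factor = 2.0
print(max_norm)                              # ~1.105; a sudden norm of 5.0 is capped near 1.1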
network.py (project: gc_net_stereo, author: MaidouPP)
def bn(x, c):
  x_shape = x.get_shape()
  params_shape = x_shape[-1:]

  axis = list(range(len(x_shape) - 1))

  beta = _get_variable('beta',
                       params_shape,
                       initializer=tf.zeros_initializer())
                       #tf.constant_initializer(0.00, dtype='float')
  gamma = _get_variable('gamma',
                        params_shape,
                        initializer=tf.ones_initializer())

  moving_mean = _get_variable('moving_mean',
                              params_shape,
                              initializer=tf.zeros_initializer(),
                              trainable=False)
  moving_variance = _get_variable('moving_variance',
                                  params_shape,
                                  initializer=tf.ones_initializer(),
                                  trainable=False)

  # These ops will only be performed when training.
  mean, variance = tf.nn.moments(x, axis)
  update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                             mean, BN_DECAY)
  update_moving_variance = moving_averages.assign_moving_average(
                                        moving_variance, variance, BN_DECAY)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

  mean, variance = control_flow_ops.cond(
    c['is_training'], lambda: (mean, variance),
    lambda: (moving_mean, moving_variance))

  x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)

  return x

# wrapper for get_variable op
siamese_net.py (project: tensorflow-siamese-fc, author: www0wwwjs1)
def batchNorm(self, x, isTraining):
        shape = x.get_shape()
        paramsShape = shape[-1:]

        axis = list(range(len(shape)-1))

        with tf.variable_scope('bn'):
            beta = self.getVariable('beta', paramsShape, initializer=tf.constant_initializer(value=0, dtype=tf.float32))
            self.learningRates[beta.name] = 1.0
            gamma = self.getVariable('gamma', paramsShape, initializer=tf.constant_initializer(value=1, dtype=tf.float32))
            self.learningRates[gamma.name] = 2.0
            movingMean = self.getVariable('moving_mean', paramsShape, initializer=tf.constant_initializer(value=0, dtype=tf.float32), trainable=False)
            movingVariance = self.getVariable('moving_variance', paramsShape, initializer=tf.constant_initializer(value=1, dtype=tf.float32), trainable=False)

        mean, variance = tf.nn.moments(x, axis)
        updateMovingMean = moving_averages.assign_moving_average(movingMean, mean, MOVING_AVERAGE_DECAY)
        updateMovingVariance = moving_averages.assign_moving_average(movingVariance, variance, MOVING_AVERAGE_DECAY)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, updateMovingMean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, updateMovingVariance)

        mean, variance = control_flow_ops.cond(
            isTraining,
            lambda: (mean, variance),
            lambda: (movingMean, movingVariance))

        x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, variance_epsilon=0.001)

        return x

    # def batchNormalization(self, inputs, isTraining, name):
    #     with tf.variable_scope('bn'):
    #         output = tf.contrib.layers.batch_norm(inputs, center=True, scale=True, is_training=isTraining, decay=0.997, epsilon=0.0001)
    #     self.learningRates[name+'/bn/BatchNorm/gamma:0'] = 2.0
    #     self.learningRates[name+'/bn/BatchNorm/beta:0'] = 1.0
    #
    #     return output
tensorflow_backend.py (project: keras-customized, author: ambrite)
def moving_average_update(variable, value, momentum):
    try:
        return moving_averages.assign_moving_average(
            variable, value, momentum, zero_debias=False)
    except TypeError:
        return moving_averages.assign_moving_average(
            variable, value, momentum)


# LINEAR ALGEBRA
convnet_builder.py (project: stuff, author: yaroslavvb)
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    # We make this function as similar as possible to the
    # tf.contrib.layers.batch_norm, to minimize the differences between using
    # layers and not using layers.
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                             initializer=tf.zeros_initializer())
    if use_scale:
      gamma = self.get_variable('gamma', [num_channels], tf.float32,
                                tf.float32, initializer=tf.ones_initializer())
    else:
      gamma = tf.constant(1.0, tf.float32, [num_channels])
    # For moving variables, we use tf.get_variable instead of self.get_variable,
    # since self.get_variable returns the result of tf.cast which we cannot
    # assign to.
    moving_mean = tf.get_variable('moving_mean', [num_channels],
                                  tf.float32,
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels],
                                      tf.float32,
                                      initializer=tf.ones_initializer(),
                                      trainable=False)
    if self.phase_train:
      bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, epsilon=epsilon,
          data_format=self.data_format, is_training=True)
      mean_update = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay=decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          moving_variance, batch_variance, decay=decay, zero_debias=False)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
      bn, _, _ = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, mean=moving_mean,
          variance=moving_variance, epsilon=epsilon,
          data_format=self.data_format, is_training=False)
    return bn
nn.py (project: image_captioning, author: DeepRNN)
def _batch_norm(x, name, is_train):
    """ Apply a batch normalization layer. """
    with tf.variable_scope(name):
        inputs_shape = x.get_shape()
        axis = list(range(len(inputs_shape) - 1))
        param_shape = int(inputs_shape[-1])

        moving_mean = tf.get_variable('mean', [param_shape], initializer=tf.constant_initializer(0.0), trainable=False)
        moving_var = tf.get_variable('variance', [param_shape], initializer=tf.constant_initializer(1.0), trainable=False)

        beta = tf.get_variable('offset', [param_shape], initializer=tf.constant_initializer(0.0))
        gamma = tf.get_variable('scale', [param_shape], initializer=tf.constant_initializer(1.0))

        def mean_var_with_update():
            mean, var = tf.nn.moments(x, axis)
            update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, 0.99)
            update_moving_var = moving_averages.assign_moving_average(moving_var, var, 0.99)
            # Tie the returned statistics to the update ops so the moving
            # averages are refreshed on every training step.
            with tf.control_dependencies([update_moving_mean, update_moving_var]):
                return tf.identity(mean), tf.identity(var)

        def mean_var():
            return tf.identity(moving_mean), tf.identity(moving_var)

        mean, var = tf.cond(is_train, mean_var_with_update, mean_var)

        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)

    return normed
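
A hypothetical call site, where `is_train` is a boolean placeholder that selects the `tf.cond` branch at run time:

is_train = tf.placeholder(tf.bool, shape=[])
y = _batch_norm(x, 'bn1', is_train)
# feed_dict={is_train: True}  -> batch moments are used and the averages update
# feed_dict={is_train: False} -> the stored moving averages are used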
nn.py (project: chinese_image_captioning, author: yuanx520)
def _batch_norm(x, name, is_train):
    """ Apply a batch normalization layer. """
    with tf.variable_scope(name):
        inputs_shape = x.get_shape()
        axis = list(range(len(inputs_shape) - 1))
        param_shape = int(inputs_shape[-1])

        moving_mean = tf.get_variable('mean', [param_shape], initializer=tf.constant_initializer(0.0), trainable=False)
        moving_var = tf.get_variable('variance', [param_shape], initializer=tf.constant_initializer(1.0), trainable=False)

        beta = tf.get_variable('offset', [param_shape], initializer=tf.constant_initializer(0.0))
        gamma = tf.get_variable('scale', [param_shape], initializer=tf.constant_initializer(1.0))

        def mean_var_with_update():
            mean, var = tf.nn.moments(x, axis)
            update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, 0.99)
            update_moving_var = moving_averages.assign_moving_average(moving_var, var, 0.99)
            # Tie the returned statistics to the update ops so the moving
            # averages are refreshed on every training step.
            with tf.control_dependencies([update_moving_mean, update_moving_var]):
                return tf.identity(mean), tf.identity(var)

        def mean_var():
            return tf.identity(moving_mean), tf.identity(moving_var)

        mean, var = tf.cond(is_train, mean_var_with_update, mean_var)

        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)

    return normed
tensorflow_backend.py (project: keras, author: NVIDIA)
def moving_average_update(variable, value, momentum):
    try:
        return moving_averages.assign_moving_average(
            variable, value, momentum, zero_debias=False)
    except TypeError:
        return moving_averages.assign_moving_average(
            variable, value, momentum)


# LINEAR ALGEBRA
tensorflow_backend.py (project: keras_superpixel_pooling, author: parag2489)
def moving_average_update(x, value, momentum):
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=False)


# LINEAR ALGEBRA
tensorflow_backend.py (project: InnerOuterRNN, author: Chemoinformatics)
def moving_average_update(variable, value, momentum):
    return moving_averages.assign_moving_average(
        variable, value, momentum)


# LINEAR ALGEBRA
common.py (project: tensorflow_multigpu_imagenet, author: arashno)
def batchNormalization(x, is_training=True, decay=0.9, epsilon=0.001):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]


    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta',
                         params_shape,
                         initializer=tf.zeros_initializer)
    gamma = _get_variable('gamma',
                          params_shape,
                          initializer=tf.ones_initializer)

    moving_mean = _get_variable('moving_mean',
                                params_shape,
                                initializer=tf.zeros_initializer,
                                trainable=False)
    moving_variance = _get_variable('moving_variance',
                                    params_shape,
                                    initializer=tf.ones_initializer,
                                    trainable=False)

    # These ops will only be performed when training.

    if is_training:
      mean, variance = tf.nn.moments(x, axis)
      update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                               mean, decay)
      update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_mean)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_variance)
      return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
    else:
      return tf.nn.batch_normalization(x, moving_mean, moving_variance, beta, gamma, epsilon)
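
Because `is_training` is a Python bool here, the branch is fixed at graph-construction time, so train and eval graphs are built separately. A hypothetical wiring with shared variables (inputs `x_train` and `x_eval` assumed):

with tf.variable_scope('model'):
    train_out = batchNormalization(x_train, is_training=True)
with tf.variable_scope('model', reuse=True):
    eval_out = batchNormalization(x_eval, is_training=False)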
optimizers.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean
resnet.py (project: tfplus, author: renmengye)
def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]

    if c['use_bias']:
        bias = _get_variable('bias', params_shape,
                             initializer=tf.zeros_initializer)
        return x + bias

    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta',
                         params_shape,
                         initializer=tf.zeros_initializer)
    gamma = _get_variable('gamma',
                          params_shape,
                          initializer=tf.ones_initializer)

    moving_mean = _get_variable('moving_mean',
                                params_shape,
                                initializer=tf.zeros_initializer,
                                trainable=False)
    moving_variance = _get_variable('moving_variance',
                                    params_shape,
                                    initializer=tf.ones_initializer,
                                    trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                               mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        c['is_training'], lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    # x.set_shape(inputs.get_shape()) ??

    return x

