Example source code for Python identity()
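Every snippet on this page uses TensorFlow's tf.identity, almost always for one of two purposes: giving a tensor a stable, predictable name in the graph, or creating a new op inside a tf.control_dependencies block so that side-effect ops (assignments, assertions, moving-average updates) actually run. A minimal sketch of both uses (TF 1.x graph mode; all names are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])
# Use 1: pin down a tensor's name, e.g. for later lookup or model export.
x_named = tf.identity(x, name='input_x')   # x_named.name == 'input_x:0'

counter = tf.Variable(0, name='counter')
update_op = tf.assign_add(counter, 1)      # a side effect we want to force
with tf.control_dependencies([update_op]):
    # Use 2: returning x_named directly would NOT run update_op, because it
    # was created outside this block; tf.identity creates a new op inside
    # the block, and that new op carries the control dependency.
    x_with_update = tf.identity(x_named)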

thingtalk.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def value_transition(self, curr_state, next_symbols, batch_size):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        with tf.name_scope('grammar_transition'):
            adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens, next_symbols + (first_value_token - self.num_control_tokens), next_symbols)

            assert1 = tf.Assert(tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)), [curr_state, next_symbols])
            with tf.control_dependencies([assert1]):
                transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
            assert transitions.get_shape()[1:] == (self.output_size,)

            indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
            next_state = tf.gather_nd(transitions, indices)

            assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [curr_state, adjusted_next_symbols, next_state])
            with tf.control_dependencies([assert2]):
                return tf.identity(next_state)
metric_specs.py (project: seq2seq, author: google)
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
network_vgg16.py (project: HandDetection, author: YunqiuXu)
def batch_norm_layer(self, to_be_normalized, is_training):
    if is_training:
      train_phase = tf.constant(1)
    else:
      train_phase = tf.constant(-1)
    beta = tf.Variable(tf.constant(0.0, shape=[to_be_normalized.shape[-1]]), name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[to_be_normalized.shape[-1]]), name='gamma', trainable=True)
    # axises = np.arange(len(to_be_normalized.shape) - 1) # change to apply tensorflow 1.3
    axises = [0,1,2]

    print("start nn.moments")
    print("axises : " + str(axises))
    batch_mean, batch_var = tf.nn.moments(to_be_normalized, axises, name='moments')
    print("nn.moments successful")
    ema = tf.train.ExponentialMovingAverage(decay=0.5)

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(train_phase > 0, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))  # if training, use batch stats and update the moving averages
    normed = tf.nn.batch_normalization(to_be_normalized, mean, var, beta, gamma, 1e-3)
    return normed
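This exponential-moving-average idiom recurs in most of the batch-norm snippets below: ema.apply([...]) creates the update op for the moving averages, and wrapping batch_mean and batch_var in tf.identity inside the tf.control_dependencies block creates new ops that carry that dependency, so evaluating the training-time statistics also refreshes their moving averages. At inference time the tf.cond falls through to ema.average(...), which reads the accumulated averages instead.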
de_noise.py (project: US-image-prediction, author: ChengruiWu008)
def input_norm(xs):
    fc_mean, fc_var = tf.nn.moments(
        xs,
        axes=[0],
    )
    scale = tf.Variable(tf.ones([1]))
    shift = tf.Variable(tf.zeros([1]))
    epsilon = 0.001
    # apply moving average for mean and var when train on batch
    ema = tf.train.ExponentialMovingAverage(decay=0.5)

    def mean_var_with_update():
        ema_apply_op = ema.apply([fc_mean, fc_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(fc_mean), tf.identity(fc_var)

    mean, var = mean_var_with_update()
    xs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)
    return xs
de_noise.py (project: US-image-prediction, author: ChengruiWu008)
def batch_norm(Wx_plus_b,out_size):
    fc_mean, fc_var = tf.nn.moments(
        Wx_plus_b,
        axes=[0],  # the dimensions to normalize over; [0] for the batch dimension
        # for images, use [0, 1, 2] to cover [batch, height, width] but not channel
    )
    scale = tf.Variable(tf.ones([out_size]))
    shift = tf.Variable(tf.zeros([out_size]))
    epsilon = 0.001
    # apply moving average for mean and var when train on batch
    ema = tf.train.ExponentialMovingAverage(decay=0.5)
    def mean_var_with_update():
        ema_apply_op = ema.apply([fc_mean, fc_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(fc_mean), tf.identity(fc_var)
    mean, var = mean_var_with_update()
    Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
    return Wx_plus_b
ops.py (project: deligan, author: val-iisc)
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                self.beta = tf.get_variable("beta", [shape[-1]],
                                    initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                    initializer=tf.random_normal_initializer(1., 0.02))

                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
                x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed

# standard convolution layer
model.py (project: tfutils, author: neuroailab)
def global_pool(inp, kind='avg', keep_dims=False, name=None):
    if kind not in ['max', 'avg']:
        raise ValueError('Only global avg or max pool is allowed, but '
                            'you requested {}.'.format(kind))
    if name is None:
        name = 'global_{}_pool'.format(kind)
    h, w = inp.get_shape().as_list()[1:3]
    out = getattr(tf.nn, kind + '_pool')(inp,
                                    ksize=[1,h,w,1],
                                    strides=[1,1,1,1],
                                    padding='VALID')
    if keep_dims:
        output = tf.identity(out, name=name)
    else:
        output = tf.reshape(out, [out.get_shape().as_list()[0], -1], name=name)

    return output
convertmodel.py (project: DmsMsgRcg, author: bshao001)
def convert(model_dir, keras_model_file, tf_model_file, name_output='s1_output', num_output=1):
    # Pass False for tf.keras in TF 1.4; for standalone Keras, pass 0 instead
    keras.backend.set_learning_phase(False)
    keras_model = keras.models.load_model(os.path.join(model_dir, keras_model_file),
                                          custom_objects={'custom_loss': YoloNet.custom_loss})

    output = [None] * num_output
    out_node_names = [None] * num_output
    for i in range(num_output):
        out_node_names[i] = name_output + str(i)
        output[i] = tf.identity(keras_model.outputs[i], name=out_node_names[i])

    sess = keras.backend.get_session()
    constant_graph = tf.graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        out_node_names  # All other operations relying on this will also be saved
    )
    output_file = os.path.join(model_dir, tf_model_file)
    with tf.gfile.GFile(output_file, "wb") as f:
        f.write(constant_graph.SerializeToString())

    print("Converted model was saved as {}.".format(tf_model_file))
conv_net_example.py (project: tfplus, author: renmengye)
def build_input(self):
        self.init_default_options()
        inp_height = self.get_option('inp_height')
        inp_width = self.get_option('inp_width')
        inp_depth = self.get_option('inp_depth')
        x = self.add_input_var(
            'x', [None, inp_height, inp_width, inp_depth], 'float')
        x_id = tf.identity(x)
        self.register_var('x_id', x_id)
        y_gt = self.add_input_var('y_gt', [None, 10], 'float')
        phase_train = self.add_input_var('phase_train', None, 'bool')
        return {
            'x': x,
            'y_gt': y_gt,
            'phase_train': phase_train
        }
dbpedia.py (project: GAN, author: ilblackdragon)
def autoencoder_model(feature, target, mode, params):
  """Autoencodes sequence model."""
  vocab_size = params.get('vocab_size')
  embed_dim = params.get('embed_dim')

  tf.identity(feature[0], name='feature')
  embed_feature = sequence.embed_features(
    feature, vocab_size=vocab_size, embed_dim=embed_dim)
  output, _ = sequence.sequence_autoencoder_discriminator(
    embed_feature, length=FLAGS.max_doc_length, hidden_size=embed_dim)
  logits, predictions = sequence.outbed_generated(output)

  # Loss and training.
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=feature, logits=logits)
  loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))
  train_op = layers.optimize_loss(
      loss, tf.train.get_global_step(),
      learning_rate=params['learning_rate'],
      optimizer=params.get('optimizer', 'Adam'))
  return predictions, loss, train_op
model.py (project: GAN, author: ilblackdragon)
def autoencoder_model(feature, target, mode, params):
  """Autoencodes features with given function."""
  autoencoder_fn = params.get('autoencoder_fn')
  feature_processor = params.get('feature_processor', lambda f: f)
  generated_postprocess = params.get('generated_postprocess', lambda f: f)

  # Process features.
  feature = feature_processor(feature)

  # Auto-encode.
  generated, _ = autoencoder_fn(feature)

  # Loss and training.
  loss = tf.contrib.losses.mean_squared_error(feature, generated)
  train_op = layers.optimize_loss(
      loss, tf.train.get_global_step(),
      learning_rate=params['learning_rate'],
      optimizer=params.get('optimizer', 'Adam'))

  # Post process generated.
  prediction = generated_postprocess(generated)
  prediction = tf.identity(prediction, name='generated')
  return prediction, loss, train_op
batchnorm.py (project: Automatic-Image-Colorization, author: Armour)
def normalize(self, x, train=True):
        """
        Returns a batch-normalized version of x.
        """
        if train:
            mean, variance = tf.nn.moments(x, [0, 1, 2])
            assign_mean = self.mean.assign(mean)
            assign_variance = self.variance.assign(variance)
            with tf.control_dependencies([assign_mean, assign_variance]):
                return tf.nn.batch_norm_with_global_normalization(x, mean, variance, self.beta, self.gamma, self.epsilon, self.scale_after_norm)
        else:
            mean = self.ewma_trainer.average(self.mean)
            variance = self.ewma_trainer.average(self.variance)
            local_beta = tf.identity(self.beta)
            local_gamma = tf.identity(self.gamma)
            return tf.nn.batch_norm_with_global_normalization(x, mean, variance, local_beta, local_gamma, self.epsilon, self.scale_after_norm)
layers.py (project: acdc_segmenter, author: baumgach)
def dense_layer_bn(bottom,
                   name,
                   training,
                   hidden_units=512,
                   activation=tf.nn.relu,
                   weight_init='he_normal'):

    '''
    Shortcut for a batch-normalised dense layer
    '''

    linact = dense_layer(bottom=bottom,
                         name=name,
                         hidden_units=hidden_units,
                         activation=tf.identity,
                         weight_init=weight_init,
                         add_bias=False)

    batchnorm = batch_normalisation_layer(linact, name + '_bn', training=training)
    act = activation(batchnorm)

    return act

### VARIABLE INITIALISERS ####################################################################################
AveragePolicyNetwork.py (project: RL_NFSP, author: Richard-An)
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
DQN_DouDiZhu.py (project: RL_NFSP, author: Richard-An)
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
memory.py (project: DNC, author: bgavran)
def update_link_matrix(self, link_matrix_old, precedence_weighting_old, write_weighting):
        """
        Updating the link matrix takes some effort (in order to vectorize the implementation).
        Instead of the original index-by-index operation, it's all done at once.


        :param link_matrix_old: from previous time step, shape [batch_size, memory_size, memory_size]
        :param precedence_weighting_old: from previous time step, shape [batch_size, memory_size]
        :param write_weighting: from current time step, shape [batch_size, memory_size]
        :return: updated link matrix
        """
        expanded = tf.expand_dims(write_weighting, axis=2)

        # vectorizing the paper's original implementation
        w = tf.tile(expanded, [1, 1, self.memory_size])  # shape [batch_size, memory_size, memory_size]
        # shape of w_transpose is the same: [batch_size, memory_size, memory_size]
        w_transp = tf.tile(tf.transpose(expanded, [0, 2, 1]), [1, self.memory_size, 1])

        # in einsum, m and n index the same dimension (memory_size); distinct letters are used because einsum does not allow repeated subscripts within one operand
        lm = (1 - w - w_transp) * link_matrix_old + tf.einsum("bn,bm->bmn", precedence_weighting_old, write_weighting)
        lm *= (1 - tf.eye(self.memory_size, batch_shape=[self.batch_size]))  # making sure self links are off
        return tf.identity(lm, name="Link_matrix")
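For reference, the element-wise update this vectorizes (the temporal link matrix update from the DNC paper), with $w_t$ the current write weighting and $p_{t-1}$ the previous precedence weighting:

$$L_t[i,j] = \big(1 - w_t[i] - w_t[j]\big)\,L_{t-1}[i,j] + w_t[i]\,p_{t-1}[j], \qquad L_t[i,i] = 0.$$

In the einsum "bn,bm->bmn", b is the batch index, m plays the role of i, and n the role of j.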
utils.py (project: WassersteinGAN.tensorflow, author: shekkizh)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
model.py (project: cloudml-samples, author: GoogleCloudPlatform)
def build_prediction_graph(self):
    """Builds prediction graph and registers appropriate endpoints."""

    tensors = self.build_graph(None, 1, GraphMod.PREDICT)

    keys_placeholder = tf.placeholder(tf.string, shape=[None])
    inputs = {
        'key': keys_placeholder,
        'image_bytes': tensors.input_jpeg
    }

    # To extract the id, we need to add the identity function.
    keys = tf.identity(keys_placeholder)
    outputs = {
        'key': keys,
        'prediction': tensors.predictions[0],
        'scores': tensors.predictions[1]
    }

    return inputs, outputs
ops.py (project: Magic-Pixel, author: zhwhong)
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                self.beta = tf.get_variable("beta", [shape[-1]],
                                    initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                    initializer=tf.random_normal_initializer(1., 0.02))

                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
                x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed
utils_pg.py (project: rl_algorithms, author: DanielTakeshi)
def gauss_KL(mu1, logstd1, mu2, logstd2):
    """ Returns KL divergence among two multivariate Gaussians, component-wise.

    It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
    It is not necessary to know the number of actions because reduce_sum will
    sum over this to get the `d` constant offset. The part consisting of the
    trace in the formula is blended with the mean difference squared due to the
    common "denominator" of var2_na.  This forumula generalizes for an arbitrary
    number of actions.  I think mu2 and logstd2 should represent the policy
    before the update.

    Returns the KL divergence for each of the n components in the minibatch,
    then we do a reduce_mean outside this.
    """
    var1_na = tf.exp(2.*logstd1)
    var2_na = tf.exp(2.*logstd2)
    tmp_matrix = 2.*(logstd2 - logstd1) + (var1_na + tf.square(mu1-mu2))/var2_na - 1
    kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1]) # Don't forget the 1/2 !!
    assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n]) 
    with tf.control_dependencies([assert_op]):
        kl_n = tf.identity(kl_n)
    return kl_n
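For reference, with $\sigma_i = e^{\text{logstd}_i}$, the function computes, for each of the n minibatch components (summing over the action dimension $a$):

$$\text{KL}\big(\mathcal{N}(\mu_1,\sigma_1^2)\,\|\,\mathcal{N}(\mu_2,\sigma_2^2)\big) = \sum_a \left[\log\frac{\sigma_2}{\sigma_1} + \frac{\sigma_1^2 + (\mu_1-\mu_2)^2}{2\sigma_2^2} - \frac{1}{2}\right],$$

which matches tmp_matrix above after the factor of 1/2 is applied.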
test_base.py (project: zhusuan, author: thu-ml)
def test_session_run(self):
        with self.test_session(use_gpu=True) as sess:
            samples = tf.constant([1, 2, 3])
            log_probs = Mock()
            probs = Mock()
            sample_func = Mock(return_value=samples)
            log_prob_func = Mock(return_value=log_probs)
            prob_func = Mock(return_value=probs)
            distribution = Mock(sample=sample_func,
                                log_prob=log_prob_func,
                                prob=prob_func,
                                dtype=tf.int32)

            # test session.run
            t = StochasticTensor('t', distribution, 1, samples)
            self.assertAllEqual(sess.run(t), np.asarray([1, 2, 3]))

            # test using as feed dict
            self.assertAllEqual(
                sess.run(tf.identity(t), feed_dict={
                    t: np.asarray([4, 5, 6])
                }),
                np.asarray([4, 5, 6])
            )
hmc.py (project: zhusuan, author: thu-ml)
def tune(self, acceptance_rate, fresh_start):
        def adapt_stepsize():
            new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
            rate1 = tf.div(1.0, new_step + self.t0)
            new_h_bar = tf.assign(
                self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
                rate1 * (self.delta - acceptance_rate))
            log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
            rate = tf.pow(new_step, -self.kappa)
            new_log_epsilon_bar = tf.assign(
                self.log_epsilon_bar,
                rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
                self.log_epsilon_bar)
            with tf.control_dependencies([new_log_epsilon_bar]):
                new_log_epsilon = tf.identity(log_epsilon)

            return tf.exp(new_log_epsilon)

        c = tf.cond(self.adapt_step_size,
                    adapt_stepsize,
                    lambda: tf.exp(self.log_epsilon_bar))

        return c
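Ignoring the fresh_start reset factors, this appears to implement the dual-averaging step-size adaptation from the NUTS paper (Hoffman & Gelman, 2014): with iteration count $t$, target acceptance rate $\delta$, and observed rate $\alpha_t$,

$$\bar h_t = \Big(1 - \frac{1}{t+t_0}\Big)\bar h_{t-1} + \frac{\delta - \alpha_t}{t+t_0}, \qquad \log\epsilon_t = \mu - \frac{\sqrt{t}}{\gamma}\,\bar h_t, \qquad \log\bar\epsilon_t = t^{-\kappa}\log\epsilon_t + \big(1-t^{-\kappa}\big)\log\bar\epsilon_{t-1},$$

returning $\epsilon_t$ while adapting and $\bar\epsilon_t$ once adaptation stops. Note the tf.identity at the end: it creates a new op under the control dependency on new_log_epsilon_bar, so the averaged log step size is updated whenever the tuned step size is read.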
utils.py (project: zhusuan, author: thu-ml)
def assert_rank_at_least(tensor, k, name):
    """
    Whether the rank of `tensor` is at least k.

    :param tensor: A tensor to be checked.
    :param k: The least rank allowed.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = '{} should have rank >= {}.'.format(name, k)
    if static_shape and (static_shape.ndims < k):
        raise ValueError(shape_err_msg)
    if not static_shape:
        _assert_shape_op = tf.assert_rank_at_least(
            tensor, k, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
    return tensor
utils.py (project: zhusuan, author: thu-ml)
def assert_scalar(tensor, name):
    """
    Whether the `tensor` is a scalar (0-D tensor).

    :param tensor: A tensor to be checked.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = name + " should be a scalar (0-D tensor)."
    if static_shape and (static_shape.ndims >= 1):
        raise ValueError(shape_err_msg)
    else:
        _assert_shape_op = tf.assert_rank(tensor, 0, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
        return tensor
problem_unittests.py (project: deep-learning-nd, author: RyanCCollins)
def test_get_init_cell(get_init_cell):
    with tf.Graph().as_default():
        test_batch_size_ph = tf.placeholder(tf.int32)
        test_rnn_size = 256

        cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)

        # Check type
        assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
            'Cell is wrong type.  Found {} type'.format(type(cell))

        # Check for name attribute
        assert hasattr(init_state, 'name'),\
            'Initial state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'

        # Check name
        assert init_state.name == 'initial_state:0',\
            'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)

    _print_success_message()
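A minimal get_init_cell that would satisfy these checks (a hypothetical sketch, not the course's reference solution); build_rnn below names its final state the same way, via tf.identity(final_state, name='final_state'):

import tensorflow as tf

def get_init_cell(batch_size, rnn_size, num_layers=2):
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    init_state = cell.zero_state(batch_size, tf.float32)
    # tf.identity auto-packs the nested LSTM state into a single tensor and
    # pins the name that the 'initial_state:0' assertion checks for.
    init_state = tf.identity(init_state, name='initial_state')
    return cell, init_state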
problem_unittests.py (project: deep-learning-nd, author: RyanCCollins)
def test_build_rnn(build_rnn):
    with tf.Graph().as_default():
        test_rnn_size = 256
        test_rnn_layer_size = 2
        test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size)

        test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
        outputs, final_state = build_rnn(test_cell, test_inputs)

        # Check name
        assert hasattr(final_state, 'name'),\
            'Final state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'
        assert final_state.name == 'final_state:0',\
            'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)

        # Check shape
        assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
            'Outputs has wrong shape.  Found shape {}'.format(outputs.get_shape())
        assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
            'Final state wrong shape.  Found shape {}'.format(final_state.get_shape())

    _print_success_message()
TensorflowUtils.py (project: AutoPortraitMatting, author: PetroWu)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed

