Example source code for the Python function ones_like()
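`ones_like()` builds a tensor of ones with the same shape and dtype as its argument. The snippets below are collected from real projects; as a warm-up, here is a minimal standalone sketch (assuming TensorFlow 1.x, which all of the snippets below target):

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
ones = tf.ones_like(x)                       # same shape/dtype as x, all 1.0
ones_int = tf.ones_like(x, dtype=tf.int32)   # same shape, cast to int32

with tf.Session() as sess:
    print(sess.run(ones))    # [[1. 1.] [1. 1.]]
```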

File: nem_model.py — project: Neural-EM, author: sjoerdvansteenkiste
def init_state(self, batch_size, K, dtype):
        # inner RNN hidden state init
        with tf.name_scope('inner_RNN_init'):
            h = self.cell.zero_state(batch_size * K, dtype)

        # initial prediction (B, K, W, H, C)
        with tf.name_scope('pred_init'):
            pred_shape = tf.stack([batch_size, K] + self.input_shape.as_list())
            pred = tf.ones(shape=pred_shape, dtype=dtype) * self.pred_init

        # initial gamma (B, K, W, H, 1)
        with tf.name_scope('gamma_init'):
            gamma_shape = self.gamma_shape.as_list()
            shape = tf.stack([batch_size, K] + gamma_shape)

            # init with Gaussian distribution
            gamma = tf.abs(tf.random_normal(shape, dtype=dtype))
            gamma /= tf.reduce_sum(gamma, 1, keep_dims=True)

            # init with all 1 if K = 1
            if K == 1:
                gamma = tf.ones_like(gamma)

            return h, pred, gamma
File: topn.py — project: lsdc, author: febert
def remove(self, ids):
    """Remove the ids (and their associated scores) from the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(
          self.id_to_score,
          ids,
          tf.ones_like(
              ids, dtype=tf.float32) * tf.float32.min)
      # We assume that removed ids are almost always in the shortlist,
      # so it makes no sense to hide the Op behind a tf.cond
      shortlist_ids_to_remove, new_length = self.ops.top_n_remove(self.sl_ids,
                                                                  ids)
      u1 = tf.scatter_update(
          self.sl_ids, tf.concat(0, [[0], shortlist_ids_to_remove]),
          tf.concat(0, [new_length,
                        tf.ones_like(shortlist_ids_to_remove) * -1]))
      u2 = tf.scatter_update(
          self.sl_scores,
          shortlist_ids_to_remove,
          tf.float32.min * tf.ones_like(
              shortlist_ids_to_remove, dtype=tf.float32))
      self.last_ops = [scatter_op, u1, u2]
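A note on the snippet above: `tf.concat(0, values)` is the pre-1.0 argument order; from TensorFlow 1.0 onward, the values come first and the axis is a keyword. A minimal sketch of the equivalent modern call (names here are illustrative):

```python
import tensorflow as tf

ids = tf.constant([3, 7], dtype=tf.int64)

# pre-TF-1.0:  tf.concat(0, [[0], ids])
# TF >= 1.0:   tf.concat([[0], ids], axis=0)
indices = tf.concat([tf.constant([0], dtype=tf.int64), ids], axis=0)
sentinels = tf.ones_like(ids) * -1   # -1 marks blanked-out shortlist slots
```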
File: tensorflow_backend.py — project: keras-customized, author: ambrite
def ones_like(x, name=None):
    '''Instantiates an all-ones Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or tensor.

    # Returns
        A Keras variable, filled with `1.0`.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_ones = K.ones_like(kvar)
        >>> K.eval(kvar_ones)
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
    ```
    '''
    return tf.ones_like(x, name=name)

File: show_and_tell_model_test.py — project: im2txt_api, author: mainyaa
def build_inputs(self):
    if self.mode == "inference":
      # Inference mode doesn't read from disk, so defer to parent.
      return super(ShowAndTellModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      self.images = tf.random_uniform(
          shape=[self.config.batch_size, self.config.image_height,
                 self.config.image_width, 3],
          minval=-1,
          maxval=1)
      self.input_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.target_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.input_mask = tf.ones_like(self.input_seqs)
File: time_autoencoder.py — project: auDeep, author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            # tf.equal is required here: Python's `==` on TF 1.x tensors is identity comparison
            reconstruction = tf.where(tf.equal(self.targets, -1.), -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
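The masking line above relies on `tf.equal`: on TF 1.x tensors, Python's `==` operator falls back to identity comparison and silently yields a plain Python bool, so the elementwise test must be explicit. A standalone sketch of the pattern:

```python
import tensorflow as tf

targets = tf.constant([[-1.0, 0.3], [0.5, -1.0]])
reconstruction = tf.constant([[0.2, 0.4], [0.6, 0.8]])

# Replace the reconstruction with -1 wherever the target is silence (-1)
masked = tf.where(tf.equal(targets, -1.0),
                  -tf.ones_like(reconstruction),
                  reconstruction)
```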
File: frequency_time_autoencoder.py — project: auDeep, author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            # tf.equal is required here: Python's `==` on TF 1.x tensors is identity comparison
            reconstruction = tf.where(tf.equal(self.targets, -1.), -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
File: frequency_autoencoder.py — project: auDeep, author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            # tf.equal is required here: Python's `==` on TF 1.x tensors is identity comparison
            reconstruction = tf.where(tf.equal(self.targets, -1.), -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
File: model.py — project: VAE-GAN, author: sergeybok
def build_vae(self,encoder_shapes,encoder_filters,optimizer=tf.train.AdamOptimizer,conv=True):
        self.encoder_shapes = encoder_shapes
        self.encoder_filters = encoder_filters
        self.encoder_X = tf.placeholder(tf.float32,shape=[None,28,28,1], name='encoder_X')

        self.mu, self.sigma, self.encoder_params = self.build_encoder(self.encoder_X)

        Qz = tf.contrib.distributions.Normal(mu=self.mu, sigma=self.sigma)
        z_sample = Qz.sample()

        self.decoded = self.build_generator(z_sample,self.phase,weights=self.gen_params)

        # KL divergence term of the ELBO (the encoder outputs are self.mu and self.sigma)
        self.klloss = -tf.reduce_sum(1 + tf.log(self.sigma ** 2) - self.mu ** 2 - self.sigma ** 2, 1)
        #sigmaloss = tf.reduce_sum((tf.ones_like(z_sigma)-z_sigma)**4 )

        offset = 1e-7
        obs = tf.clip_by_value(self.decoded, offset, 1 - offset)
        self.logloss = -1*(tf.reduce_sum(self.encoder_X*tf.log(obs) + (1-self.encoder_X)*tf.log(1-obs)))


        self.vae_cost = tf.reduce_mean(self.logloss + self.klloss)

        self.vae_optimizer = optimizer(self.LR)
        self.train_step_e = self.vae_optimizer.minimize(self.vae_cost,var_list=self.encoder_params)
File: base_optimizer.py — project: Sing_Par, author: wanghm92
def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    b_tm1_ = tf.gather(b_tm1, idxs)
    shape = self.get_variable_shape(x_tm1)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
    tm1_ = tf.gather(tm1, idxs)
    t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
    t_ = tf.gather(t, idxs)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
    else:
      beta_t_ = tm1_/t_
    b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
    b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
    return b_t, t

  #=============================================================
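The `tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))` line above maintains per-row visit counts, which drive the bias-correction factor `(1 - beta^t)`. A minimal sketch of that counting pattern (variable names here are illustrative):

```python
import tensorflow as tf

counts = tf.Variable(tf.zeros([5, 1]))   # one counter per embedding row
idxs = tf.constant([1, 3, 3])

gathered = tf.gather(counts, idxs)
# scatter_add accumulates duplicate indices, so row 3 is incremented twice
new_counts = tf.scatter_add(counts, idxs, tf.ones_like(gathered))
```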
File: show_and_tell_model_test.py — project: semsearch, author: sanjana-Bijoe
def build_inputs(self):
    if self.mode == "inference":
      # Inference mode doesn't read from disk, so defer to parent.
      return super(ShowAndTellModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      self.images = tf.random_uniform(
          shape=[self.config.batch_size, self.config.image_height,
                 self.config.image_width, 3],
          minval=-1,
          maxval=1)
      self.input_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.target_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.input_mask = tf.ones_like(self.input_seqs)
File: capacities.py — project: openai-rl, author: morgangiraud
def eligibility_traces(Qs_t, states_t, actions_t, discount, lambda_value):
    et = tf.get_variable(
        "eligibilitytraces"
        , shape=Qs_t.get_shape()
        , dtype=tf.float32
        , trainable=False
        , initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)
    dec_et_op = tf.assign(et, discount * lambda_value * et)
    with tf.control_dependencies([dec_et_op]):
        state_action_pairs = tf.stack([states_t, actions_t], 1)
        update_et_op = tf.scatter_nd_update(et, indices=state_action_pairs, updates=tf.ones_like(states_t, dtype=tf.float32))

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
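The `scatter_nd_update` call above writes 1.0 into the trace table at each visited (state, action) pair after the decay step. A standalone sketch of that indexing pattern (shapes here are illustrative):

```python
import tensorflow as tf

et = tf.Variable(tf.zeros([4, 2]))           # traces: 4 states x 2 actions
states_t = tf.constant([0, 2])
actions_t = tf.constant([1, 0])

pairs = tf.stack([states_t, actions_t], 1)   # (N, 2) index pairs into et
set_ones = tf.scatter_nd_update(
    et, indices=pairs, updates=tf.ones_like(states_t, dtype=tf.float32))
```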
File: base_optimizer.py — project: Parser-v1, author: tdozat
def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    b_tm1_ = tf.gather(b_tm1, idxs)
    shape = self.get_variable_shape(x_tm1)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
    tm1_ = tf.gather(tm1, idxs)
    t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
    t_ = tf.gather(t, idxs)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
    else:
      beta_t_ = tm1_/t_
    b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
    b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
    return b_t, t

  #=============================================================
File: model.py — project: GalaxyGAN_python, author: Ireneruru
def __init__(self):
        self.image = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))
        self.cond = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))

        self.gen_img = self.generator(self.cond)

        pos = self.discriminator(self.image, self.cond, False)
        neg = self.discriminator(self.gen_img, self.cond, True)
        pos_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pos, labels=tf.ones_like(pos)))
        neg_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.zeros_like(neg)))

        self.delta = tf.square(tf.reduce_mean(self.image)-(tf.reduce_mean(self.gen_img)))

        self.d_loss = pos_loss + neg_loss

        #with regularization
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.ones_like(neg))) + \
                  conf.L1_lambda * tf.reduce_mean(tf.abs(self.image - self.gen_img)) + conf.sum_lambda *self.delta

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'disc' in var.name]
        self.g_vars = [var for var in t_vars if 'gen' in var.name]
File: tensor.py — project: odin, author: imito
def renorm_rms(X, axis=1, target_rms=1.0, name="RescaleRMS"):
  """ Scales the data such that RMS of the features dimension is 1.0
  scale = sqrt(x^t x / (D * target_rms^2)).

  NOTE
  ----
  by defaults, assume the features dimension is `1`
  """
  with tf.variable_scope(name):
    D = tf.sqrt(tf.cast(tf.shape(X)[axis], X.dtype.base_dtype))
    l2norm = tf.sqrt(tf.reduce_sum(X ** 2, axis=axis, keep_dims=True))
    X_rms = l2norm / D
    X_rms = tf.where(tf.equal(X_rms, 0.),
                     x=tf.ones_like(X_rms, dtype=X_rms.dtype.base_dtype),
                     y=X_rms)
    return target_rms * X / X_rms


# ===========================================================================
# RNN and loop
# ===========================================================================
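The `tf.where(tf.equal(X_rms, 0.), ...)` guard above swaps zero RMS values for ones so the division never blows up. A standalone sketch of this safe-division idiom:

```python
import tensorflow as tf

denom = tf.constant([0.0, 2.0, 4.0])
safe_denom = tf.where(tf.equal(denom, 0.0),
                      tf.ones_like(denom),   # avoid division by zero
                      denom)
scaled = tf.constant([1.0, 1.0, 1.0]) / safe_denom   # [1.0, 0.5, 0.25]
```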
File: relative_trafo.py — project: hand3d, author: lmb-freiburg
def _atan2(y, x):
    """ My implementation of atan2 in tensorflow.  Returns in -pi .. pi."""
    tan = tf.atan(y / (x + 1e-8))  # this returns in -pi/2 .. pi/2

    one_map = tf.ones_like(tan)

    # correct quadrant error
    correction = tf.where(tf.less(x + 1e-8, 0.0), 3.141592653589793*one_map, 0.0*one_map)
    tan_c = tan + correction  # this returns in -pi/2 .. 3pi/2

    # bring to positive values
    correction = tf.where(tf.less(tan_c, 0.0), 2*3.141592653589793*one_map, 0.0*one_map)
    tan_zero_2pi = tan_c + correction  # this returns in 0 .. 2pi

    # make symmetric
    correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793), -2*3.141592653589793*one_map, 0.0*one_map)
    tan_final = tan_zero_2pi + correction  # this returns in -pi .. pi
    return tan_final
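For reference, more recent TensorFlow 1.x releases ship a native `tf.atan2` with the same -pi .. pi range, which makes the manual quadrant corrections above unnecessary:

```python
import tensorflow as tf

y = tf.constant([1.0, -1.0])
x = tf.constant([-1.0, -1.0])
angles = tf.atan2(y, x)   # [ 3*pi/4, -3*pi/4 ], already in -pi .. pi
```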
File: GAN.py — project: dreamscape, author: themattinthehatt
def _define_loss(self):
        """Define loss function that will be used to optimize model params"""

        # define generator loss
        with tf.variable_scope('generator'):
            self.loss_gen = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.ones_like(self.disc_gen)))

        # define discriminator loss
        with tf.variable_scope('discriminator'):
            self.loss_disc = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_real,
                    labels=tf.ones_like(self.disc_real)) +
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.zeros_like(self.disc_gen)))

        # save summaries of losses
        tf.summary.scalar('loss_gen', self.loss_gen)
        tf.summary.scalar('loss_disc', self.loss_disc)
File: GAN_models.py — project: GAN, author: kunrenzhilu
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                           name="disc_real_loss")

        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                           name="disc_fake_loss")
        self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
        if use_features:
            gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
        else:
            gen_loss_features = 0
        self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

        tf.summary.scalar("Discriminator_loss", self.discriminator_loss)
        tf.summary.scalar("Generator_loss", self.gen_loss)
File: beamsearch.py — project: TextGAN, author: ankitkv
def unwrap_output_sparse(self, final_state, include_stop_tokens=True):
        """
        Retrieve the beam search output from the final state.

        Returns a sparse tensor with underlying dimensions of [batch_size, max_len]
        """
        output_dense = final_state[0]
        mask = tf.not_equal(output_dense, self.stop_token)

        if include_stop_tokens:
            output_dense = tf.concat(1, [output_dense[:, 1:],
                                         tf.ones_like(output_dense[:, 0:1]) *
                                         self.stop_token])
            mask = tf.concat(1, [mask[:, 1:], tf.cast(tf.ones_like(mask[:, 0:1],
                                                                   dtype=tf.int8),
                                                      tf.bool)])

        return sparse_boolean_mask(output_dense, mask)
File: train.py — project: youtube-8m, author: wangheda
def get_weights_by_predictions(labels_batch, predictions):
  epsilon = 1e-6
  float_labels = tf.cast(labels_batch, dtype=tf.float32)
  cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
      1 - float_labels) * tf.log(1 - predictions + epsilon)
  ce = tf.reduce_sum(tf.negative(cross_entropy_loss), axis=1)
  mean_ce = tf.reduce_mean(ce + epsilon)
  weights = tf.where(ce > mean_ce, 
                     3.0 * tf.ones_like(ce),
                     0.5 * tf.ones_like(ce))
  return weights
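The `tf.where` over scaled `ones_like` tensors above assigns weight 3.0 to harder-than-average examples and 0.5 to easier ones. A quick standalone check of that branching (values are illustrative):

```python
import tensorflow as tf

ce = tf.constant([0.2, 1.5, 0.9])
mean_ce = tf.reduce_mean(ce)                  # ~0.867
weights = tf.where(ce > mean_ce,
                   3.0 * tf.ones_like(ce),    # hard examples
                   0.5 * tf.ones_like(ce))    # easy examples
# -> [0.5, 3.0, 3.0]
```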

