Example source code for Python log()
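All snippets below are excerpts from larger files; they use TensorFlow 1.x APIs and assume import tensorflow as tf and import numpy as np are already in scope.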

vbutils.py (project: chemblnet, author: jaak-s)
def __init__(self, name, shape, initial_stdev = 2.0, initial_prec_a = 5.0, initial_prec_b = 1.0, a0 = 1.0, b0 = 1.0, fixed_prec = False, mean_init_std = None):
        if mean_init_std is None:
            mean_init_std = 1.0 / np.sqrt(shape[-1])
        with tf.variable_scope(name) as scope:
            #self.mean   = tf.get_variable(name="mean", shape=shape, initializer=tf.contrib.layers.xavier_initializer(), dtype = tf.float32)
            #self.var    = tf.Variable(initial_var * np.ones(shape),      name = name + ".var", dtype = tf.float32)
            self.mean   = tf.Variable(tf.random_uniform(shape, minval=-mean_init_std, maxval=mean_init_std))
            self.logvar = tf.Variable(np.log(initial_stdev**2.0) * np.ones(shape), name = "logvar", dtype = tf.float32)
            if fixed_prec:
                self.prec_a = tf.constant(initial_prec_a * np.ones(shape[-1]), name = "prec_a", dtype = tf.float32)
                self.prec_b = tf.constant(initial_prec_b * np.ones(shape[-1]), name = "prec_b", dtype = tf.float32)
            else:
                self.prec_a = tf.Variable(initial_prec_a * np.ones(shape[-1]), name = "prec_a", dtype = tf.float32)
                self.prec_b = tf.Variable(initial_prec_b * np.ones(shape[-1]), name = "prec_b", dtype = tf.float32)
            self.prec   = tf.div(self.prec_a, self.prec_b, name = "prec")
            self.var    = tf.exp(self.logvar, name = "var")
            self.a0     = a0
            self.b0     = b0
            self.shape  = shape
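
A brief aside on why the constructor stores logvar rather than the variance itself: optimizing the log keeps the variance strictly positive no matter what the optimizer does. A minimal numpy sketch (the step values are made up, not from chemblnet):

import numpy as np

logvar = np.log(2.0 ** 2)   # initial_stdev = 2.0, as in the constructor above
for _ in range(5):
    logvar -= 1.7           # arbitrary gradient steps may push logvar negative
print(np.exp(logvar))       # the recovered variance is still strictly positive
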
distributions.py (project: baselines, author: openai)
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=-1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
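
The commented-out sample() uses the Gumbel-max trick: adding -log(-log(u)) noise to the logits and taking the argmax draws exactly from softmax(logits). A quick numpy check of that equivalence (illustrative, not part of baselines):

import numpy as np

logits = np.array([2.0, 0.5, 0.1])
probs = np.exp(logits) / np.exp(logits).sum()

rng = np.random.default_rng(0)
u = rng.uniform(size=(100000, 3))
samples = np.argmax(logits - np.log(-np.log(u)), axis=1)  # Gumbel-max trick
print(probs)
print(np.bincount(samples, minlength=3) / 100000.0)  # closely matches probs
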
A3C.py (project: CartPole-v0, author: hmtai6)
def run(self):
        print("Starting worker " + str(self.name))
        print("Starting self.agent.name " + str(self.agent.name))
        print("id(self) ",id(self))
        agent = self.agent

        self.episode_count = sess.run(self.global_episodes)

        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                R, r_l, p_l, e_l, g_n, v_n, mean_advantages_m = self.env.run(agent)
                if self.episode_count % 5 == 0 and self.episode_count!=0:
                    self.log(R, r_l, p_l, e_l, g_n, v_n, mean_advantages_m)
                if self.episode_count > TOTAL_EPISODE:
                    coord.request_stop()
                self.episode_count += 1
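
The loop is driven by a shared tf.train.Coordinator: once any worker passes TOTAL_EPISODE and calls request_stop(), every worker's should_stop() check turns true and all threads exit. A stripped-down sketch of that pattern (worker_fn and the episode counting are illustrative stand-ins):

import threading
import tensorflow as tf

coord = tf.train.Coordinator()

def worker_fn(total_episodes=100):
    episode = 0
    while not coord.should_stop():
        episode += 1                # stand-in for running one environment episode
        if episode > total_episodes:
            coord.request_stop()    # signals all workers, not just this one

threads = [threading.Thread(target=worker_fn) for _ in range(4)]
for t in threads:
    t.start()
coord.join(threads)                 # blocks until every worker has exited
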
A3C.py (project: CartPole-v0, author: hmtai6)
def log(self, rewards, v_l, p_l, e_l, g_n, v_n, mean_advantages_m):
        print(str(self.name), " episode_count", self.episode_count)
        summary = tf.Summary()
        summary.value.add(tag='Perf/Reward', simple_value=float(rewards))
#        summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
#        summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
        summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
        summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
        summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
        summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
        summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
        summary.value.add(tag='Losses/mean_advantages_m', simple_value=float(mean_advantages_m))
        self.summary_writer.add_summary(summary, self.episode_count)

        self.summary_writer.flush()
        pass
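
log() builds a tf.Summary proto by hand instead of evaluating summary ops, which lets it record plain Python floats without adding nodes to the graph. A minimal standalone version (the log directory is hypothetical):

import tensorflow as tf

writer = tf.summary.FileWriter("/tmp/a3c_demo")  # hypothetical log directory
summary = tf.Summary()
summary.value.add(tag="Perf/Reward", simple_value=12.5)
writer.add_summary(summary, global_step=1)
writer.flush()
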
utils.py (project: magenta, author: tensorflow)
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded data (int8 only when int8=True).
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out
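
Mu-law compresses amplitude logarithmically so that 8 bits cover the perceptually useful dynamic range. A numpy sketch of the matching decoder (mirroring the TF code above; inv_mu_law_np is an illustration, not magenta's implementation) recovers the input up to quantization error:

import numpy as np

def mu_law_np(x, mu=255):
    out = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
    return np.floor(out * 128)

def inv_mu_law_np(y, mu=255):
    y = y / 128.0
    return np.sign(y) * ((1 + mu) ** np.abs(y) - 1) / mu

x = np.linspace(-1.0, 1.0, 5)
print(inv_mu_law_np(mu_law_np(x)))  # approximately recovers [-1, -0.5, 0, 0.5, 1]
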
utils.py (project: magenta, author: tensorflow)
def calculate_softmax_and_summaries(logits, one_hot_labels, name):
  """Calculate the softmax cross entropy loss and associated summaries.

  Args:
    logits: Tensor of logits, first dimension is batch size.
    one_hot_labels: Tensor of one hot encoded categorical labels. First
      dimension is batch size.
    name: Name to use as prefix for summaries.

  Returns:
    loss: Dimensionless tensor representing the mean negative
      log-probability of the true class.
  """
  loss = tf.nn.softmax_cross_entropy_with_logits(
      logits=logits, labels=one_hot_labels)
  loss = tf.reduce_mean(loss)
  softmax_summaries(loss, logits, one_hot_labels, name)
  return loss
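
The fused op applies the log-sum-exp shift internally so that log(softmax) never sees an exact zero. A naive numpy reimplementation of the quantity being averaged (illustrative only):

import numpy as np

def softmax_xent_np(logits, one_hot):
    shifted = logits - logits.max(axis=1, keepdims=True)  # log-sum-exp shift
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -(one_hot * log_probs).sum(axis=1)

logits = np.array([[2.0, 1.0, 0.1]])
one_hot = np.array([[1.0, 0.0, 0.0]])
print(softmax_xent_np(logits, one_hot).mean())  # what tf.reduce_mean returns
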
utils.py (project: magenta, author: tensorflow)
def calculate_sparse_softmax_and_summaries(logits, labels, name):
  """Calculate the softmax cross entropy loss and associated summaries.

  Args:
    logits: Tensor of logits, first dimension is batch size.
    labels: Tensor of categorical labels [ints]. First
      dimension is batch size.
    name: Name to use as prefix for summaries.

  Returns:
    loss: Dimensionless tensor representing the mean negative
      log-probability of the true class.
  """
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels)
  loss = tf.reduce_mean(loss)
  softmax_summaries(loss, logits, labels, name)
  return loss
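
The sparse variant takes integer class ids directly and computes the same per-row loss as the dense op applied to their one-hot expansion; the conversion between the two label formats is just an identity-matrix lookup:

import numpy as np

sparse_labels = np.array([0, 2])
one_hot = np.eye(3)[sparse_labels]  # the rows the dense softmax op would expect
print(one_hot)                      # [[1. 0. 0.], [0. 0. 1.]]
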
a3C.py (project: A3C, author: go2sea)
def actor_loss(self):
        if self.config.mode == 'discrete':
            log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
                                     axis=1, keep_dims=True)
            # use entropy to encourage exploration
            exp_v = log_prob * self.TD_loss
            entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
            exp_v = self.config.ENTROPY_BETA * entropy + exp_v
            return tf.reduce_mean(-exp_v)  # minimize the negative, i.e. ascend log_prob weighted by TD_loss
        elif self.config.mode == 'continuous':
            log_prob = self.action_normal_dist.log_prob(self.action_input)
            exp_v = log_prob * self.TD_loss
            # use entropy to encourage exploration
            exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
            return tf.reduce_mean(-exp_v)
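
The entropy term -sum(p * log p) is maximal for a uniform policy, so adding ENTROPY_BETA * entropy to exp_v penalizes premature determinism. A numpy illustration with made-up action distributions:

import numpy as np

def entropy(p):
    return -(p * np.log(p)).sum()

print(entropy(np.full(4, 0.25)))                    # ~1.386, maximal for 4 actions
print(entropy(np.array([0.97, 0.01, 0.01, 0.01])))  # ~0.17, nearly deterministic
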
a3C.py (project: A3C, author: go2sea)
def choose_action(self):
        if self.config.mode == 'discrete':
            return tf.multinomial(tf.log(self.a_prob), 1)[0][0]  # tf.multinomial expects log-probabilities; draws one index in [0, action_dim)
        elif self.config.mode == 'continuous':
            # the sample op adds a leading dimension of size 1; squeeze it out along axis 0
            sample_action = self.action_normal_dist.sample(1) * self.config.ACTION_GAP + self.config.ACTION_BOUND[0]
            return tf.clip_by_value(tf.squeeze(sample_action, axis=0),
                                    self.config.ACTION_BOUND[0],
                                    self.config.ACTION_BOUND[1])[0]
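
tf.multinomial expects unnormalized log-probabilities, which is why the discrete branch wraps a_prob in tf.log: multinomial samples in proportion to exp(logits), so passing raw probabilities would flatten the distribution. A numpy check (illustrative):

import numpy as np

p = np.array([0.7, 0.2, 0.1])
print(np.exp(p) / np.exp(p).sum())                  # ~[0.46, 0.28, 0.25]: what raw probs would sample
print(np.exp(np.log(p)) / np.exp(np.log(p)).sum())  # [0.7, 0.2, 0.1]: correct, hence tf.log
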
tdlm_model.py (project: topically-driven-language-model, author: jhlau)
def sample(self, probs, temperature):
        if temperature == 0:
            return np.argmax(probs)

        probs = probs.astype(np.float64) #convert to float64 for higher precision
        probs = np.log(probs) / temperature
        probs = np.exp(probs) / math.fsum(np.exp(probs))
        return np.argmax(np.random.multinomial(1, probs, 1))

    #generate a sentence given conv_hidden
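
Dividing the log-probabilities by the temperature before renormalizing sharpens (t < 1) or flattens (t > 1) the distribution, and t == 0 degenerates to the argmax handled by the early return above. A numpy demonstration:

import numpy as np

def apply_temperature(probs, t):
    logits = np.log(probs) / t
    e = np.exp(logits - logits.max())  # shift by the max for numerical stability
    return e / e.sum()

p = np.array([0.6, 0.3, 0.1])
print(apply_temperature(p, 0.5))  # sharper: ~[0.78, 0.20, 0.02]
print(apply_temperature(p, 2.0))  # flatter: ~[0.47, 0.33, 0.19]
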
frame_level_models.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        with tf.name_scope("loss_frames"):
            epsilon = 10e-6
            float_labels = tf.cast(labels, tf.float32)
            cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                1 - float_labels) * tf.log(1 - predictions + epsilon)

        return tf.reduce_sum(cross_entropy_loss, 2)
frame_level_models.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        with tf.name_scope("loss_frames"):
            epsilon = 10e-6
            float_labels = tf.cast(labels, tf.float32)
            cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                1 - float_labels) * tf.log(1 - predictions + epsilon)

        return tf.reduce_sum(cross_entropy_loss, axis=2)
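
Both versions build the element-wise log-likelihood term and leave the negation to the caller; epsilon (10e-6, i.e. 1e-5) keeps tf.log finite when a prediction saturates at exactly 0 or 1. A numpy rendering of the negated, conventional binary cross-entropy (illustrative):

import numpy as np

def bce_elementwise(predictions, labels, epsilon=1e-5):
    return -(labels * np.log(predictions + epsilon)
             + (1 - labels) * np.log(1 - predictions + epsilon))

preds = np.array([[0.9, 0.0], [0.2, 1.0]])
labels = np.array([[1.0, 0.0], [0.0, 1.0]])
print(bce_elementwise(preds, labels))  # finite everywhere despite the exact 0.0 and 1.0
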
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_distill(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill"):
      print("loss_distill")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      float_labels_distill = tf.cast(labels_distill, tf.float32)
      embedding_mat = np.loadtxt("./resources/embedding_matrix.model")
      vocab_size = embedding_mat.shape[1]
      labels_size = float_labels.get_shape().as_list()[1]
      embedding_mat = tf.cast(embedding_mat,dtype=tf.float32)
      cross_entropy_loss_1 = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      float_labels_1 = float_labels[:,:vocab_size]
      labels_smooth = tf.matmul(float_labels_1,embedding_mat)/tf.reduce_sum(float_labels_1,axis=1,keep_dims=True)
      float_classes = labels_smooth
      for i in range(labels_size//vocab_size-1):
        float_classes = tf.concat((float_classes,labels_smooth),axis=1)
      cross_entropy_loss_2 = float_classes * tf.log(predictions + epsilon) + (
          1 - float_classes) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss_3 = float_labels_distill * tf.log(predictions + epsilon) + (
          1 - float_labels_distill) * tf.log(1 - predictions + epsilon)

      cross_entropy_loss = cross_entropy_loss_1*0.5 + cross_entropy_loss_2*0.5 + cross_entropy_loss_3*0.5
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_negative(self, predictions_pos, predictions_neg, labels, **unused_params):
    with tf.name_scope("loss_negative"):
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      weight_pos = np.loadtxt(FLAGS.autoencoder_dir+"labels_uni.out")
      weight_pos = tf.reshape(tf.cast(weight_pos,dtype=tf.float32),[1,-1])
      weight_pos = tf.log(tf.reduce_max(weight_pos)/weight_pos)+1
      cross_entropy_loss_1 = float_labels * tf.log(predictions_pos + epsilon)*weight_pos + (
          1 - float_labels) * tf.log(1 - predictions_pos + epsilon)
      cross_entropy_loss_2 = (1-float_labels) * tf.log(predictions_neg + epsilon) + \
                             float_labels * tf.log(1 - predictions_neg + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss_1+cross_entropy_loss_2)
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
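
weight_pos upweights rare positive classes as log(max_count / count) + 1, so the most frequent class keeps weight 1 and rarer classes count progressively more in the positive term. A numpy illustration with hypothetical per-class counts:

import numpy as np

counts = np.array([10000.0, 1000.0, 10.0])     # hypothetical positive-label counts
weight_pos = np.log(counts.max() / counts) + 1
print(weight_pos)                               # [1.0, ~3.3, ~7.9]
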
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_max(self, predictions, predictions_experts, labels, **unused_params):
    with tf.name_scope("loss_max"):
      epsilon = 10e-6
      shape = predictions_experts.get_shape().as_list()
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                        1 - float_labels) * tf.log(1 - predictions + epsilon)
      float_exprts = tf.tile(tf.reshape(float_labels,[-1,shape[1],1]),[1,1,shape[2]])
      cross_entropy_experts = float_exprts * tf.log(predictions_experts + epsilon) + (
                     1 - float_exprts) * tf.log(1 - predictions_experts + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      cross_entropy_experts = tf.negative(tf.reduce_mean(cross_entropy_experts,axis=2))
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) + tf.reduce_mean(tf.reduce_sum(cross_entropy_experts, 1))
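
float_exprts replicates each label across the expert axis so the per-expert cross-entropy is computed against the same targets. The reshape-plus-tile pattern in numpy (shapes are illustrative):

import numpy as np

labels = np.arange(6.0).reshape(2, 3)                     # [batch, num_classes]
per_expert = np.tile(labels.reshape(2, 3, 1), (1, 1, 4))  # copy across 4 experts
print(per_expert.shape)                                   # (2, 3, 4)
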
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    false_positive_punishment = FLAGS.false_positive_punishment
    false_negative_punishment = FLAGS.false_negative_punishment
    with tf.name_scope("loss_xent_recall"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = false_negative_punishment * float_labels * tf.log(predictions + epsilon) \
          + false_positive_punishment * ( 1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(float_labels, 1, keep_dims=True),
          epsilon)
      norm_float_labels = tf.div(float_labels, label_rowsum)
      softmax_outputs = tf.nn.softmax(predictions)
      softmax_loss = tf.negative(tf.reduce_sum(
          tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
    return tf.reduce_mean(softmax_loss)
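
The multi-label rows are l1-normalized into a proper distribution before the softmax cross-entropy, and the tf.maximum guard keeps all-zero rows from dividing by zero. Numpy equivalent:

import numpy as np

labels = np.array([[1.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
rowsum = np.maximum(labels.sum(axis=1, keepdims=True), 1e-7)  # epsilon guard
print(labels / rowsum)  # [[0.5, 0, 0.5], [0, 0, 0]]
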
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent_batch"):
      batch_agreement = FLAGS.batch_agreement
      epsilon = 10e-6
      float_batch_size = float(FLAGS.batch_size)

      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      positive_predictions = predictions * float_labels + 1.0 - float_labels
      min_pp = tf.reduce_min(positive_predictions)

      negative_predictions = predictions * (1.0 - float_labels)
      max_np = tf.reduce_max(negative_predictions)

      # 1s that fall under 0s
      false_negatives = tf.cast(predictions < max_np, tf.float32) * float_labels
      num_fn = tf.reduce_sum(false_negatives)
      center_fn = tf.reduce_sum(predictions * false_negatives) / num_fn

      # 0s that grow over 1s
      false_positives = tf.cast(predictions > min_pp, tf.float32) * (1.0 - float_labels)
      num_fp = tf.reduce_sum(false_positives)
      center_fp = tf.reduce_sum(predictions * false_positives) / num_fp

      false_range = tf.maximum(epsilon, max_np - min_pp)

      # for 1s that fall under 0s
      weight_fn = tf.nn.sigmoid((center_fp - predictions) / false_range * 3.0) * (num_fp / float_batch_size) * false_negatives
      # for 0s that grow over 1s
      weight_fp = tf.nn.sigmoid((predictions - center_fn) / false_range * 3.0) * (num_fn / float_batch_size) * false_positives

      weight = (weight_fn + weight_fp) * batch_agreement + 1.0
      print(weight)
      return tf.reduce_mean(tf.reduce_sum(weight * cross_entropy_loss, 1))
train.py (project: youtube-8m, author: wangheda)
def get_weights_by_predictions(labels_batch, predictions):
  epsilon = 1e-6
  float_labels = tf.cast(labels_batch, dtype=tf.float32)
  cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
      1 - float_labels) * tf.log(1 - predictions + epsilon)
  ce = tf.reduce_sum(tf.negative(cross_entropy_loss), axis=1)
  mean_ce = tf.reduce_mean(ce + epsilon)
  weights = tf.where(ce > mean_ce, 
                     3.0 * tf.ones_like(ce),
                     0.5 * tf.ones_like(ce))
  return weights
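
Examples whose cross-entropy exceeds the batch mean are upweighted to 3.0 and the rest downweighted to 0.5, a simple hard-example emphasis. Numpy equivalent of the tf.where (the loss values are made up):

import numpy as np

ce = np.array([0.2, 1.5, 0.4, 2.8])           # hypothetical per-example losses
weights = np.where(ce > ce.mean(), 3.0, 0.5)
print(weights)                                 # [0.5, 3.0, 0.5, 3.0]
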

