def actor_loss(self):
    if self.config.mode == 'discrete':
        # log pi(a|s): the one-hot mask selects the chosen action's log-probability;
        # a small epsilon guards against log(0) producing NaN
        log_prob = tf.reduce_sum(
            tf.log(self.a_prob + 1e-8) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
            axis=1, keepdims=True)
        # policy-gradient term: log-probability weighted by the TD error (advantage)
        exp_v = log_prob * self.TD_loss
        # entropy bonus to encourage exploration
        entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-8), axis=1, keepdims=True)
        exp_v = self.config.ENTROPY_BETA * entropy + exp_v
        # negate: the optimizer minimizes, but we want to maximize log_prob * TD_loss plus the entropy bonus
        return tf.reduce_mean(-exp_v)
    elif self.config.mode == 'continuous':
        log_prob = self.action_normal_dist.log_prob(self.action_input)
        exp_v = log_prob * self.TD_loss
        # same entropy bonus, using the closed-form entropy of the Gaussian policy
        exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
        return tf.reduce_mean(-exp_v)
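
# A minimal standalone sketch of how the discrete branch could be wired up and
# trained under TensorFlow 1.x. The placeholder shapes, ACTION_DIM, ENTROPY_BETA,
# and the Adam learning rate below are illustrative assumptions, not values taken
# from the original class.
import tensorflow as tf

ACTION_DIM = 4
ENTROPY_BETA = 0.01

states = tf.placeholder(tf.float32, [None, 8])      # batch of state vectors
actions = tf.placeholder(tf.int32, [None])          # indices of chosen actions
td_error = tf.placeholder(tf.float32, [None, 1])    # TD error / advantage estimate

logits = tf.layers.dense(states, ACTION_DIM)        # hypothetical policy head
a_prob = tf.nn.softmax(logits)

log_prob = tf.reduce_sum(
    tf.log(a_prob + 1e-8) * tf.one_hot(actions, ACTION_DIM, dtype=tf.float32),
    axis=1, keepdims=True)
entropy = -tf.reduce_sum(a_prob * tf.log(a_prob + 1e-8), axis=1, keepdims=True)
loss = tf.reduce_mean(-(log_prob * td_error + ENTROPY_BETA * entropy))

# one gradient step per call to sess.run(train_op, feed_dict=...)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)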