# Method of a TensorFlow 1.x distribution class; assumes `import tensorflow as tf`.
def _sample(self, n_samples):
    n = self.n_experiments
    # Flatten logits so each batch element becomes one row of a
    # two-class (Bernoulli) categorical distribution.
    if self.logits.get_shape().ndims == 1:
        logits_flat = self.logits
    else:
        logits_flat = tf.reshape(self.logits, [-1])
    # In log space: log(1 - p) = -softplus(logits) and
    # log(p) = logits + log(1 - p), which stays numerically stable.
    log_1_minus_p = -tf.nn.softplus(logits_flat)
    log_p = logits_flat + log_1_minus_p
    stacked_logits_flat = tf.stack([log_1_minus_p, log_p], axis=-1)
    # Draw n_samples * n Bernoulli outcomes per batch element (TF 1.x
    # tf.multinomial), then sum groups of n to get binomial counts.
    samples_flat = tf.transpose(
        tf.multinomial(stacked_logits_flat, n_samples * n))
    shape = tf.concat([[n, n_samples], self.batch_shape], 0)
    samples = tf.reduce_sum(tf.reshape(samples_flat, shape), axis=0)
    # Fill in whatever static shape is known at graph-construction time.
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    static_shape = tf.TensorShape([static_n_samples]).concatenate(
        self.get_batch_shape())
    samples.set_shape(static_shape)
    return tf.cast(samples, self.dtype)
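
The sampler relies on the fact that a Binomial(n, p) draw is the sum of n independent Bernoulli(p) draws: stacking [log(1 - p), log p] as two-class logits turns tf.multinomial into a batched Bernoulli sampler, and summing groups of n outcomes yields binomial counts. Below is a minimal NumPy sanity check of the same trick; the helper name, seeding, and shapes are illustrative assumptions, not part of the original class.

# Standalone sketch (hypothetical helper), mirroring the reshape/reduce_sum above.
import numpy as np

def binomial_via_bernoulli_sums(logits, n_experiments, n_samples, seed=0):
    """Sample Binomial(n_experiments, sigmoid(logits)); output shape [n_samples] + logits.shape."""
    rng = np.random.default_rng(seed)
    p = 1.0 / (1.0 + np.exp(-np.asarray(logits, dtype=np.float64)))  # sigmoid(logits)
    flat_p = p.reshape(-1)
    # n_experiments * n_samples Bernoulli draws per batch element,
    # summed over the n_experiments axis.
    draws = rng.random((n_experiments, n_samples, flat_p.size)) < flat_p
    return draws.sum(axis=0).reshape((n_samples,) + p.shape)

# sigmoid(1.0) ~= 0.731, so the empirical mean should be close to 10 * 0.731 = 7.31.
samples = binomial_via_bernoulli_sums(np.array([1.0]), n_experiments=10, n_samples=10000)
print(samples.mean())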