def content_loss(self, layers):
    """Return the content loss over the given feature layers.

    For each requested layer, the activations of the content images
    (batch slots [num_style, num_style + num_content)) are tiled and
    subtracted from the activations starting at slot
    num_style + num_content; the squared difference is summed,
    normalized by the layer's H * W * C size, and the per-layer terms
    are summed and divided by 2.

    Args:
        layers: iterable of layer indices, each passed to
            self.activations_for_layer.

    Returns:
        A scalar tensor containing the content loss.

    NOTE(review): written against the pre-1.0 TensorFlow API
    (tf.sub / tf.div); under TF >= 1.0 these are tf.subtract /
    tf.divide.
    """
    activations = [self.activations_for_layer(i) for i in layers]
    activation_diffs = [
        tf.sub(
            # Content-image activations, tiled along the batch axis.
            # NOTE(review): the tile count
            # (num_synthesized - num_content + 1) presumably makes the
            # batch dimension line up with the second slice for the
            # subtraction — verify against how the input batch
            # [style | content | synthesized] is assembled.
            tf.tile(
                tf.slice(a, [self.num_style, 0, 0, 0],
                         [self.num_content, -1, -1, -1]),
                [self.num_synthesized - self.num_content + 1, 1, 1, 1]),
            # Activations of the images being synthesized.
            tf.slice(a, [self.num_style + self.num_content, 0, 0, 0],
                     [self.num_content, -1, -1, -1]))
        for a in activations]
    # Per-layer normalizer H * W * C. This normalizer is in JCJohnson's
    # paper, but not Gatys' original formulation.
    Ns = [a.get_shape().as_list()[1]
          * a.get_shape().as_list()[2]
          * a.get_shape().as_list()[3]
          for a in activations]
    content_loss = tf.div(
        tf.add_n([tf.div(tf.reduce_sum(tf.square(d)), n)
                  for d, n in zip(activation_diffs, Ns)]),
        2.0)
    return content_loss
# Source file: vgg_network.py
# (Scraped page metadata removed: Python source listing; reads: 28,
# bookmarks: 0, likes: 0, comments: 0.)