import tensorflow as tf
import tensorflow.contrib.layers as tcl

def lrelu(x, leak=0.2):
    # leaky ReLU activation used by every layer below
    return tf.maximum(x, leak * x)

# __call__ is a method of a discriminator class whose __init__ sets self.name
def __call__(self, x, reuse=False):
    with tf.variable_scope(self.name) as scope:
        if reuse:
            scope.reuse_variables()  # share variables on repeated calls (e.g. for fake images)
        size = 64
        d = tcl.conv2d(x, num_outputs=size, kernel_size=3,  # batch x 64x64x3 -> batch x 32x32x64
                       stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3,  # -> batch x 16x16x128
                       stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3,  # -> batch x 8x8x256
                       stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3,  # -> batch x 4x4x512
                       stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
        # flatten the 4x4x512 feature map and project to a single real/fake logit
        d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
        d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
        return d
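
A minimal usage sketch, assuming a hypothetical Discriminator wrapper class whose __init__ sets self.name and whose __call__ is the method above; the second call passes reuse=True so real and fake images are scored by the same variables:

import tensorflow as tf

class Discriminator(object):  # hypothetical wrapper around the __call__ above
    def __init__(self, name='discriminator'):
        self.name = name
    # __call__ as defined above

d_net = Discriminator()
real = tf.placeholder(tf.float32, [None, 64, 64, 3])  # batch of real 64x64 RGB images
fake = tf.placeholder(tf.float32, [None, 64, 64, 3])  # stand-in for generator output
d_real = d_net(real)               # first call creates the variables under self.name
d_fake = d_net(fake, reuse=True)   # second call reuses those same variables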