def encoder(self, images, is_training, reuse=False):
    with tf.variable_scope("generator"):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        # Keep every intermediate activation so the decoder can build
        # U-Net-style skip connections later.
        encode_layers = dict()

        def encode_layer(x, output_filters, layer):
            # LeakyReLU -> stride-2 conv -> batch norm; each call halves
            # the spatial resolution and records the result.
            act = lrelu(x)
            conv = conv2d(act, output_filters=output_filters, scope="g_e%d_conv" % layer)
            enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
            encode_layers["e%d" % layer] = enc
            return enc

        # First layer: a plain convolution with no activation or normalization.
        e1 = conv2d(images, self.generator_dim, scope="g_e1_conv")
        encode_layers["e1"] = e1
        e2 = encode_layer(e1, self.generator_dim * 2, 2)
        e3 = encode_layer(e2, self.generator_dim * 4, 3)
        e4 = encode_layer(e3, self.generator_dim * 8, 4)
        e5 = encode_layer(e4, self.generator_dim * 8, 5)
        e6 = encode_layer(e5, self.generator_dim * 8, 6)
        e7 = encode_layer(e6, self.generator_dim * 8, 7)
        e8 = encode_layer(e7, self.generator_dim * 8, 8)
        return e8, encode_layers
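
The helpers lrelu, conv2d, and batch_norm are used above but not shown. A minimal sketch of what they might look like in TensorFlow 1.x follows; the specific hyperparameters (5x5 kernels, stride 2, leak 0.2, the batch-norm momentum and epsilon) are assumptions in the spirit of pix2pix-style encoders, not values taken from the original code:

import tensorflow as tf  # TensorFlow 1.x graph-mode API

def lrelu(x, leak=0.2):
    # Leaky ReLU; the leak of 0.2 is an assumed default.
    return tf.maximum(x, leak * x)

def conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="conv2d"):
    # Stride-2 convolution: halves the spatial resolution each time it is
    # applied, which is what shrinks e1 down to e8 in the encoder.
    with tf.variable_scope(scope):
        W = tf.get_variable("W", [kh, kw, int(x.get_shape()[-1]), output_filters],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable("b", [output_filters],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.conv2d(x, W, strides=[1, sh, sw, 1], padding="SAME") + b

def batch_norm(x, is_training, scope="batch_norm"):
    # Thin wrapper over the built-in batch normalization, named so that
    # variables can be reused when reuse=True is passed to encoder().
    return tf.layers.batch_normalization(x, training=is_training,
                                         momentum=0.9, epsilon=1e-5, name=scope)

With stride-2 convolutions, a 256x256 input is reduced to a 1x1x(generator_dim*8) bottleneck at e8, while encode_layers preserves e1 through e8 for the decoder's skip connections.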