def _generator(self, z, is_training):
    # Assumes TF 1.x with `slim = tf.contrib.slim` and `import tensorflow as tf`.
    subnet = self.arch['generator']
    n_layer = len(subnet['output'])
    h, w, c = subnet['hwc']
    with slim.arg_scope(
            [slim.batch_norm],
            scale=True,
            updates_collections=None,
            decay=0.9, epsilon=1e-5,
            is_training=is_training,
            scope='BN'):
        # Project the latent code z to h*w*c units, then reshape it
        # into the initial feature map of shape [batch, h, w, c].
        x = slim.fully_connected(
            z,
            h * w * c,
            normalizer_fn=slim.batch_norm,
            activation_fn=tf.nn.relu)
        x = tf.reshape(x, [-1, h, w, c])
        with slim.arg_scope(
                [slim.conv2d_transpose],
                weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
                normalizer_fn=slim.batch_norm,
                activation_fn=tf.nn.relu):
            # Upsample through all but the last transposed-conv layer.
            for i in range(n_layer - 1):
                x = slim.conv2d_transpose(
                    x,
                    subnet['output'][i],
                    subnet['kernel'][i],
                    subnet['stride'][i])
            # Don't apply BN to the last layer of G; tanh maps the
            # output into [-1, 1].
            x = slim.conv2d_transpose(
                x,
                subnet['output'][-1],
                subnet['kernel'][-1],
                subnet['stride'][-1],
                normalizer_fn=None,
                activation_fn=tf.nn.tanh)
    return x
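
For reference, here is a minimal sketch of the `self.arch['generator']` dict this method reads. Every value below is a hypothetical placeholder (the real settings would come from the model's config file), chosen only to show the expected structure: four transposed-conv layers upsampling a 4x4x512 seed to a 64x64x3 image.

arch = {
    'generator': {
        'hwc': [4, 4, 512],           # (h, w, c) of the reshaped FC output
        'output': [256, 128, 64, 3],  # output channels of each deconv layer
        'kernel': [[5, 5]] * 4,       # kernel size per layer
        'stride': [[2, 2]] * 4,       # stride 2 doubles h and w at each layer
        'l2-reg': 1e-6,               # weight of the L2 regularizer
    }
}

With stride-2 layers, each deconv doubles the spatial size, so 4x4 grows to 64x64 after four layers, and the final tanh puts pixel values in [-1, 1], matching images normalized to that range.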