def _encoder(self, x, is_training):
    # Variational encoder: a stack of strided convolutions followed by
    # two parallel linear heads producing the latent Gaussian's
    # mean (z_mu) and log-variance (z_lv).
    subnet = self.arch['encoder']
    n_layer = len(subnet['output'])
    with slim.arg_scope(
        [slim.batch_norm],
        scale=True,
        updates_collections=None,
        decay=0.9, epsilon=1e-5,
        is_training=is_training,
        reuse=None):
        with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
            normalizer_fn=slim.batch_norm,
            activation_fn=lrelu):
            # Per-layer channel counts, kernel sizes, and strides all
            # come from the architecture dict.
            for i in range(n_layer):
                x = slim.conv2d(
                    x,
                    subnet['output'][i],
                    subnet['kernel'][i],
                    subnet['stride'][i])

    x = slim.flatten(x)
    with slim.arg_scope(
        [slim.fully_connected],
        num_outputs=self.arch['z_dim'],
        weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
        normalizer_fn=None,
        activation_fn=None):
        # No normalizer or activation: these heads are purely linear.
        z_mu = slim.fully_connected(x)
        z_lv = slim.fully_connected(x)
    return z_mu, z_lv
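
Two names above are defined outside this snippet: lrelu (the leaky-ReLU activation passed to the conv layers) and self.arch (the architecture dictionary). Below is a minimal sketch of both; the key names mirror the ones used in the method, but the leak factor and all hyper-parameter values are illustrative assumptions, not the project's actual configuration.

import tensorflow as tf

def lrelu(x, leak=0.02, name='lrelu'):
    # Leaky ReLU: max(x, leak * x). The leak value is an assumption;
    # the repo may use a different default.
    return tf.maximum(x, leak * x, name=name)

# Illustrative architecture dict (values are placeholders):
# three strided conv layers, then a 64-dim latent space.
arch = {
    'z_dim': 64,
    'encoder': {
        'output': [16, 32, 64],              # channels per conv layer
        'kernel': [[7, 1], [7, 1], [7, 1]],  # kernel size per layer
        'stride': [[3, 1], [3, 1], [3, 1]],  # stride per layer
        'l2-reg': 1e-6,                      # weight for slim.l2_regularizer
    },
}

Since z_lv is a log-variance, a latent sample is presumably drawn downstream via the usual reparameterization trick, e.g. z = z_mu + tf.random_normal(tf.shape(z_mu)) * tf.exp(0.5 * z_lv).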