def _classifier(self, x, is_training):
    """Convolutional classifier head.

    Pushes `x` through the conv stack described by
    `self.arch['classifier']` — each layer is conv2d + batch-norm +
    leaky-ReLU, and the first 3 channels of every layer's activation are
    logged as an image summary — then flattens and applies one linear
    projection to class logits.

    Args:
        x: input tensor; assumed NHWC with >= 3 channels — TODO confirm
            against callers.
        is_training: Python bool or bool tensor; toggles batch-norm
            train/inference behavior.

    Returns:
        y_logit: un-normalized class scores of shape [batch, y_dim].
    """
    cfg = self.arch['classifier']
    l2_reg = slim.l2_regularizer(cfg['l2-reg'])
    with slim.arg_scope(
            [slim.batch_norm],
            scale=True, scope='BN',
            updates_collections=None,
            # decay=0.9, epsilon=1e-5, # [TODO] Test these hyper-parameters
            is_training=is_training):
        with slim.arg_scope(
                [slim.conv2d],
                weights_regularizer=l2_reg,
                normalizer_fn=slim.batch_norm,
                activation_fn=lrelu):
            for layer, n_out in enumerate(cfg['output']):
                x = slim.conv2d(
                    x,
                    n_out,
                    cfg['kernel'][layer],
                    cfg['stride'][layer])
                # Summary of the first 3 feature maps; the transpose
                # rearranges axes so each batch element maps onto a
                # channel of the logged image — presumably for easier
                # side-by-side viewing in TensorBoard; verify intent.
                tf.summary.image(
                    'down-sample{:d}'.format(layer),
                    tf.transpose(x[:, :, :, 0:3], [2, 1, 0, 3]))
    x = slim.flatten(x)
    # Plain linear layer: no normalizer, no activation — raw logits.
    with slim.arg_scope(
            [slim.fully_connected],
            num_outputs=self.arch['y_dim'],
            weights_regularizer=l2_reg,
            normalizer_fn=None,
            activation_fn=None):
        y_logit = slim.fully_connected(x)
    return y_logit
# Source file: cvae.py (page-scrape metadata — view/favorite/like/comment counters — removed)