def dcgan_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
    """Build a DCGAN-style convolutional encoder.

    Stacks `opts['e_num_layers']` strided conv layers whose filter counts
    grow from num_units / 2**(L-1) up to num_units, optionally followed by
    batch norm, ReLU, dropout, and residual 3x3 conv sub-blocks, then maps
    the result to the latent space with a linear layer.

    Args:
        opts: dict of model options (reads 'e_num_filters', 'e_num_layers',
            'batch_norm', 'dropout', 'e_3x3_conv', 'e_is_random',
            'latent_space_dim').
        input_: input image tensor — presumably NHWC; verify against caller.
        is_training: passed to batch norm to select train/inference mode.
        reuse: passed to batch norm for TF variable scope reuse.
        keep_prob: target dropout keep probability at the deepest layer.

    Returns:
        If opts['e_is_random'] is truthy, a (latent_mean, log_latent_sigmas)
        pair of tensors; otherwise a single latent code tensor.
    """
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = input_
    for i in range(num_layers):  # range, not xrange: Python 3 compatible.
        scale = 2**(num_layers - i - 1)
        # Floor division: '/' would yield a float filter count on Python 3.
        layer_x = ops.conv2d(opts, layer_x, num_units // scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse,
                                     scope='bn%d' % i)
        layer_x = tf.nn.relu(layer_x)
        if opts['dropout']:
            # Anneal keep_prob linearly from ~0.9 at the first layer down
            # to `keep_prob` at the last; clamp at 1.0.
            _keep_prob = tf.minimum(
                1., 0.9 - (0.9 - keep_prob) * float(i + 1) / num_layers)
            layer_x = tf.nn.dropout(layer_x, _keep_prob)
        if opts['e_3x3_conv'] > 0:
            before = layer_x
            for j in range(opts['e_3x3_conv']):
                layer_x = ops.conv2d(opts, layer_x, num_units // scale,
                                     d_h=1, d_w=1,
                                     scope='conv2d_3x3_%d_%d' % (i, j),
                                     conv_filters_dim=3)
                layer_x = tf.nn.relu(layer_x)
            layer_x += before  # Residual connection.
    if opts['e_is_random']:
        # Gaussian posterior parameters for a stochastic (VAE-style) encoder.
        latent_mean = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
        log_latent_sigmas = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
        return latent_mean, log_latent_sigmas
    else:
        return ops.linear(opts, layer_x, opts['latent_space_dim'],
                          scope='hlast_lin')
# NOTE(review): removed scraped-page artifact text ("评论列表" / "文章目录",
# i.e. "comment list" / "article table of contents") — bare non-Python text
# here is a syntax error and is not part of the source.