def ali_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
    num_units = opts['e_num_filters']
    # Five conv layers as (kernel, stride, channels). Channel counts grow
    # from num_units // 8 up to num_units * 2 while the spatial dimensions
    # shrink; integer division (//) keeps the channel counts integral.
    layer_params = []
    layer_params.append([5, 1, num_units // 8])
    layer_params.append([4, 2, num_units // 4])
    layer_params.append([4, 1, num_units // 2])
    layer_params.append([4, 2, num_units])
    layer_params.append([4, 1, num_units * 2])
    # For a VALID convolution: s = (n - k) // stride + 1
    # For the transposed convolution: n = (s - 1) * stride + k
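    # Worked example with the schedule above: a 32x32 input shrinks as
    # 32 -> 28 -> 13 -> 10 -> 4 -> 1, e.g. (32 - 5) // 1 + 1 = 28,
    # (28 - 4) // 2 + 1 = 13, and so on down to a 1x1 feature map
    # (which the asserts after the loop check).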
    layer_x = input_
    height = int(layer_x.get_shape()[1])
    width = int(layer_x.get_shape()[2])
    assert height == width
    for i, (kernel, stride, channels) in enumerate(layer_params):
        # Track the spatial size analytically; integer division matches
        # the VALID-padding formula above.
        height = (height - kernel) // stride + 1
        width = height
        layer_x = ops.conv2d(
            opts, layer_x, channels, d_h=stride, d_w=stride,
            scope='h%d_conv' % i, conv_filters_dim=kernel, padding='VALID')
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse,
                                     scope='bn%d' % i)
        layer_x = ops.lrelu(layer_x, 0.1)
    assert height == 1
    assert width == 1
    # Then two 1x1 convolutions.
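    # On a 1x1 spatial map, a 1x1 convolution is just a fully connected
    # layer over the channel dimension: first widening to num_units * 2
    # channels, then narrowing to num_units // 2.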
    layer_x = ops.conv2d(opts, layer_x, num_units * 2, d_h=1, d_w=1,
                         scope='conv2d_1x1', conv_filters_dim=1)
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts, layer_x, is_training, reuse,
                                 scope='bnlast')
    layer_x = ops.lrelu(layer_x, 0.1)
    layer_x = ops.conv2d(opts, layer_x, num_units // 2, d_h=1, d_w=1,
                         scope='conv2d_1x1_2', conv_filters_dim=1)
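    # A random encoder outputs Gaussian parameters per input; a common
    # downstream use is the reparameterization trick,
    # z = mean + sigma * eps with eps ~ N(0, I), though exactly how
    # log_latent_sigmas is consumed depends on the training loop.
    # A deterministic encoder returns the latent code directly.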
    if opts['e_is_random']:
        latent_mean = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
        log_latent_sigmas = ops.linear(
            opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
        return latent_mean, log_latent_sigmas
    else:
        return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
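For reference, a minimal sketch of calling this encoder. The opts keys are the ones the method reads above; the TF1-style placeholder, the 32x32x3 input shape (inferred from the layer schedule), and the `model` instance holding the method are assumptions for illustration, not part of the original code.

import tensorflow as tf

opts = {
    'e_num_filters': 1024,   # first layer then gets 1024 // 8 = 128 filters
    'batch_norm': True,
    'e_is_random': False,    # deterministic: return a single latent code
    'latent_space_dim': 64,
}

# Assumed: `model` is an instance of the class that defines ali_encoder.
images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
z = model.ali_encoder(opts, images, is_training=True)  # shape [batch, 64]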