def conv_up_res(self, opts, noise, is_training, reuse, keep_prob):
    # Decoder that maps a latent code to an image by repeated nearest-neighbor
    # upsampling followed by stride-1 convolutions.
    output_shape = self._data.data_shape
    num_units = opts['g_num_filters']
    batch_size = tf.shape(noise)[0]
    num_layers = opts['g_num_layers']
    data_height = output_shape[0]
    data_width = output_shape[1]
    data_channels = output_shape[2]
    # Spatial size of the first feature map: the output resolution halved once
    # for every upsampling layer.
    height = data_height // 2 ** num_layers
    width = data_width // 2 ** num_layers

    # Project the noise to a small spatial block and reshape it.
    h0 = ops.linear(
        opts, noise, num_units * height * width, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, height, width, num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in range(num_layers - 1):
        # Double the spatial resolution, then halve the number of filters.
        layer_x = tf.image.resize_nearest_neighbor(
            layer_x, (2 * height, 2 * width))
        layer_x = ops.conv2d(opts, layer_x, num_units // 2, d_h=1, d_w=1,
                             scope='conv2d_%d' % i)
        height *= 2
        width *= 2
        num_units //= 2
        if opts['g_3x3_conv'] > 0:
            # Optional residual block of 3x3 convolutions at this resolution.
            before = layer_x
            for j in range(opts['g_3x3_conv']):
                layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1,
                                     scope='conv2d_3x3_%d_%d' % (i, j),
                                     conv_filters_dim=3)
                layer_x = tf.nn.relu(layer_x)
            layer_x += before  # Residual connection.
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse,
                                     scope='bn%d' % i)
        layer_x = tf.nn.relu(layer_x)
        if opts['dropout']:
            # Keep probability is interpolated linearly from near 0.9 at the
            # first layer down to keep_prob at the deepest layer.
            _keep_prob = tf.minimum(
                1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
            layer_x = tf.nn.dropout(layer_x, _keep_prob)

    # Final upsampling to the data resolution and projection to the number of
    # output channels.
    layer_x = tf.image.resize_nearest_neighbor(
        layer_x, (2 * height, 2 * width))
    layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1,
                         scope='last_conv2d_%d' % i)
    if opts['input_normalize_sym']:
        # Data normalized to [-1, 1]: squash the output with tanh.
        return tf.nn.tanh(layer_x)
    else:
        return tf.nn.sigmoid(layer_x)
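
To make the layer schedule concrete, here is a minimal, framework-free sketch that only replays the arithmetic on height, width, num_units and the dropout keep probability from the method above. The concrete settings (a 64x64x3 output, g_num_filters=512, g_num_layers=4, keep_prob=0.8) are illustrative assumptions, not values taken from the original code.

# Sketch of the shape schedule implied by conv_up_res, under assumed settings.
data_height, data_width, data_channels = 64, 64, 3   # assumed output shape
num_layers, num_units, keep_prob = 4, 512, 0.8       # assumed opts values

height = data_height // 2 ** num_layers   # spatial size of the reshaped h0 block
width = data_width // 2 ** num_layers
print('h0: %dx%dx%d' % (height, width, num_units))

for i in range(num_layers - 1):
    # Each layer doubles the resolution and halves the filter count.
    height, width, num_units = 2 * height, 2 * width, num_units // 2
    # Dropout keep probability moves from near 0.9 toward keep_prob with depth.
    kp = min(1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
    print('layer %d: %dx%dx%d, keep_prob=%.3f' % (i, height, width, num_units, kp))

print('output: %dx%dx%d' % (2 * height, 2 * width, data_channels))

Under these assumptions the script prints a 4x4x512 block for h0, then 8x8x256, 16x16x128 and 32x32x64 feature maps with keep probabilities 0.867, 0.833 and 0.800, and a final 64x64x3 output.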