# Assumes "import tensorflow as tf" and "slim = tf.contrib.slim", plus the
# globals z_dim, IMAGE_SIZE and train_phase defined earlier in the post.
def generator(noise):
    # Shared defaults for every transposed convolution; activation_fn is None
    # because the non-linearity is applied after batch norm instead.
    with slim.arg_scope([slim.conv2d_transpose],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.045),
                        biases_initializer=tf.constant_initializer(value=0),
                        activation_fn=None):
        with slim.arg_scope([slim.batch_norm], is_training=train_phase, decay=0.9, epsilon=1e-5,
                            param_initializers={
                                "beta": tf.constant_initializer(value=0),
                                "gamma": tf.random_normal_initializer(mean=1, stddev=0.045)
                            }):
            # Fully connected projection of the noise vector, reshaped into a
            # 4-D feature map: (IMAGE_SIZE // 16)^2 * 512 = 2 * IMAGE_SIZE * IMAGE_SIZE.
            weight = tf.get_variable('Generator/W', [z_dim, 2 * IMAGE_SIZE * IMAGE_SIZE],
                                     initializer=tf.truncated_normal_initializer(stddev=0.045))
            bias = tf.get_variable("Generator/b", [2 * IMAGE_SIZE * IMAGE_SIZE],
                                   initializer=tf.constant_initializer(0))
            out_1 = tf.add(tf.matmul(noise, weight, name="Generator/out_1_matmul"), bias,
                           name="Generator/out_1_add")
            out_1 = tf.reshape(out_1, [-1, IMAGE_SIZE // 16, IMAGE_SIZE // 16, 512],
                               name="Generator/out_1_reshape")
            out_1 = slim.batch_norm(inputs=out_1, activation_fn=tf.nn.relu, scope="Generator/bn_1")
            # Four stride-2 transposed convolutions upsample 16x overall while
            # halving the channel count at each step: 512 -> 256 -> 128 -> 64 -> 3.
            out_2 = slim.conv2d_transpose(out_1, num_outputs=256, kernel_size=[5, 5], stride=2,
                                          padding="SAME", scope="Generator/deconv_2")
            out_2 = slim.batch_norm(inputs=out_2, activation_fn=tf.nn.relu, scope="Generator/bn_2")
            out_3 = slim.conv2d_transpose(out_2, num_outputs=128, kernel_size=[5, 5], stride=2,
                                          padding="SAME", scope="Generator/deconv_3")
            out_3 = slim.batch_norm(inputs=out_3, activation_fn=tf.nn.relu, scope="Generator/bn_3")
            out_4 = slim.conv2d_transpose(out_3, num_outputs=64, kernel_size=[5, 5], stride=2,
                                          padding="SAME", scope="Generator/deconv_4")
            out_4 = slim.batch_norm(inputs=out_4, activation_fn=tf.nn.relu, scope="Generator/bn_4")
            out_5 = slim.conv2d_transpose(out_4, num_outputs=3, kernel_size=[5, 5], stride=2,
                                          padding="SAME", scope="Generator/deconv_5")
            # tanh maps pixel values into [-1, 1], matching images scaled to that range.
            out_5 = tf.nn.tanh(out_5, name="Generator/tanh_5")
            return out_5
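
# A minimal usage sketch (not from the original post): the concrete values for
# z_dim, IMAGE_SIZE and train_phase below are assumptions for illustration;
# use the ones defined elsewhere in the post.
z_dim = 100                              # assumed noise dimensionality
IMAGE_SIZE = 64                          # assumed output size; must be divisible by 16
train_phase = tf.placeholder(tf.bool)    # switches batch norm between train/inference
z = tf.placeholder(tf.float32, [None, z_dim], name="z")
fake_images = generator(z)               # [batch, IMAGE_SIZE, IMAGE_SIZE, 3], values in [-1, 1]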