def __init__(self, steps=30, learning_rate=5e-4):
    """Build the infusion-training graph for MNIST (784-dim inputs).

    A learned Markov transition (weights shared across steps) is applied
    `steps` times with a per-step "infusion" of the target `x`; the training
    loss is a ramped sum of per-step Gaussian negative log-likelihoods.
    The chain is then continued for another `steps` steps (loss-free,
    for sampling) so `self.z` holds 2*steps+1 states.

    Args:
        steps: number of supervised transition steps in the chain.
        learning_rate: Adam learning rate.

    Requires module-level `infuse`, `mnist_mean`, `mnist_std`, and
    `gpu_options`, and TF 0.12/1.x-era APIs (`tf.contrib`,
    `tf.placeholder`, `tf.Session`).
    """

    def transition(z, step):
        """One transition step: sample z' ~ N(mu(z), sig(z)^2), clipped to [0, 1].

        Returns (z', mu, sig). All steps share one set of weights via
        variable-scope reuse.
        """
        with tf.variable_scope('transition') as vs:
            # Reuse the weights created on step 1 for every later step.
            if step > 1:
                vs.reuse_variables()
            fc1 = tf.contrib.layers.fully_connected(z, 600, activation_fn=tf.identity)
            fc1 = tf.nn.relu(tf.contrib.layers.batch_norm(fc1))
            fc1 = tf.contrib.layers.fully_connected(fc1, 600, activation_fn=tf.identity)
            fc1 = tf.nn.relu(tf.contrib.layers.batch_norm(fc1))
            # Sigmoid keeps mu in [0, 1] (pixel range) and the raw variance in (0, 1).
            mu = tf.contrib.layers.fully_connected(fc1, 784, activation_fn=tf.sigmoid)
            sig = tf.contrib.layers.fully_connected(fc1, 784, activation_fn=tf.sigmoid)
            # Variance shrinks as 1/step^2 along the chain; the epsilon keeps
            # it strictly positive before the sqrt.
            sig = tf.add(tf.div(sig, step ** 2), 1e-4)
            sig = tf.sqrt(sig)
            # Reparameterized Gaussian sample, clipped to valid pixel range.
            e = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
            z_ = tf.add(mu, tf.multiply(e, sig))  # tf.mul was removed in TF 1.0
            z_ = tf.clip_by_value(z_, 0.0, 1.0)
            return z_, mu, sig

    self.x = tf.placeholder(tf.float32, [None, 784])
    self.alpha = tf.placeholder(tf.float32, [])

    # Data-statistics-matched Gaussian noise, clipped to [0, 1] and infused
    # with the target: the randomized starting state of the chain.
    z = tf.random_normal(tf.shape(self.x), 0, 1, dtype=tf.float32)
    z = mnist_mean + mnist_std * z
    z = tf.clip_by_value(z, 0.0, 1.0)
    self.rand_init = infuse(z, self.x, self.alpha)

    # The chain itself starts from an externally supplied state.
    self.init = tf.placeholder(tf.float32, [None, 784])
    self.z = [self.init]
    z = self.z[0]

    # Supervised part of the chain: per-step NLL of x under N(mu, sig^2),
    # weighted by t/steps so later (lower-variance) steps count more.
    # Infusion rate grows linearly with t (alpha * t).
    self.loss = 0.0
    for t in range(1, steps + 1):
        z, mu, sig = transition(z, t)
        z = infuse(z, self.x, self.alpha * t)
        # NOTE(review): mu/sigma kwargs and log_pdf are the pre-TF-1.2 names
        # (later loc/scale and log_prob) — confirm against the pinned TF version.
        dist = tf.contrib.distributions.Normal(mu=mu, sigma=sig)
        self.loss = self.loss + tf.scalar_mul(
            t / float(steps), tf.reduce_mean(-dist.log_pdf(self.x)))
        self.z.append(z)

    # Loss-free continuation of the chain, used only for sampling/inspection.
    for t in range(steps + 1, steps * 2 + 1):
        z, mu, sig = transition(z, t)
        z = infuse(z, self.x, self.alpha * t)
        self.z.append(z)

    self.trainer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    self.saver = tf.train.Saver(tf.global_variables())
# (extraction residue removed: scraped-page navigation text "评论列表" /
# "文章目录" — "comment list" / "article table of contents"; not code)