def __init__(self, input_dim, hidden_dim, epoch=250, learning_rate=0.001):
    """Build a single-hidden-layer autoencoder graph (TensorFlow 1.x API).

    Args:
        input_dim: Dimensionality of each input vector.
        hidden_dim: Size of the encoded (bottleneck) representation.
        epoch: Number of training epochs, stored for the training loop.
        learning_rate: Step size for the Adam optimizer.
    """
    self.epoch = epoch
    self.learning_rate = learning_rate

    # Batch of input vectors; batch size left dynamic (None).
    x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])

    with tf.name_scope("encode"):
        weights = tf.Variable(
            tf.random_normal([input_dim, hidden_dim], dtype=tf.float32),
            name="weights")
        biases = tf.Variable(tf.zeros([hidden_dim]), name="biases")
        encoded = tf.nn.tanh(tf.matmul(x, weights) + biases)

    with tf.name_scope("decode"):
        # Decoder has its own weights (not tied/transposed from the encoder's).
        weights = tf.Variable(
            tf.random_normal([hidden_dim, input_dim], dtype=tf.float32),
            name="weights")
        biases = tf.Variable(tf.zeros([input_dim]), name="biases")
        # Linear output layer: the reconstruction of the input.
        decoded = tf.matmul(encoded, weights) + biases

    self.x = x
    self.encoded = encoded
    self.decoded = decoded

    # RMSE reconstruction loss. BUGFIX: tf.sub was removed in TF 1.0;
    # tf.subtract is the supported name with identical behavior.
    self.loss = tf.sqrt(
        tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))
    self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
    self.saver = tf.train.Saver()
# NOTE(review): the following two lines were webpage-scrape residue, not code —
# "评论列表" ("comment list") and "文章目录" ("article table of contents") are
# blog-page navigation text. Left as a comment so the file parses as Python.