def build_lstm_autoencoder(autoencoder, X_train, X_test):
    """Append a sequence-to-sequence LSTM autoencoder to *autoencoder*.

    Written against the Keras 0.x API (``TimeDistributedDense``,
    ``AutoEncoder`` container, positional ``LSTM(input_dim, output_dim)``).
    NOTE(review): relies on module-level globals ``input_dim`` and
    ``activation`` defined elsewhere in this file — confirm they are set
    before this is called.

    Parameters
    ----------
    autoencoder : keras.models.Sequential
        Model the layers are added to (mutated in place).
    X_train, X_test : np.ndarray
        2-D arrays of shape (samples, features).

    Returns
    -------
    tuple
        (autoencoder, X_train, X_test) with the arrays reshaped to
        (samples, 1, features) so the LSTM sees a length-1 sequence.
    """
    # Insert a singleton time axis: (samples, features) -> (samples, 1, features).
    X_train = X_train[:, np.newaxis, :]
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)

    # The TimeDistributedDense isn't really necessary, however you need a lot
    # of GPU memory to do 784x394-394x784 — it cheaply shrinks the feature
    # dimension to 16 before the recurrent layers.
    autoencoder.add(TimeDistributedDense(input_dim, 16))
    # output_reconstruction=False: the model outputs the 8-d code, not the
    # reconstruction, once training is done.
    autoencoder.add(AutoEncoder(
        encoder=LSTM(16, 8, activation=activation, return_sequences=True),
        decoder=LSTM(8, input_dim, activation=activation, return_sequences=True),
        output_reconstruction=False))
    return autoencoder, X_train, X_test
评论列表
文章目录