def buildCharEncDec(hidden, RNN, layers, maxlen, chars, dropout=.3):
    """Build and compile a character-level encoder-decoder (denoising autoencoder).

    The input is a one-hot character sequence of shape (maxlen, len(chars)).
    A Dropout layer with noise_shape=(1, maxlen, 1) corrupts it by dropping
    entire time steps: the dropout mask is broadcast over the batch axis and
    the one-hot feature axis, so each kept/dropped decision zeroes a whole
    character position. An RNN encodes the corrupted sequence into a single
    hidden-state vector, which is repeated maxlen times and decoded by a
    stack of RNN layers into a per-timestep softmax over the vocabulary.

    Args:
        hidden: Size of the RNN hidden state (encoder output dimension).
        RNN: Recurrent layer class to instantiate (e.g. LSTM or GRU).
        layers: Number of stacked decoder RNN layers.
        maxlen: Length of the input and output character sequences.
        chars: Vocabulary; len(chars) is the one-hot feature dimension.
        dropout: Fraction of input time steps to drop. Defaults to 0.3.

    Returns:
        A compiled Sequential model (categorical cross-entropy loss, Adam
        optimizer, accuracy metric).
    """
    print('Build model...')
    model = Sequential()
    # Corrupt the input by dropping whole time steps (see docstring for
    # the noise_shape semantics), then "encode" the sequence into a single
    # vector of size `hidden`.
    # Note: for variable-length input sequences, use
    # input_shape=(None, nb_feature) instead.
    model.add(Dropout(dropout, input_shape=(maxlen, len(chars)),
                      noise_shape=(1, maxlen, 1)))
    model.add(RNN(hidden, name="encoder-rnn"))
    # As the decoder's input, repeatedly provide the encoder's last hidden
    # state at every time step: repeat it `maxlen` times, the length of the
    # output sequence.
    model.add(RepeatVector(maxlen, name="encoding"))
    # The decoder can be a single RNN layer or several stacked ones.
    for ii in range(layers):
        # return_sequences=True returns the full output sequence of shape
        # (nb_samples, timesteps, output_dim) rather than only the last
        # output. TimeDistributed below requires the timesteps dimension.
        model.add(RNN(hidden, return_sequences=True,
                      name="decoder%i" % ii))
    # Apply the same Dense layer to every temporal slice: for each output
    # step, score which character should be produced.
    model.add(TimeDistributed(Dense(len(chars), name="dense"), name="td"))
    model.add(Activation('softmax', name="softmax"))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model