import lasagne
from lasagne.layers import (InputLayer, LSTMLayer, DropoutLayer,
                            SliceLayer, DenseLayer)


def build_model(input_var=None):
    # Layer sizes: 1 input feature, two stacked LSTM layers with 5 and 10
    # units, and a single linear output unit.
    layers = [1, 5, 10, 1]
    l_in = InputLayer((None, None, layers[0]), input_var=input_var)
    l_lstm1 = LSTMLayer(l_in, layers[1])
    l_lstm1_dropout = DropoutLayer(l_lstm1, p=0.2)
    l_lstm2 = LSTMLayer(l_lstm1_dropout, layers[2])
    l_lstm2_dropout = DropoutLayer(l_lstm2, p=0.2)
    # The objective of this task depends only on the final value produced by
    # the network, so a SliceLayer extracts the LSTM's output after the
    # entire input sequence has been processed: the last value along the
    # second (sequence-length) dimension.
    l_slice = SliceLayer(l_lstm2_dropout, -1, 1)
    l_out = DenseLayer(l_slice, 1, nonlinearity=lasagne.nonlinearities.linear)
    return l_out
Source file: recurrent_lasagne_power.py
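The snippet ends at the network definition. Below is a minimal usage sketch showing how a Lasagne network like this one is typically compiled into Theano prediction and training functions. The symbolic variable names (input_var, target_var), the squared-error loss, the Adam optimizer, and the learning rate are illustrative assumptions, not part of the original file.

import theano
import theano.tensor as T
import lasagne

# Symbolic inputs: a (batch, sequence length, features) tensor and
# a (batch, 1) target matrix. These names are hypothetical.
input_var = T.tensor3('inputs')
target_var = T.matrix('targets')

network = build_model(input_var)

# Deterministic pass (dropout disabled) for prediction.
prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([input_var], prediction)

# Stochastic pass (dropout enabled) for training with a mean-squared-error
# loss and Adam updates; both are assumed choices for this sketch.
train_output = lasagne.layers.get_output(network)
loss = lasagne.objectives.squared_error(train_output, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([input_var, target_var], loss, updates=updates)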