def __call__(self, hx, cx, xs, flag_train, args):
    """Run the stacked n-step LSTM over a batch of sequences.

    hx, cx: initial hidden / cell states; when None they are created
        via self.init_hx(xs). Presumably shaped (n_layers, batch, dim)
        — TODO confirm against init_hx.
    xs: list of per-sequence input tensors (variable lengths allowed
        by chaFunc.n_step_lstm).
    flag_train / args: only consulted on the Chainer-v1 code path
        (train= / use_cudnn= kwargs were removed in Chainer v2).

    Returns (hy, cy, hlist): final hidden state, final cell state, and
    the per-step outputs stacked into one Variable via chaFunc.stack.
    """
    if hx is None:
        hx = self.init_hx(xs)
    if cx is None:
        cx = self.init_hx(xs)

    # NOTE: dropout is handled inside chaFunc.n_step_lstm via
    # self.dropout_rate; no separate dropout call is needed here.
    common = (self.n_layers, self.dropout_rate, hx, cx, self.ws, self.bs, xs)
    if args.chainer_version_check[0] == 2:
        # Chainer v2+: train/use_cudnn keyword arguments no longer exist.
        hy, cy, ys = chaFunc.n_step_lstm(*common)
    else:
        # Chainer v1: training mode and cuDNN use are passed explicitly.
        hy, cy, ys = chaFunc.n_step_lstm(
            *common, train=flag_train, use_cudnn=self.use_cudnn)

    # ys is a list of per-position outputs; stack them into a single
    # chainer.Variable so callers get one tensor.
    hlist = chaFunc.stack(ys)
    return hy, cy, hlist
# NOTE(review): the original trailing comment here was mojibake (likely
# Japanese, describing this LSTM wrapper class). Two following lines of
# webpage residue ("评论列表" / "文章目录" — "comment list" / "article TOC")
# were not Python and have been folded into this comment to keep the
# module syntactically valid.