def __init__(self, input_size, hidden_size, output_size, n_layers=1, gpu=-1):
    # Build the character-level decoder network.
    self.decoder = RNN(input_size, hidden_size, output_size, n_layers, gpu)
    # Move the model to the GPU when a non-negative device id is given.
    if gpu >= 0:
        print("Use GPU %d" % torch.cuda.current_device())
        self.decoder.cuda()
    # Adam optimizer over the decoder parameters, cross-entropy loss over characters.
    self.optimizer = torch.optim.Adam(self.decoder.parameters(), lr=0.01)
    self.criterion = nn.CrossEntropyLoss()
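
The constructor relies on an RNN module that is not shown in this snippet. As a rough, assumed sketch (a GRU-based character decoder in the style of common PyTorch char-RNN examples; the layer names and the init_hidden helper here are illustrative, not the author's actual definition):

# Assumed sketch of the RNN module instantiated above; not the original definition.
import torch
import torch.nn as nn

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, gpu=-1):
        super(RNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.gpu = gpu
        self.encoder = nn.Embedding(input_size, hidden_size)   # char index -> vector
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)  # recurrent core
        self.out = nn.Linear(hidden_size, output_size)         # logits over characters

    def forward(self, inp, hidden):
        # inp: LongTensor of shape (batch,) holding one character index per sequence
        emb = self.encoder(inp).unsqueeze(0)        # (1, batch, hidden_size)
        output, hidden = self.gru(emb, hidden)      # advance one time step
        return self.out(output.squeeze(0)), hidden  # (batch, output_size), new hidden

    def init_hidden(self, batch_size=1):
        return torch.zeros(self.n_layers, batch_size, self.hidden_size)

With a definition along these lines, the decoder returns unnormalized logits, which is what the nn.CrossEntropyLoss criterion in the constructor expects.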