# Module-level imports assumed elsewhere in constraint_factorizer.py:
#   import torch
#   from torch import optim
#   from tqdm import trange
def train(self, lr, iters):
    optimizer = optim.Adam(self.parameters(), lr=lr)
    t = trange(iters)  # tqdm progress bar over training iterations
    for i in t:
        optimizer.zero_grad()
        loss = self.forward(())
        # print(loss.item())
        # The loss components below are expected to be updated inside forward().
        t.set_description('%.3f | %.3f | %.3f | %.3f'
                          % (self.mse, self.divergence, self.world_mse, self.location_mse))
        loss.backward()
        optimizer.step()
    # Recover the learned factor matrices and their reconstruction as NumPy arrays.
    U, V = self.__lookup()
    recon = torch.mm(U, V.t())
    # print(U, V, recon)
    U = U.data.cpu().numpy()
    V = V.data.cpu().numpy()
    recon = recon.data.cpu().numpy()
    return U, V, recon
constraint_factorizer.py file source
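The excerpt above is only the train() method; the surrounding class (presumably an nn.Module subclass, given the self.parameters() call) is not shown in this file source. For context, here is a minimal, self-contained sketch of the same Adam-plus-trange training pattern driven end to end. The class name TinyFactorizer, its shapes, the random placeholder data, and the train_loop name are assumptions for illustration only, not part of the original file.

# Illustrative stand-in only: names, shapes, and data below are assumptions,
# not taken from constraint_factorizer.py. It shows the same training pattern:
# Adam over the model's parameters, a tqdm progress bar carrying a loss readout,
# and a final conversion of the factor matrices to NumPy arrays.
import torch
import torch.nn as nn
from torch import optim
from tqdm import trange


class TinyFactorizer(nn.Module):
    def __init__(self, n_rows, n_cols, rank):
        super().__init__()
        self.U = nn.Parameter(torch.randn(n_rows, rank) * 0.1)
        self.V = nn.Parameter(torch.randn(n_cols, rank) * 0.1)
        self.target = torch.randn(n_rows, n_cols)  # placeholder data
        self.mse = 0.0

    def forward(self):
        # Reconstruct the target from the two factors and return the squared error.
        recon = self.U @ self.V.t()
        loss = ((recon - self.target) ** 2).mean()
        self.mse = loss.item()  # diagnostic read by the progress bar
        return loss

    def train_loop(self, lr, iters):
        optimizer = optim.Adam(self.parameters(), lr=lr)
        t = trange(iters)
        for _ in t:
            optimizer.zero_grad()
            loss = self.forward()
            t.set_description('%.3f' % self.mse)
            loss.backward()
            optimizer.step()
        # Return the factors and their product as NumPy arrays.
        U = self.U.detach().cpu().numpy()
        V = self.V.detach().cpu().numpy()
        recon = (self.U @ self.V.t()).detach().cpu().numpy()
        return U, V, recon


if __name__ == '__main__':
    model = TinyFactorizer(n_rows=50, n_cols=30, rank=5)
    U, V, recon = model.train_loop(lr=1e-2, iters=200)
    print(U.shape, V.shape, recon.shape)

One design note: the original method is named train, which shadows nn.Module.train(mode=True) if the class subclasses nn.Module; the sketch uses a different name (train_loop) so the built-in train/eval mode switch stays available.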