def eval_batch(dr_model, ub, up, batch_size, is_reordered = False):
    '''
    Score every (user, product) pair from each user's purchase history
    with dr_model, processing users one batch at a time.

    Parameters:
    - dr_model: trained model exposing .config.cuda, .encode.weight,
      .init_hidden(batch_size) and callable as dr_model(baskets, lens, hidden)
    - ub: users' baskets (iterable accepted by batchify)
    - up: users' history purchases — assumes a DataFrame with 'user_id' and
      'product_id' columns, where 'product_id' holds one sequence per user
    - batch_size: number of users per batch
    - is_reordered: if True, batchify yields two extra fields
      (r_baskets, h_baskets) which are unpacked but unused here

    Returns:
    - id_u:    list of user ids
    - item_u:  list of numpy arrays of each user's history product ids
    - score_u: list of numpy arrays of <u,p> scores per user
    - dynam_u: list of numpy arrays of each user's latest dynamic representation
    '''
    # turn on evaluation mode (disables dropout etc.)
    dr_model.eval()
    is_cuda = dr_model.config.cuda
    item_embedding = dr_model.encode.weight
    dr_hidden = dr_model.init_hidden(batch_size)
    id_u, item_u, score_u, dynam_u = [], [], [], []
    start_time = time()
    num_batchs = ceil(len(ub) / batch_size)
    for i, x in enumerate(batchify(ub, batch_size, is_reordered)):
        if is_reordered:
            baskets, lens, uids, r_baskets, h_baskets = x
        else:
            baskets, lens, uids = x
        dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
        for uid, l, du in zip(uids, lens, dynamic_user):
            # latest (last time-step) dynamic representation of this user
            du_latest = du[l - 1].unsqueeze(0)
            # calculating <u,p> score for all history <u,p> pairs
            # (renamed loop var: do not shadow the batch counter `i`)
            history_item = [int(p) for p in up[up.user_id == uid]['product_id'].values[0]]
            history_item = torch.cuda.LongTensor(history_item) if is_cuda else torch.LongTensor(history_item)
            score_up = torch.mm(du_latest, item_embedding[history_item].t()).cpu().data.numpy()[0]
            id_u.append(uid)
            dynam_u.append(du_latest.cpu().data.numpy()[0])
            item_u.append(history_item.cpu().numpy())
            score_u.append(score_up)
        # Logging: per-batch wall-clock time
        elapsed = time() - start_time
        start_time = time()
        print('[Predicting]| Batch {:5d} / {:5d} | seconds/batch {:02.02f}'.format(i, num_batchs, elapsed))
    return id_u, item_u, score_u, dynam_u