import sys


def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    # Runs inside each worker process: pull batches of indices off
    # index_queue, fetch and collate the samples, and push the result
    # onto data_queue.
    global _use_shared_memory
    _use_shared_memory = True
    # torch.set_num_threads(1)
    while True:
        r = index_queue.get()
        if r is None:  # poison pill: the main process asked the worker to exit
            data_queue.put(None)
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            # ExceptionWrapper (defined elsewhere in dataloader.py) captures
            # the traceback so the main process can re-raise it.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
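The loop above is driven from the main process. Below is a minimal, self-contained sketch of that wiring, not the actual DataLoader code: a toy list stands in for the dataset, an identity function for collate_fn, and two multiprocessing queues carry index batches in and collated samples out; sending None shuts the worker down.

import multiprocessing


def _identity_collate(samples):
    return samples  # stand-in for the real collate_fn


if __name__ == '__main__':
    dataset = list(range(100))             # anything indexable works
    index_queue = multiprocessing.Queue()  # main process -> worker
    data_queue = multiprocessing.Queue()   # worker -> main process

    w = multiprocessing.Process(
        target=_worker_loop,
        args=(dataset, index_queue, data_queue, _identity_collate))
    w.start()

    index_queue.put((0, [0, 1, 2]))        # (batch id, sample indices)
    index_queue.put(None)                  # ask the worker to exit

    print(data_queue.get())                # -> (0, [0, 1, 2])
    assert data_queue.get() is None        # worker acknowledged shutdown
    w.join()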
# numpy_type_map = {
# 'float64': torch.DoubleTensor,
# 'float32': torch.FloatTensor,
# 'float16': torch.HalfTensor,
# 'int64': torch.LongTensor,
# 'int32': torch.IntTensor,
# 'int16': torch.ShortTensor,
# 'int8': torch.CharTensor,
# 'uint8': torch.ByteTensor,
# }
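The commented-out numpy_type_map above maps numpy dtype names to the legacy torch tensor classes; the old default_collate used this lookup to turn a batch of numpy scalars into a single tensor. Below is an illustrative sketch of that dtype-name lookup; the helper name collate_numpy_scalars is mine, not PyTorch's.

import numpy as np
import torch

numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
}


def collate_numpy_scalars(batch):
    # batch: a list of 0-d numpy values sharing one dtype, e.g. np.float32
    py_type = float if batch[0].dtype.name.startswith('float') else int
    return numpy_type_map[batch[0].dtype.name]([py_type(b) for b in batch])


print(collate_numpy_scalars([np.float32(0.5), np.float32(1.5)]))
# -> tensor([0.5000, 1.5000]) of type torch.FloatTensor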