import os
import sys

import numpy as np
import skimage
import skimage.io

# pycocotools is used to decode COCO polygon/RLE segmentations into binary masks
import pycocotools.mask as cocomask

# The REFER API lives under ./external/refer (see the data paths below); the
# exact import path may need adjusting to match the local repo layout.
sys.path.append('./external/refer')
from refer import REFER

# im_processing / text_processing are this repo's own utility modules
from util import im_processing, text_processing


def build_coco_batches(dataset, setname, T, input_H, input_W):
    im_dir = './data/coco/images'
    im_type = 'train2014'
    vocab_file = './data/vocabulary_Gref.txt'

    data_folder = './' + dataset + '/' + setname + '_batch/'
    data_prefix = dataset + '_' + setname
    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)

    # Map the dataset name to the corresponding REFER dataset/split convention
    if dataset == 'Gref':
        refer = REFER('./external/refer/data', dataset='refcocog', splitBy='google')
    elif dataset == 'unc':
        refer = REFER('./external/refer/data', dataset='refcoco', splitBy='unc')
    elif dataset == 'unc+':
        refer = REFER('./external/refer/data', dataset='refcoco+', splitBy='unc')
    else:
        raise ValueError('Unknown dataset %s' % dataset)

    # Keep only the referring expressions that belong to the requested split
    refs = [refer.Refs[ref_id] for ref_id in refer.Refs
            if refer.Refs[ref_id]['split'] == setname]
    vocab_dict = text_processing.load_vocab_dict_from_file(vocab_file)

    n_batch = 0
    for ref in refs:
        im_name = 'COCO_' + im_type + '_' + str(ref['image_id']).zfill(12)
        im = skimage.io.imread('%s/%s/%s.jpg' % (im_dir, im_type, im_name))

        # Decode the annotated segmentation (possibly several polygons) into a
        # single binary mask by taking the per-pixel maximum over components
        seg = refer.Anns[ref['ann_id']]['segmentation']
        rle = cocomask.frPyObjects(seg, im.shape[0], im.shape[1])
        mask = np.max(cocomask.decode(rle), axis=2).astype(np.float32)

        # Only training images are resized and padded to the network input size;
        # validation/test images keep their original resolution
        if 'train' in setname:
            im = skimage.img_as_ubyte(im_processing.resize_and_pad(im, input_H, input_W))
            mask = im_processing.resize_and_pad(mask, input_H, input_W)

        # Replicate grayscale images to 3 channels
        if im.ndim == 2:
            im = np.tile(im[:, :, np.newaxis], (1, 1, 3))

        # Save one .npz file per referring sentence attached to this object
        for sentence in ref['sentences']:
            print('saving batch %d' % (n_batch + 1))
            sent = sentence['sent']
            text = text_processing.preprocess_sentence(sent, vocab_dict, T)
            np.savez(file=data_folder + data_prefix + '_' + str(n_batch) + '.npz',
                     text_batch=text,
                     im_batch=im,
                     mask_batch=(mask > 0),
                     sent_batch=[sent])
            n_batch += 1
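
# ---------------------------------------------------------------------------
# Minimal usage sketch. The entry point and the parameter values below
# (T = 20 tokens per sentence, 320x320 network input) are illustrative
# assumptions, not taken from the original script; adjust them to match the
# model this preprocessing feeds.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    build_coco_batches(dataset='Gref', setname='train',
                       T=20, input_H=320, input_W=320)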