def main(params):
    # Load the training questions and keep only words that occur at least
    # word_count_threshold times; everything else will be paraphrased below.
    input_train_json = json.load(open(params['input_train_json'], 'r'))
    print("Load spaCy with GloVe vectors")
    nlp = spacy.load('en', vectors='en_glove_cc_300_1m_vectors')
    words_to_keep = build_vocab(
        nlp.tokenizer,
        [img['question'] for img in input_train_json],
        int(params['word_count_threshold']))
    # Store the GloVe vectors of the retained words, so that rare words can
    # later be mapped to their nearest in-vocabulary neighbour.
    vectors = sense2vec.vectors.VectorMap(nlp.vocab.vectors_length)
    for string in words_to_keep:
        word = nlp.vocab[string]
        vectors.borrow(word.orth_, 1, numpy.ascontiguousarray(word.vector))
    # Set each lexeme's NORM attribute: retained words map to themselves,
    # other words with a vector map to their most similar retained word,
    # and everything else falls back to the word shape (e.g. 'Xxxx').
    replaced = 0
    paraphrases = []
    for i, word in enumerate(nlp.vocab):
        if word.orth_ in words_to_keep:
            word.norm_ = word.orth_
        elif word.lower_ in words_to_keep:
            word.norm_ = word.lower_
        elif word.is_alpha and word.has_vector:
            vector = numpy.ascontiguousarray(word.vector, dtype='float32')
            synonyms, scores = vectors.most_similar(vector, 1)
            word.norm_ = synonyms[0]
            paraphrases.append((word.orth_, word.norm_))
        else:
            word.norm_ = word.shape_
        if i and i % 10000 == 0:
            # Guard against an empty paraphrase list early in the loop.
            example = random.choice(paraphrases) if paraphrases else ('-', '-')
            print(i, 'words processed. Example: %s --> %s' % example)
    print('%d vector-based paraphrases' % len(paraphrases))
    if not os.path.exists(params['spacy_data']):
        os.mkdir(params['spacy_data'])
    if not os.path.exists(os.path.join(params['spacy_data'], 'vocab')):
        os.mkdir(os.path.join(params['spacy_data'], 'vocab'))
    if not os.path.exists(os.path.join(params['spacy_data'], 'tokenizer')):
        os.mkdir(os.path.join(params['spacy_data'], 'tokenizer'))
    nlp.vocab.dump(os.path.join(params['spacy_data'], 'vocab', 'lexemes.bin'))
    with io.open(os.path.join(params['spacy_data'], 'vocab', 'strings.json'), 'w',
                 encoding='utf8') as file_:
        nlp.vocab.strings.dump(file_)
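
# The following driver is a minimal, hypothetical sketch of how main() might be
# invoked from the command line. The argument names mirror the params keys used
# above ('input_train_json', 'word_count_threshold', 'spacy_data'), but the
# original script's actual CLI (flags, defaults, extra options) is not shown in
# this excerpt, so treat it only as an illustration.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_train_json', required=True,
                        help='JSON file with the training questions')
    parser.add_argument('--word_count_threshold', type=int, default=0,
                        help='frequency threshold passed to build_vocab')
    parser.add_argument('--spacy_data', default='spacy_data',
                        help='output directory for the customised spaCy data')
    args = parser.parse_args()
    main(vars(args))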