def split_ingr(x):
    """Split a stringified ingredient list into cleaned, lemmatized phrases.

    Parameters
    ----------
    x : str
        A string that looks like a Python list, e.g. "[flour, brown sugar]".
        The surrounding brackets are stripped and items are split on commas.

    Returns
    -------
    list[str]
        One cleaned string per ingredient: non-alphabetic characters replaced
        by spaces, tokens lowercased and lemmatized, then re-joined with
        single spaces.
    """
    wnl = WordNetLemmatizer()
    # NOTE(review): constructing WordNetLemmatizer per call is wasteful if this
    # runs over a large dataset; consider hoisting to module level.
    items = x.strip('[]').split(',')
    # Was: dead `cleanlist = []` assignment immediately overwritten — removed.
    return [
        ' '.join(
            wnl.lemmatize(word.lower())
            for word in word_tokenize(re.sub(r'[^a-zA-Z]', ' ', item))
        )
        for item in items
    ]
# Remove low-information words from ingredients (the stop-word list below could be expanded).
# (stray blog-platform residue from copy-paste, commented out to keep the module importable)
# 评论列表  -- "comment list"
# 文章目录  -- "article table of contents"