from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer


def respond(sentences):
    tokenized_sentences = sent_tokenize(sentences)
    stop_words = set(stopwords.words("english"))  # stop words from the local NLTK data
    porter_stemmer = PorterStemmer()
    if len(tokenized_sentences) > 1:  # input contains more than one sentence
        pos_tagged = parts_of_speechtag(sentences)
        print(tuple(pos_tagged))
        filtered_stem_words = []
        for sentence in tokenized_sentences:
            words = word_tokenize(sentence)  # tokenize each sentence into words
            filtered_words = [w for w in words if w not in stop_words]  # drop stop words
            filtered_stem_words.extend(porter_stemmer.stem(w) for w in filtered_words)
        return filtered_stem_words
    else:  # single sentence
        pos_tagged = parts_of_speechtag(sentences)
        print(type(pos_tagged))
        words = word_tokenize(sentences)
        filtered_words = [w for w in words if w not in stop_words]
        return [porter_stemmer.stem(w) for w in filtered_words]
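
The `parts_of_speechtag` helper is defined elsewhere in the article; for reference, a minimal sketch consistent with how it is called above (any function returning an iterable of (word, tag) pairs would work) might look like this:

from nltk import pos_tag
from nltk.tokenize import word_tokenize  # already imported above; repeated so the sketch stands alone

def parts_of_speechtag(text):
    # tag every token in the raw text with its part of speech
    return pos_tag(word_tokenize(text))

A quick way to exercise both branches, assuming the punkt and stopwords corpora have already been fetched with nltk.download:

print(respond("Hello there. How are you today?"))  # multi-sentence path
print(respond("Tell me a joke"))                   # single-sentence path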