from typing import List  # Token is defined in the surrounding module (not shown here).

def split_words(self, sentence: str) -> List[Token]:
    # Import here because NLTK is slow to load and unnecessary by default.
    from nltk.tokenize import word_tokenize
    # Lowercase the sentence, then let NLTK split out words and punctuation.
    return [Token(t) for t in word_tokenize(sentence.lower())]
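
For context, here is a minimal, self-contained sketch of how such a splitter might be used. The Token and WordSplitter classes below are assumptions made for illustration, not the original author's code; in the original, Token presumably comes from the enclosing tokenizer module. Running this requires NLTK's punkt tokenizer models (nltk.download("punkt")).

# Hypothetical standalone version; Token and WordSplitter are illustrative stand-ins.
from typing import List
from nltk.tokenize import word_tokenize


class Token:
    """Hypothetical lightweight wrapper around a token string."""
    def __init__(self, text: str) -> None:
        self.text = text

    def __repr__(self) -> str:
        return f"Token({self.text!r})"


class WordSplitter:
    def split_words(self, sentence: str) -> List[Token]:
        # Lowercase, tokenize with NLTK, and wrap each string in a Token.
        return [Token(t) for t in word_tokenize(sentence.lower())]


if __name__ == "__main__":
    splitter = WordSplitter()
    print(splitter.split_words("NLTK splits punctuation, too!"))
    # Expected output:
    # [Token('nltk'), Token('splits'), Token('punctuation'), Token(','), Token('too'), Token('!')]

Note that word_tokenize separates punctuation into its own tokens, which is often what downstream NLP components expect; lowercasing before tokenization trades case information for a smaller vocabulary.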