def tokenize(self, text):
    """Tokenize *text* into word/punctuation tokens, then normalize them.

    Splitting is delegated to ``nltk.wordpunct_tokenize``; the resulting
    token sequence is passed through ``self.normalize`` before returning.
    """
    tokens = nltk.wordpunct_tokenize(text)
    return self.normalize(tokens)