def tokenize_sentences(text):
    """Split *text* into sentences using NLTK's pre-trained English Punkt model.

    Parameters
    ----------
    text : str
        The raw text to segment.

    Returns
    -------
    list[str]
        The sentences of *text*, in order.

    Raises
    ------
    LookupError
        If the 'punkt' resource has not been downloaded
        (run ``nltk.download('punkt')`` once beforehand).
    """
    # Local import so modules that never call this function don't need nltk.
    import nltk.data

    # nltk.data.load caches loaded resources, so repeated calls do not
    # re-unpickle the model from disk.
    sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    return sent_tokenizer.tokenize(text)