def load_retures_keras():
    """Load the full Reuters newswire dataset as binary bag-of-words vectors.

    NOTE: the function name contains a historical typo ("retures" for
    "reuters"); it is kept unchanged for backward compatibility with callers.

    Returns:
        tuple:
            x (numpy.ndarray): float matrix of shape (num_samples, 1000);
                entry [i, j] is 1.0 if word index j occurs in sample i,
                else 0.0 (binary bag-of-words over the 1000 most
                frequent words).
            y (numpy.ndarray): integer topic labels, one per sample.
    """
    # Fix: the original body referenced `np` without importing numpy,
    # raising NameError at runtime. Imported locally to match the
    # function-local import style used for keras below.
    import numpy as np
    from keras.preprocessing.text import Tokenizer
    from keras.datasets import reuters

    max_words = 1000  # vocabulary size: keep only the 1000 most frequent words
    print('Loading data...')
    # test_split=0. places every sample in the "train" split; the empty
    # test split is deliberately discarded.
    (x, y), (_, _) = reuters.load_data(num_words=max_words, test_split=0.)
    print(len(x), 'train sequences')
    num_classes = np.max(y) + 1  # labels are 0..max, so count = max + 1
    print(num_classes, 'classes')
    print('Vectorizing sequence data...')
    tokenizer = Tokenizer(num_words=max_words)
    # mode='binary': presence/absence indicators rather than counts
    x = tokenizer.sequences_to_matrix(x, mode='binary')
    print('x_train shape:', x.shape)
    return x.astype(float), y
# Web-scrape artifacts (kept as a comment so the module parses):
# "评论列表" (comment list) / "文章目录" (table of contents)