def __init__(self, mode=DatasetMode.small):
    """Set up the model scaffolding and load the train/test split for *mode*."""
    self.model = Sequential()
    self.optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                          epsilon=1e-08, decay=0.0)
    self.activation = 'softmax'
    self.loss = 'categorical_crossentropy'
    self.metrics = top_k_accuracy_func_list([50, 100, 200, 300, 400, 500])
    self.callbacks = [EarlyStopping(monitor='val_loss', patience=3)]

    # Song indices in x_train/x_test are shifted by one relative to
    # y_train/y_test because of zero padding: a song stored at index 21
    # in the dataset appears as 22 in x_* but stays 21 in y_* (matching
    # song_hash). The intent is for the network to learn to ignore the
    # zero-padding token.
    (x_train, y_train), (x_test, y_test), songs = load(mode)
    self.song_hash = songs
    self.max_length = max(len(playlist) for playlist in x_train)

    padded_train = sequence.pad_sequences(x_train, maxlen=self.max_length)
    padded_test = sequence.pad_sequences(x_test, maxlen=self.max_length)
    self.x_train = np.asarray(padded_train, dtype="int64")
    self.x_test = np.asarray(padded_test, dtype="int64")
    # num_classes is len(song_hash) + 1 because class 0 (padding) is included.
    self.y_train = to_categorical(y_train, len(self.song_hash) + 1)
    self.y_test = to_categorical(y_test, len(self.song_hash) + 1)