from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import Adamax


def train_mlp1(x_train, y_train, x_test, y_test, input_dim, num_classes=24):
"""
:param x_train:
:param y_train:
:param x_test:
:param y_test:
:param input_dim:
:param num_classes:
:return:
"""
    model = Sequential()
    model.add(Dense(512, input_dim=input_dim))
    model.add(Activation('relu'))  # An "activation" is just a non-linear function applied to the output of the layer
    # above. Here, with a "rectified linear unit", we clamp all values below 0 to 0.
    model.add(Dropout(0.1))  # Dropout helps protect the model from memorizing or "overfitting" the training data
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.1))
    model.add(Dense(386))
    model.add(Activation('relu'))
    model.add(Dropout(0.1))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))  # This special "softmax" activation, among other things,
    # ensures the output is a valid probability distribution, that is
    # that its values are all non-negative and sum to 1.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5),
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=40, epochs=16, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    return score[1]  # score[0] is the loss; score[1] is the accuracy
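
A minimal usage sketch follows. The random arrays, their shapes, and the input_dim value are hypothetical placeholders, not part of the original file; the to_categorical helper is Keras's own. Because the model is compiled with categorical_crossentropy, integer class labels must be one-hot encoded before calling train_mlp1.

if __name__ == '__main__':
    import numpy as np
    from keras.utils import to_categorical

    # Hypothetical data: 1000 training and 200 test samples with 128 features each.
    input_dim = 128
    x_train = np.random.rand(1000, input_dim).astype('float32')
    y_train = to_categorical(np.random.randint(0, 24, size=1000), num_classes=24)
    x_test = np.random.rand(200, input_dim).astype('float32')
    y_test = to_categorical(np.random.randint(0, 24, size=200), num_classes=24)

    acc = train_mlp1(x_train, y_train, x_test, y_test, input_dim)
    print('test accuracy:', acc)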
Source file: classification_mlp.py