def trainMLPWithData(self, input_vector, label_vector, printSteps=250):
    # Requires module-level imports:
    #   import numpy as np
    #   import tensorflow as tf
    #   from sklearn import cross_validation
    # Hold out 30% of the data for testing.
    percent_split = 0.7
    trX, teX, trY, teY = cross_validation.train_test_split(
        input_vector, label_vector, test_size=(1.0 - percent_split), random_state=0)

    # Network dimensions: 10 input features, one 10-unit hidden layer, 8 output classes.
    n_inputs = 10
    n_hidden = 10
    n_outputs = 8
    X = tf.placeholder("float", [None, n_inputs])
    Y = tf.placeholder("float", [None, n_outputs])
    w_h = tf.Variable(tf.random_normal([n_inputs, n_hidden], stddev=0.01))
    w_o = tf.Variable(tf.random_normal([n_hidden, n_outputs], stddev=0.01))

    # Dropout keep probabilities are fed at run time so they can differ
    # between training and evaluation.
    p_keep_input = tf.placeholder("float")
    p_keep_hidden = tf.placeholder("float")
    # Keep X bound to the placeholder and apply dropout to a separate tensor;
    # rebinding X to the dropout op would make feed_dict={X: ...} bypass it.
    X_drop = tf.nn.dropout(X, p_keep_input)
    h = tf.nn.relu(tf.matmul(X_drop, w_h))
    h = tf.nn.dropout(h, p_keep_hidden)
    py_x = tf.matmul(h, w_o)

    learnRate = 0.01
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
    train_step = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
    # train_step = tf.train.GradientDescentOptimizer(learnRate).minimize(cost)

    # Add accuracy-checking nodes; compare predictions against the Y placeholder
    # (not the raw teY array) so the node works with whatever batch is fed.
    tf_correct_prediction = tf.equal(tf.argmax(py_x, 1), tf.argmax(Y, 1))
    tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))

    # Init variables
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    k = []
    for i in range(10000):
        # One full-batch training step with dropout enabled.
        sess.run(train_step, feed_dict={X: trX, Y: trY,
                                        p_keep_input: 0.8, p_keep_hidden: 0.5})
        # Evaluate test accuracy with dropout disabled (keep probability 1.0).
        result = sess.run(tf_accuracy, feed_dict={X: teX, Y: teY,
                                                  p_keep_input: 1.0, p_keep_hidden: 1.0})
        # Save the accuracy for this iteration.
        k.append(result)
        if i % printSteps == 0:
            print("Run {},{}".format(i, result))

    k = np.array(k)
    print("Max accuracy: {}".format(k.max()))
    print('MLP training with %s datapoints :: Done \n\n' % len(input_vector))
    self.trainedModel = sess
    return (self.trainedModel, k.max())
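
# A minimal usage sketch (an assumption, not part of the original code): the
# class name "GestureClassifier" and the arrays below are hypothetical and only
# illustrate the expected shapes — features of shape [n_samples, 10] and one-hot
# labels of shape [n_samples, 8], matching n_inputs/n_outputs above.
#
#     import numpy as np
#
#     clf = GestureClassifier()   # hypothetical class that defines trainMLPWithData
#     features = np.random.rand(500, 10).astype("float32")
#     labels = np.eye(8)[np.random.randint(0, 8, 500)]   # one-hot label vectors
#     sess, best_acc = clf.trainMLPWithData(features, labels, printSteps=500)
#     print("Best test accuracy:", best_acc)
#
# The method returns the live tf.Session (stored in self.trainedModel) together
# with the best test accuracy seen during training.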