def train():
    """Train the network defined by ``inference`` to fit the target curve.

    Runs plain gradient descent on an MSE objective, pulling mini-batches
    from ``get_train_data()``.  Every 10000 steps the current fit is
    evaluated over one period [0, 2*pi) and plotted against the reference
    curve drawn by ``draw_correct_line()``.

    Side effects: opens a TF session, prints progress, and shows pylab
    plots.  Returns nothing.
    """
    # Hyperparameter: fixed step size for gradient descent.
    learning_rate = 0.01

    # Shapeless placeholders so both batched training data and scalar
    # evaluation points can be fed.
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    net_out = inference(x)

    # Mean squared error.  reduce_mean collapses the per-element squared
    # error to a scalar loss, so the objective is well-defined for any
    # batch size (the original un-reduced tf.square relied on the
    # optimizer implicitly summing gradients).
    loss = tf.reduce_mean(tf.square(net_out - y))

    # Build the training op that minimizes the loss by gradient descent.
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = opt.minimize(loss)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print("start training....")
        for i in range(1000000):
            train_x, train_y = get_train_data()
            sess.run(train_op, feed_dict={x: train_x, y: train_y})

            # Periodically visualize how well the net fits the curve.
            if i % 10000 == 0:
                times = i // 10000
                # Sample one period [0, 2*pi) at 0.01 resolution
                # (~628 points).
                test_x_ndarray = np.arange(0, 2 * np.pi, 0.01)
                test_y_ndarray = np.zeros([len(test_x_ndarray)])
                # Evaluate point by point; net_out does not depend on y,
                # so no y feed is needed here (the original fed a dead
                # y: 1).  NOTE(review): assumes inference() accepts a
                # scalar input — confirm before batching this call.
                for ind, test_x in enumerate(test_x_ndarray):
                    test_y_ndarray[ind] = sess.run(
                        net_out, feed_dict={x: test_x})
                # Draw the reference curve, then overlay the current
                # prediction as a dashed line labeled with the
                # checkpoint count.
                draw_correct_line()
                pylab.plot(test_x_ndarray, test_y_ndarray, '--',
                           label=str(times) + 'times')
                pylab.show()
# (scraped-page residue, commented out so the module parses)
# 评论列表 — "comment list"
# 文章目录 — "article table of contents"