model.py source code


Project: deeplearning · Author: zxjzxj9 · Language: Python (TensorFlow 1.x)
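The train() method below is a TensorFlow 1.x training loop: Adam minimizes self.loss, TensorBoard summaries are written to ./log, and a checkpoint is saved every config.nsave epochs. Labels are fed as a tf.SparseTensorValue, suggesting a CTC-style sequence loss.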
# module-level requirements (not shown in this excerpt): import sys; import tensorflow as tf
def train(self):

        # Adam with the configured learning rate; keep the optimizer object
        # separate so the optional clipping variant below can reuse it.
        optimizer = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate,
                                           beta1=0.9, beta2=0.999)
        train_op = optimizer.minimize(self.loss)

        # Optional gradient clipping: replace the minimize() call above with
        #grads = optimizer.compute_gradients(self.loss)
        #for i, (g, v) in enumerate(grads):
        #    if g is not None:
        #        grads[i] = (tf.clip_by_norm(g, 5), v)
        #train_op = optimizer.apply_gradients(grads)

        self.sess = tf.Session()
        s = self.sess   # short alias for the session

        # TensorBoard logging: inspect with `tensorboard --logdir ./log`
        writer = tf.summary.FileWriter("./log", graph=s.graph)
        tf.summary.scalar("loss", self.loss)

        merged_summary = tf.summary.merge_all()
        cnt_total = 0   # global step counter across all epochs
        s.run(tf.global_variables_initializer())

        for epoch in range(self.config.epoch_num):
            print("In epoch %d" % epoch)
            cnt = 0

            for img, label, seq_len in self.datasrc.get_iter(16, self.config.batch_size):

                # `label` is an (indices, values, dense_shape) triple, unpacked
                # into a tf.SparseTensorValue for the sparse label placeholder.
                loss, _, summary = s.run([self.loss, train_op, merged_summary], feed_dict={
                    self.input: img,
                    self.output: tf.SparseTensorValue(*label),
                    # every sample is fed with the fixed length split_num
                    self.seq_len: [self.config.split_num] * len(seq_len),
                #    self.seq_len: seq_len,
                    self.keep_prob: 1.0,
                })

                #print("loss %f" % loss)

                writer.add_summary(summary, cnt_total)
                sys.stdout.write("Current loss: %.3e, current batch: %d \r" % (loss, cnt))
                sys.stdout.flush()   # flush so the \r progress line actually refreshes
                cnt += 1
                cnt_total += 1

            # save a checkpoint every nsave epochs
            if epoch % self.config.nsave == self.config.nsave - 1:
                self.saver.save(s, "./log/model_epoch_%d.ckpt" % (epoch + 1))
        print("")   # newline after the \r progress line