koth_lstm.py source code (Python)


Project: deeplearning, author: fanfanfeng
# Imports assumed from the surrounding file (this excerpt shows only __init__);
# `nlp_segment` and `data_loader` are project-local modules.
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib import crf
from tensorflow.contrib.layers import l2_regularizer

# The original code refers to the same flags object both as
# `nlp_segment.flags` and as `FLAGS`; this alias reconciles the two.
FLAGS = nlp_segment.flags

def __init__(self):
    self.embeddingSize = nlp_segment.flags.embedding_size
    self.num_tags = nlp_segment.flags.num_tags
    self.num_hidden = nlp_segment.flags.num_hidden
    self.learning_rate = nlp_segment.flags.learning_rate
    self.batch_size = nlp_segment.flags.batch_size
    self.model_save_path = nlp_segment.model_save_path
    # The original snippet uses self.initializer below without defining it;
    # a truncated-normal initializer (matching the softmax weights) is
    # assumed here so the graph builds.
    self.initializer = tf.truncated_normal_initializer(stddev=0.01)

    # [batch, max_sentence_len] token ids and per-token tag labels.
    self.input = tf.placeholder(tf.int32,
                                shape=[None, FLAGS.max_sentence_len],
                                name="input_placeholder")
    self.label = tf.placeholder(tf.int32,
                                shape=[None, FLAGS.max_sentence_len],
                                name="label_placeholder")
    # Keep probability for tf.nn.dropout.
    self.dropout = tf.placeholder(tf.float32, name="dropout")

    with tf.name_scope("embedding_layer"):
        # Pre-trained word vectors loaded from disk.
        self.word_embedding = tf.Variable(data_loader.load_w2v(nlp_segment.word_vec_path),
                                          name="word_embedding")
        inputs_embed = tf.nn.embedding_lookup(self.word_embedding, self.input)
        # self.length (defined elsewhere in the class, not shown in this
        # excerpt) returns the unpadded length of each sentence.
        length = self.length(self.input)
        self.length_64 = tf.cast(length, tf.int64)
        reuse = None  # if self.trainMode else True

        # if trainMode:
        #     word_vectors = tf.nn.dropout(word_vectors, 0.5)
        with tf.name_scope("rnn_fwbw") as scope:
            # Bidirectional LSTM over the unstacked time steps.
            lstm_fw = rnn.LSTMCell(self.num_hidden, use_peepholes=True)
            lstm_bw = rnn.LSTMCell(self.num_hidden, use_peepholes=True)
            inputs = tf.unstack(inputs_embed, nlp_segment.flags.max_sentence_len, 1)
            outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs,
                                                         sequence_length=self.length_64,
                                                         dtype=tf.float32)
        output = tf.reshape(outputs, [-1, self.num_hidden * 2])
        # if self.trainMode:
        output = tf.nn.dropout(output, self.dropout)

    with tf.variable_scope('Softmax') as scope:
        # Per-token unary scores for the CRF: a linear projection from the
        # concatenated forward/backward states to the tag space.
        self.W = tf.get_variable(shape=[self.num_hidden * 2, self.num_tags],
                                 initializer=tf.truncated_normal_initializer(stddev=0.01),
                                 name='weights',
                                 regularizer=l2_regularizer(0.001))
        # The original passed name='bias' to tf.zeros instead of tf.Variable;
        # fixed so the variable itself is named.
        self.b = tf.Variable(tf.zeros([self.num_tags]), name='bias')
        matricized_unary_scores = tf.matmul(output, self.W) + self.b
        # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
        self.unary_scores = tf.reshape(matricized_unary_scores,
                                       [-1, FLAGS.max_sentence_len, self.num_tags])

    with tf.name_scope("crf"):
        # Linear-chain CRF on top of the per-token unary scores.
        self.transition_params = tf.get_variable("transitions",
                                                 shape=[self.num_tags, self.num_tags],
                                                 initializer=self.initializer)
        log_likelihood, self.transition_params = crf.crf_log_likelihood(
            self.unary_scores, self.label, self.length_64, self.transition_params)

    self.loss = tf.reduce_mean(-log_likelihood)
    self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
    self.saver = tf.train.Saver()
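
The excerpt calls self.length(...) without showing its definition, and the class name is not shown either. Below is a minimal sketch of a common implementation of that helper, plus one hypothetical training step; `Model`, `batch_x`, and `batch_y` are placeholder names introduced only for illustration, assuming TF 1.x and zero-padded int32 inputs. At inference time, the learned `transition_params` would typically be fed to crf.viterbi_decode to recover the best tag sequence.

import tensorflow as tf

# Common implementation of the missing length() helper: count the
# non-padding (non-zero) token ids in each row.
def length(data):
    used = tf.sign(tf.abs(data))        # 1 where id != 0, else 0
    return tf.reduce_sum(used, axis=1)  # unpadded length per sentence

# Hypothetical one-step training call; Model, batch_x, and batch_y
# stand in for names not shown in the excerpt.
model = Model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss = sess.run(
        [model.train_op, model.loss],
        feed_dict={model.input: batch_x,   # [batch, max_sentence_len] ids
                   model.label: batch_y,   # [batch, max_sentence_len] tags
                   model.dropout: 0.5})    # keep_prob for tf.nn.dropout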