model.py source code


Project: tf_han    Author: AlbertXiebnu
# Excerpt: the document-level build method of a hierarchical attention
# network (HAN). Written against the legacy TensorFlow 0.x API
# (tf.split(dim, num, value), rnn.bidirectional_rnn), so `rnn` and
# `rnn_cell` are imported from tensorflow.python.ops.
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

def build(self, inputs, keep_prob, n_classes, word_embedding):
    # inputs: [batch, max_sen, max_len] word-id tensor. Rearrange it into
    # a list of max_sen tensors of shape [batch, max_len], one per sentence.
    inputs = tf.transpose(inputs, [1, 0, 2])
    inputs = tf.reshape(inputs, [-1, self.max_len])
    inputs = tf.split(0, self.max_sen, inputs)  # legacy signature: (dim, num, value)

    # Word-level parameters: the embedding matrix (initialized from the
    # pre-trained word_embedding and fine-tuned), plus the word-attention
    # projection, bias, and context vector.
    variable_dict = {
        "word_embedding_w": tf.get_variable(
            name="word_embedding",
            shape=[self.vocabsize, self.hiddensize],
            initializer=tf.constant_initializer(word_embedding),
            trainable=True),
        "attention_w": tf.get_variable(name="word_attention_weights",
                                       shape=[2 * self.rnnsize, 2 * self.rnnsize]),
        "attention_b": tf.get_variable(name="word_attention_bias",
                                       shape=[2 * self.rnnsize]),
        "attention_c": tf.get_variable(name="word_attention_context",
                                       shape=[2 * self.rnnsize, 1]),
    }

    # Encode each sentence into a fixed-size vector; reuse_variables()
    # shares the word-level encoder weights across all sentences.
    sent_embeddings = []
    with tf.variable_scope("embedding_scope") as scope:
        for x in inputs:
            embedding = self.sentence_embedding(x, keep_prob, variable_dict)
            sent_embeddings.append(embedding)
            scope.reuse_variables()

    # Sentence-level bidirectional LSTM over the sequence of sentence
    # vectors, with dropout on both inputs and outputs.
    with tf.variable_scope('forward'):
        lstm_fw_cell = rnn_cell.DropoutWrapper(
            rnn_cell.BasicLSTMCell(self.docsize, forget_bias=1.0, state_is_tuple=True),
            input_keep_prob=keep_prob, output_keep_prob=keep_prob)
    with tf.variable_scope('backward'):
        lstm_bw_cell = rnn_cell.DropoutWrapper(
            rnn_cell.BasicLSTMCell(self.docsize, forget_bias=1.0, state_is_tuple=True),
            input_keep_prob=keep_prob, output_keep_prob=keep_prob)
    outputs, _, _ = rnn.bidirectional_rnn(
        lstm_fw_cell, lstm_bw_cell, sent_embeddings, dtype=tf.float32)

    # Sentence-level attention parameters.
    atten_variable_dict = {
        "attention_w": tf.get_variable(name="sent_attention_weights",
                                       shape=[2 * self.docsize, 2 * self.docsize]),
        "attention_b": tf.get_variable(name="sent_attention_bias",
                                       shape=[2 * self.docsize]),
        "attention_c": tf.get_variable(name="sent_attention_context",
                                       shape=[2 * self.docsize, 1]),
    }

    # Attention-weighted document vector, then a fully connected layer
    # produces the class logits.
    att = self.attention_layer(outputs, atten_variable_dict)
    W = tf.get_variable("fullconnect_weights", shape=[2 * self.docsize, n_classes])
    B = tf.get_variable("fullconnect_bias", shape=[n_classes])
    output = tf.add(tf.matmul(att, W), B, name="output")
    return output
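
The `attention_layer` helper is not part of this listing. Judging from the shapes of `attention_w`, `attention_b`, and `attention_c` above, it presumably implements the standard HAN attention (Yang et al., 2016): project each hidden state, score it against a learned context vector, and return the softmax-weighted sum. A minimal sketch under that assumption, in the same legacy TF 0.x style (the exact body is a reconstruction, not the project's code):

    def attention_layer(self, outputs, var_dict):
        # Hypothetical sketch of HAN-style attention; not from the original file.
        # outputs: list of T tensors, each [batch, 2*size] (bidirectional RNN output).
        scores = []
        for h in outputs:
            # u_t = tanh(W h_t + b); score_t = u_t^T c
            u = tf.tanh(tf.matmul(h, var_dict["attention_w"]) + var_dict["attention_b"])
            scores.append(tf.matmul(u, var_dict["attention_c"]))   # [batch, 1]
        scores = tf.concat(1, scores)                 # legacy signature: (dim, values)
        alpha = tf.nn.softmax(scores)                 # [batch, T] attention weights
        alphas = tf.split(1, len(outputs), alpha)     # list of T [batch, 1] tensors
        # Weighted sum of hidden states: sum_t alpha_t * h_t -> [batch, 2*size]
        return tf.add_n([a * h for a, h in zip(alphas, outputs)])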
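
For context, a call site might look roughly like the following; the `HAN` class name, its constructor, and the hyperparameter values are placeholders, since the surrounding class definition is not part of this listing:

    import numpy as np
    import tensorflow as tf

    n_classes = 5                 # example value
    model = HAN()                 # hypothetical: sets max_sen, max_len, vocabsize, etc.
    # Stand-in for real pre-trained word vectors of shape [vocabsize, hiddensize].
    pretrained = np.random.randn(model.vocabsize, model.hiddensize).astype(np.float32)

    x = tf.placeholder(tf.int32, [None, model.max_sen, model.max_len])  # word ids
    y = tf.placeholder(tf.float32, [None, n_classes])                   # one-hot labels
    keep_prob = tf.placeholder(tf.float32)                              # dropout keep prob

    logits = model.build(x, keep_prob, n_classes, pretrained)  # [batch, n_classes]
    # Legacy TF 0.x positional order: (logits, labels).
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)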