dnn_gru.py source code

Python

Project: Deep-Query-Classificaion · Author: georgeiswang
# Requires the pre-1.0 TensorFlow API; the original module presumably starts with
# something like:
#   import tensorflow as tf
#   from tensorflow.python.ops import rnn
def Forward(self, sess):
    gru = tf.nn.rnn_cell.GRUCell(200)

    # Reshape the input from [1, 20, 200] into the list of 20 tensors of
    # shape [1, 200] that the old list-based rnn.rnn API expects.
    x_in_batch = tf.transpose(self.x_in, [1, 0, 2])  # -> [20, 1, 200]
    x_in = tf.reshape(x_in_batch, [-1, 200])         # -> [20, 200]
    x_in = tf.split(0, 20, x_in)                     # list of 20 tensors, each [1, 200]

    # Create the GRU variables on the first call; reuse them on later calls.
    if self.i == 0:
        with tf.variable_scope('output'):
            output_gru, state = rnn.rnn(gru, x_in, dtype=tf.float32)
    else:
        with tf.variable_scope('output', reuse=True):
            output_gru, state = rnn.rnn(gru, x_in, dtype=tf.float32)
    self.i += 1

    output_gru = output_gru[-1]  # last time step, [1, 200]
    lin_h = tf.matmul(output_gru, self.hiddenLayer.W) + self.hiddenLayer.b  # W: [200, 200]

    # Sum the embeddings of the reg_x feature ids: [num_feats, 200] -> [200].
    reg_h = tf.reduce_sum(tf.gather(self.reg_lookup_table, self.reg_x), 0)
    print("reg_h is")
    print(reg_h)
    h = self.activation(lin_h + tf.cast(reg_h, tf.float32))  # [1, 200]

    lin_output_pre = tf.matmul(h, self.outputLayer.W) + self.outputLayer.b  # W: [200, 63]
    # Note: dropout is applied unconditionally, i.e. at inference time as well.
    lin_output = tf.nn.dropout(lin_output_pre, keep_prob=0.6)

    # Skip connection over the reg_x features; skip_layer_re.W is [19156, 63].
    reg_output = tf.reduce_sum(tf.gather(self.skip_layer_re.W, self.reg_x), 0) + self.skip_layer_re.b
    print(reg_output)

    # Skip connection from the last input step; skip_layer_ae.W is [200, 63].
    ae_output = tf.matmul(x_in[-1], self.skip_layer_ae.W) + self.skip_layer_ae.b

    output = tf.nn.softmax(lin_output + ae_output + reg_output)  # [1, 63]
    print(output)
    return output
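
The snippet targets the pre-1.0 TensorFlow API (tf.nn.rnn_cell, the list-based rnn.rnn, the old tf.split(dim, num, value) signature), none of which exists in current TensorFlow. For reference, a minimal TF 2.x Keras sketch of the same forward pass follows. It is an approximation, not the project's code: the class and constructor arguments are mine, self.activation is assumed to be tanh, dropout is gated on a training flag rather than applied unconditionally, and the skip_layer_re.b bias is omitted since Keras Embedding layers have none.

import tensorflow as tf

class QueryClassifier(tf.keras.Model):
    """Sketch of the same forward pass with Keras layers (names hypothetical)."""
    def __init__(self, vocab_size=19156, hidden=200, n_classes=63):
        super().__init__()
        self.gru = tf.keras.layers.GRU(hidden)              # replaces GRUCell + rnn.rnn
        self.hidden_dense = tf.keras.layers.Dense(hidden)   # hiddenLayer.W / .b
        self.reg_lookup = tf.keras.layers.Embedding(vocab_size, hidden)
        self.out_dense = tf.keras.layers.Dense(n_classes)   # outputLayer.W / .b
        self.skip_re = tf.keras.layers.Embedding(vocab_size, n_classes)  # skip_layer_re.W
        self.skip_ae = tf.keras.layers.Dense(n_classes)     # skip_layer_ae.W / .b
        self.dropout = tf.keras.layers.Dropout(0.4)         # rate = 1 - keep_prob

    def call(self, x, reg_x, training=False):
        # x: [batch, 20, 200] embedded query; reg_x: [batch, n_feats] feature ids
        last = self.gru(x)                                  # [batch, 200], last step only
        reg_h = tf.reduce_sum(self.reg_lookup(reg_x), axis=1)
        h = tf.tanh(self.hidden_dense(last) + reg_h)        # assumed activation
        lin_out = self.dropout(self.out_dense(h), training=training)
        reg_out = tf.reduce_sum(self.skip_re(reg_x), axis=1)
        ae_out = self.skip_ae(x[:, -1, :])                  # skip from last input step
        return tf.nn.softmax(lin_out + ae_out + reg_out)    # [batch, 63]

Used, for example, as:

model = QueryClassifier()
probs = model(tf.random.normal([4, 20, 200]),
              tf.constant([[1, 2, 3]] * 4), training=True)  # probs: [4, 63]

Folding the two gather-and-sum skip connections into Embedding layers keeps them trainable while removing the manual variable-scope reuse bookkeeping the original needs.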