softmax.py source code

python

Project: MovieComment2Rating    Author: yaokai1117
import tensorflow as tf


def __init__(self, sent_length, class_num, embedding_size, l2_lambda):
        # Inputs: pre-embedded sentences of shape [batch, sent_length, embedding_size],
        # one-hot labels of shape [batch, class_num], and a dropout keep-prob
        # placeholder (declared but not applied anywhere in this excerpt).
        self.input_x = tf.placeholder(tf.float32, [None, sent_length, embedding_size], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        l2_loss = tf.constant(0.0)  # running total for the L2 regularization term

        with tf.name_scope("flat"):
            self.flatted = tf.reshape(self.input_x, [-1, sent_length * embedding_size])

        with tf.name_scope("linear"):
            weights = tf.get_variable(
                "W",
                shape=[sent_length * embedding_size, class_num],
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
            l2_loss += tf.nn.l2_loss(weights)
            l2_loss += tf.nn.l2_loss(bias)
            self.linear_result = tf.nn.xw_plus_b(self.flatted, weights, bias, name="linear")
            self.predictions = tf.argmax(self.linear_result, 1, name="predictions")

        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
            # Mean cross-entropy plus the scaled L2 penalty accumulated above.
            self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
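
For reference, below is a minimal usage sketch that is not part of the project: it assumes the __init__ above belongs to a class, called SoftmaxModel here purely as a placeholder name, and trains one step on random dummy data with arbitrary hyperparameters. Because the graph uses tf.contrib, it targets TensorFlow 1.x.

import numpy as np
import tensorflow as tf

# SoftmaxModel is a placeholder name for the class whose __init__ is shown above.
model = SoftmaxModel(sent_length=50, class_num=5, embedding_size=128, l2_lambda=0.01)
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Dummy batch: 32 pre-embedded sentences and random one-hot labels.
    x_batch = np.random.randn(32, 50, 128).astype(np.float32)
    y_batch = np.eye(5, dtype=np.float32)[np.random.randint(0, 5, size=32)]
    _, loss, acc = sess.run(
        [train_op, model.loss, model.accuracy],
        feed_dict={model.input_x: x_batch,
                   model.input_y: y_batch,
                   # dropout_keep_prob exists in the graph but is never applied.
                   model.dropout_keep_prob: 1.0})
    print("loss %.4f, accuracy %.4f" % (loss, acc))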