dmoz_spider.py source code

Language: Python

Project: Crawling-SinaWeibo, author: Uzumaki-C

# Module-level imports this method relies on; the InfoItem import path is an
# assumption about the project layout (an items.py at the package root).
import random
import re

from scrapy import Request
from scrapy.selector import Selector

from ..items import InfoItem


def parse(self, response):
        """Parse a weibo.cn profile page: extract the tweet/follow/fan counts,
        then queue the user's info page and one of the fans/follow lists."""
        selector = Selector(response)
        text0 = selector.xpath('body/div[@class="u"]/div[@class="tip2"]').extract_first()
        info = InfoItem()
        if text0:
            # The \uXXXX escapes are the Chinese labels shown on the profile page.
            num_tweets = re.findall(u'\u5fae\u535a\[(\d+)\]', text0)   # 微博[N]: tweet count
            num_follows = re.findall(u'\u5173\u6ce8\[(\d+)\]', text0)  # 关注[N]: accounts followed
            num_fans = re.findall(u'\u7c89\u4e1d\[(\d+)\]', text0)     # 粉丝[N]: followers

            if num_tweets:
                info["num_tweets"] = int(num_tweets[0])
            if num_follows:
                info["num_follows"] = int(num_follows[0])
            if num_fans:
                info["num_fans"] = int(num_fans[0])

        # Queue the user's info page; parse1 (defined elsewhere in this spider)
        # completes the item carried along in meta.
        url_information1 = "http://weibo.cn/%s/info" % self.next_ID[-1]
        yield Request(url=url_information1, meta={"item": info, "ID": self.next_ID[-1]}, dont_filter=True, callback=self.parse1)


        # Randomly visit either the fans list or the follow list, weighted by their
        # relative sizes: the fans branch is taken with probability fans/(follows + fans).
        follows = info.get("num_follows", 0)
        fans = info.get("num_fans", 0)
        # Default to 0 above so a missing count cannot raise KeyError, and skip the
        # division entirely when both counts are zero.
        if (follows + fans) == 0 or random.random() > float(follows) / (follows + fans):
            try:
                url_fans = "http://weibo.cn/%s/fans" % self.next_ID[-1]
                yield Request(url=url_fans, dont_filter=True, callback=self.parse3)  # crawl the fans list
            except Exception:  # defensive fallback kept from the original
                url_follows = "http://weibo.cn/%s/follow" % self.next_ID[-1]
                yield Request(url=url_follows, dont_filter=True, callback=self.parse3)  # fall back to the follow list
        else:
            try:
                url_follows = "http://weibo.cn/%s/follow" % self.next_ID[-1]
                yield Request(url=url_follows, dont_filter=True, callback=self.parse3)  # crawl the follow list
            except Exception:
                url_fans = "http://weibo.cn/%s/fans" % self.next_ID[-1]
                yield Request(url=url_fans, dont_filter=True, callback=self.parse3)  # fall back to the fans list
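
The branch condition amounts to a weighted coin flip: random.random() > follows/(follows + fans) is true with probability fans/(follows + fans), so the fans list is visited in proportion to the fan count. A minimal standalone sketch of that weighting (pick_list and the sample counts are illustrative, not part of the project):

import random

def pick_list(num_follows, num_fans):
    """Return "fans" with probability num_fans / (num_follows + num_fans)."""
    total = num_follows + num_fans
    if total == 0:
        return "fans"  # arbitrary default when both counts are zero
    # random.random() is uniform on [0, 1), so the comparison is true with
    # probability 1 - num_follows/total, i.e. num_fans/total.
    return "fans" if random.random() > num_follows / float(total) else "follow"

# Empirical check, assuming 300 follows and 700 fans:
counts = {"fans": 0, "follow": 0}
for _ in range(10000):
    counts[pick_list(300, 700)] += 1
print(counts)  # roughly {'fans': 7000, 'follow': 3000}

Note that every Request is built with dont_filter=True, which bypasses Scrapy's duplicate filter; that keeps revisited user IDs from being silently dropped, at the cost of possibly re-crawling the same pages.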