Tieba_search.py source code

python

Project: Crawlers    Author: mi-minus
def start_requests(self):

        #####################################################################################
        # Yield one start request per search keyword. The enclosing Tieba_search.py module
        # imports urllib and scrapy and defines this method on a scrapy.Spider subclass.
        # topic_dict maps a topic id to its list of search keywords; the keyword strings
        # are shown as '??' (garbled in this copy) and act as placeholders for real terms.
        # topic_dict = {'1': [u'??', u'??'], '2': [u'??', u'??']}
        topic_dict = {'1': [u'??'], '2': [u'??'], '3': [u'????'], '4': [u'??']}

        index = 0
        for id, kws_list in topic_dict.iteritems():  # Python 2 dict iteration
            for kw in kws_list:
                print kw
                # Tieba's search endpoint expects the query GBK-encoded, then percent-quoted.
                wd_code = urllib.quote(kw.encode('gbk'))
                search_url = 'http://tieba.baidu.com/f/search/res?isnew=1&kw=&qw=' + wd_code + '&un=&rn=10&pn=0&sd=&ed=&sm=1&only_thread=1'
                # e.g. http://tieba.baidu.com/f/search/res?isnew=1&kw=&qw=%B1%B1%BE%A9&un=&rn=10&pn=0&sd=&ed=&sm=1&only_thread=1
                #      http://tieba.baidu.com/f/search/res?isnew=1&kw=&qw=%B1%B1%BE%A9&un=&rn=10&pn=0&sd=&ed=&sm=1
                # Per-keyword bookkeeping: a crawl flag and the page limit for this search.
                self.Flag_List.append(True)
                self.Maxpage_List.append(self.MAX_PAGE_NUM)
                print search_url
                yield scrapy.Request(search_url, meta={'topic_id': id, 'index': index, 'kw': kw})
                index += 1

        #####################################################################################
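The snippet above is Python 2 code (print statements, dict.iteritems(), urllib.quote). As a rough illustration of the same URL construction on Python 3, a minimal sketch is given below. build_search_url is a hypothetical helper, not part of the Crawlers project, and the example keyword u'北京' is only what the GBK sample URL in the comments (%B1%B1%BE%A9) decodes to, not one of the original (garbled) keywords.

from urllib.parse import quote

def build_search_url(keyword, page=0, per_page=10, only_thread=True):
    # Hypothetical Python 3 helper: Tieba's search endpoint expects the query
    # GBK-encoded and then percent-quoted.
    qw = quote(keyword.encode('gbk'))
    url = ('http://tieba.baidu.com/f/search/res?isnew=1&kw=&qw=' + qw +
           '&un=&rn=' + str(per_page) + '&pn=' + str(page) + '&sd=&ed=&sm=1')
    if only_thread:
        url += '&only_thread=1'
    return url

if __name__ == '__main__':
    # u'北京' is recovered from the sample URL above for illustration only.
    print(build_search_url(u'北京'))
    # -> http://tieba.baidu.com/f/search/res?isnew=1&kw=&qw=%B1%B1%BE%A9&un=&rn=10&pn=0&sd=&ed=&sm=1&only_thread=1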