from datetime import datetime, timedelta

from scrapy import Request
from scrapy.exceptions import CloseSpider

# NOTE: `parse` is a spider method excerpted from its class; it also
# relies on `self.media` (metadata about the scraped media source,
# including `last_scraped_at`) and on the project helper `wib_to_utc`,
# sketched below.


def parse(self, response):
    self.logger.info('parse: %s' % response)
    is_no_update = False

    # Get the list of news from the current page
    articles = response.css('div.view-front > div.view-content > div.views-row')
    if not articles:
        raise CloseSpider('article not found')

    for article in articles:
        # Close the spider if we don't find the article URL
        url_selectors = article.css('span.field-content a::attr(href)')
        if not url_selectors:
            raise CloseSpider('url_selectors not found')
        url = url_selectors.extract()[0]

        # Publication date, e.g. '19 Oct 2016'
        info_selectors = article.css('span.field-content::text')
        if not info_selectors:
            raise CloseSpider('info_selectors not found')
        info_time = info_selectors.extract()[1].strip()

        # Parse the date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %b %Y')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)
        published_at = wib_to_utc(published_at_wib)

        # Stop once we reach an article we have already scraped
        # (the listing is assumed to be newest-first)
        if self.media['last_scraped_at'] >= published_at:
            is_no_update = True
            break

        # For each url we create a new scrapy request
        yield Request('http://www.qureta.com' + url, callback=self.parse_news)

    if is_no_update:
        self.logger.info('Media has no update')
        return

    # Follow the pagination to the next page, if any
    if response.css('li.next'):
        next_page_url = response.css('li.next > a::attr(href)')[0].extract()
        yield Request('http://www.qureta.com' + next_page_url, callback=self.parse)
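
# `wib_to_utc` is a project utility that is not part of this excerpt.
# A minimal sketch, assuming it only needs to shift a naive datetime
# from WIB (Waktu Indonesia Barat, a fixed UTC+7 offset with no
# daylight saving) to naive UTC:
def wib_to_utc(dt):
    # WIB is a fixed UTC+7 offset, so UTC is simply seven hours earlier
    return dt - timedelta(hours=7)
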
# Collect news item
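# The `parse_news` callback requested above is truncated in this
# excerpt. A hypothetical sketch of its general shape only; the
# selector and item fields below are illustrative assumptions, not the
# project's actual implementation:
def parse_news(self, response):
    self.logger.info('parse_news: %s' % response)
    item = {'url': response.url}

    # Assumed selector for the article title; the real page structure
    # may differ
    title = response.css('h1::text').extract_first()
    if title:
        item['title'] = title.strip()

    yield item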