A simple CrawlSpider case: a crawler for the dushu.com book site

In the spider's .py file under the project's spiders directory (e.g. spiders/ds.py):

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class DsSpider(CrawlSpider):
    name = 'ds'
    allowed_domains = ['dushu.com']
    start_urls = ['https://www.dushu.com/book/1163_1.html']

    # Follow every pagination link inside the "pages" div and hand each
    # listing page to parse_item; follow=True keeps paging onward from
    # the pages reached this way.
    rules = (
        Rule(LinkExtractor(restrict_xpaths='//div[@class="pages"]'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # Each <li> in the book list holds one book's title, link and author.
        lis = response.xpath('//div[@class="bookslist"]/ul/li')
        for li in lis:
            # Build a fresh dict per book so each yielded item is independent.
            item = {}
            item['name'] = li.xpath('.//h3/a/text()').extract_first()
            item['link'] = li.xpath('.//h3/a/@href').extract_first()
            item['author'] = li.xpath('.//p[1]/a/text()').extract_first()
            yield item
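Each yielded dict is a scraped item. A quick way to check the results is Scrapy's built-in feed export, run from the project root (the output file name here is just an example):

    scrapy crawl ds -o books.json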

 
