Scrapy 爬取小说《圣墟》示例（爬虫、settings 配置与 pipeline 三部分）

# -*- coding: utf-8 -*-
import scrapy
from sx.items import SxItem


class SkSpider(scrapy.Spider):
    """Crawl the table of contents of the novel at biqiuge.com and
    follow every chapter link, yielding one SxItem per chapter."""

    name = 'sk'
    allowed_domains = ['biqiuge.com']
    start_urls = ['https://www.biqiuge.com/book/4772/']

    def parse(self, response):
        """Parse the index page: yield one Request per chapter link.

        Each <dd> under div.listmain holds an <a> whose href is a
        site-relative chapter URL.
        """
        for box in response.xpath("//div[@class='listmain']/dl/dd"):
            href = box.xpath('./a/@href').extract_first()
            # Skip malformed entries instead of raising IndexError.
            if not href:
                continue
            # urljoin resolves relative hrefs against the response URL,
            # instead of hard-coding the scheme/host prefix.
            yield scrapy.Request(response.urljoin(href), callback=self.parse_2)

    def parse_2(self, response):
        """Parse a chapter page into an SxItem (title + body text)."""
        item = SxItem()
        title = response.xpath('//div[@class="content"]/h1/text()').extract()
        # Guard against a missing <h1> rather than crashing on [0].
        item['title'] = title[0] if title else ''
        content = response.xpath('//div[@id="content"]/text()').extract()
        # One join instead of repeated string concatenation; each text
        # node is terminated with a newline, matching the original output.
        item['content'] = ''.join(line + '\n' for line in content)
        yield item

settings.py 配置文件：需要加入下载延迟（DOWNLOAD_DELAY），避免请求过快被目标站点封禁。

# Project name; used by Scrapy for logging and the default User-Agent.
BOT_NAME = 'sx'

# Module paths where Scrapy discovers spider classes.
SPIDER_MODULES = ['sx.spiders']
NEWSPIDER_MODULE = 'sx.spiders'


# Do not honor robots.txt (the site's robots rules would block crawling).
ROBOTSTXT_OBEY = False


# Wait 3 seconds between requests to the same site to avoid being banned.
DOWNLOAD_DELAY = 3

# Headers sent with every request; a browser-like User-Agent reduces the
# chance of the server rejecting the crawler.
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}



# Enable the file-writing pipeline; 300 is its priority (lower runs first).
ITEM_PIPELINES = {
    'sx.pipelines.SxPipeline': 300,
}
class SxPipeline(object):
    """Item pipeline that appends each scraped chapter to a local text file."""

    def __init__(self):
        # 'a+' appends so earlier chapters are kept across runs; explicit
        # utf-8 avoids UnicodeEncodeError on platforms (e.g. Windows) whose
        # default encoding cannot represent Chinese text.
        self.file = open('圣墟.txt', 'a+', encoding='utf-8')

    def process_item(self, item, spider):
        """Write one chapter (title line, then body) and pass the item on.

        Renamed the local away from `str`, which shadowed the builtin.
        """
        self.file.write(item['title'] + '\n')
        self.file.write(item['content'])
        return item

    def close_spider(self, spider):
        # Scrapy calls this when the spider finishes; release the handle
        # (the original leaked the open file).
        self.file.close()

猜你喜欢

转载自 www.cnblogs.com/52shaidan/p/10185104.html