# Case study: crawling pictures from a campus-photo site with Scrapy. The same large-file download mechanism also suits video and audio files.
Project workflow:
scrapy startproject xx
cd xx
scrapy genspider hh www.xx.com
Run the crawler: scrapy crawl hh
import scrapy

from yyl.items import YylItem


class ExampleSpider(scrapy.Spider):
    """Spider that collects image URLs from www.521609.com and yields
    them as YylItem objects for the image pipeline to download."""

    name = 'xiaohua'
    # allowed_domains = ['example.com']
    start_urls = ['http://www.521609.com/daxuemeinv/']

    def parse(self, response):
        """Extract every image's src from the listing page.

        Yields one YylItem per <li>, with 'src' holding the absolute
        image URL (site root + relative src from the page).
        """
        li_lst = response.xpath('//*[@id="content"]/div[2]/div[2]/ul/li')
        for li in li_lst:
            item = YylItem()  # one item per image
            # The page stores a relative path; prepend the site root to
            # build the full downloadable URL.
            item['src'] = 'http://www.521609.com' + li.xpath('./a/img/@src').extract_first()
            # Hand the item to the pipeline, which performs the actual
            # large-file download.
            yield item
import scrapy


class YylItem(scrapy.Item):
    """Item carrying a single image URL scraped by the spider."""

    # Absolute URL of the image to download.
    src = scrapy.Field()
import scrapy
from scrapy.pipelines.images import ImagesPipeline


class YylPipeline(object):
    """Plain pipeline that just logs each item as it passes through."""

    def process_item(self, item, spider):
        print(item)
        return item


class ImgPipeline(ImagesPipeline):
    """Pipeline built on Scrapy's ImagesPipeline, which is designed
    for downloading large files (images/video/audio)."""

    def get_media_requests(self, item, info):
        # Request the large file at the URL the spider extracted.
        yield scrapy.Request(url=item['src'])

    def file_path(self, request, response=None, info=None):
        # Name the downloaded file after the last path segment of its URL.
        url = request.url
        filename = url.split('/')[-1]
        return filename

    def item_completed(self, results, item, info):
        # results is a list of (success, {url, path, checksum}) tuples.
        print(results)
        # Returning the item here plays the same role as the return in
        # process_item: it passes the item on to later pipelines.
        return item
# Scrapy settings for the yyl project.

BOT_NAME = 'yyl'

SPIDER_MODULES = ['yyl.spiders']
NEWSPIDER_MODULE = 'yyl.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36')

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'yyl.middlewares.YylSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'yyl.middlewares.YylDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Lower number runs first: ImgPipeline (download) before YylPipeline (log).
ITEM_PIPELINES = {
    'yyl.pipelines.YylPipeline': 301,
    'yyl.pipelines.ImgPipeline': 300,
}

# Directory where ImagesPipeline stores downloaded files.
IMAGES_STORE = './imgs'

# Only show errors to keep crawl output readable.
LOG_LEVEL = 'ERROR'
Reproduced from: https://www.cnblogs.com/zhangchen-sx/p/11023198.html