Scrapy Spider Middleware Settings

# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from newrecord.settings import NOW_Y, NOW_M, NOW_D, YES_Y, YES_M, YES_D
from scrapy import signals
import time
import base64                    # used for proxy authentication in the downloader middleware
                                 # Add a proxy in process_request of the downloader middleware:
                                 # proxy_user_pass = 'USERNAME:PASSWORD'
                                 # encoded_user_pass = base64.b64encode(proxy_user_pass)
                                 # request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
                                 # request.meta['proxy'] = 'IP:PORT'
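# A minimal sketch of the same idea that also runs on Python 3, where
# b64encode() needs bytes rather than str (the credentials and proxy
# address are placeholders, not values from this project):
#     proxy_user_pass = 'USERNAME:PASSWORD'
#     encoded_user_pass = base64.b64encode(proxy_user_pass.encode()).decode()
#     request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
#     request.meta['proxy'] = 'http://PROXY_IP:PORT'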
                   
                  
class NewrecordSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
                  
    @classmethod   
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()  
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s  
 
# Handles the response data on its way into the spider.
# Similar to process_response in the downloader middleware:
# the response goes through process_response there first, then here.
    def process_spider_input(self, response, spider):
        # Processes each response entering the spider, but must return None;
        # filtering here cannot stop the response from reaching the spider.
        # To reject a response, raise an exception instead.
        print('-----------------------3--------------------')
        print('--- in spider middleware --- process_spider_input --- response.url: %s ---' % response.url)
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
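        # A hypothetical sketch of rejecting a response instead of returning
        # None: raising here hands it to process_spider_exception() (or the
        # request's errback) rather than the spider callback, e.g.
        #     if response.status != 200:
        #         raise ValueError('unwanted response: %s' % response.url)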
                   
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
# result: the output yielded by the spider callback (e.g. parse_item), so
# item data can also be handled here. Output passes through this method
# first and only then reaches the item pipelines (sketch below):
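        # A hypothetical sketch of annotating items here before the
        # pipelines see them (crawled_at is a made-up field, not part of
        # this project):
        #     for i in result:
        #         if isinstance(i, dict):
        #             i['crawled_at'] = time.time()
        #         yield i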
        for i in result:
            yield i
                   
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
                   
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass       
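        # A hypothetical sketch: instead of pass, the exception could be
        # logged and swallowed by returning an empty iterable, which stops
        # the process_spider_exception() chain:
        #     spider.logger.warning('error handling %s: %s', response.url, exception)
        #     return []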
                   
# Handles only the spider's start requests (built from start_urls);
# URLs discovered later in the crawl never pass through this method,
# and the method must be named process_start_requests or Scrapy won't call it.
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            if 'rank_news' in r.url:
                print('---------------------0-----------------------------')
                print('--- start request entering the spider middleware --- start_requests: %s' % r.url)
                yield r
                  
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
                  
                   
class NewrecordDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
                   
    @classmethod   
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your downloader middleware.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
                  
# Proxy-Authorization: base64-encoded proxy credentials
# request.meta['proxy'] = "http://YOUR_PROXY_IP:PORT"
# encoded_user_pass = base64.b64encode(proxy_user_pass)
# request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
# Note: meta['proxy'] must be a URL string such as 'http://127.0.0.1:8000', not a list
# request.meta['item'] = item        # extra data in request.meta can be used to pass values between callbacks
# request.cookies['name'] = 'value'  # attach cookies to the request
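# A hypothetical spider-side sketch of carrying an item through meta
# (parse_detail and NewsItem are placeholder names, not from this project):
#     def parse(self, response):
#         item = NewsItem(title=response.css('h1::text').get())
#         yield scrapy.Request(response.urljoin('/detail.html'),
#                              meta={'item': item},
#                              callback=self.parse_detail)
#     def parse_detail(self, response):
#         item = response.meta['item']
#         item['body'] = response.text
#         yield item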
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must return None (continue processing this request), a Response or
        # Request object, or raise IgnoreRequest.
        print('---------------1------------------')
        print('--- request entering the downloader middleware --- url: %s ---' % request.url)
        return None
 
    def process_response(self, request, response, spider):
        # Handles the response of every crawled page; response.url can be
        # used here to filter for the pages you need, though the spider's
        # Rules are usually the more convenient place for that.
        print('-----------------------------2---------------------------------')
        print('--- response entering the downloader middleware --- url: %s ---' % response.url)
        # Must either return a Response object, return a Request object,
        # or raise IgnoreRequest. The returned response goes on to
        # process_spider_input in the spider middleware.
        return response
                                                              
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from another downloader middleware) raises an exception.
        # Should return either None (continue processing this exception),
        # a Response object (stops the process_exception() chain), or
        # a Request object (stops the process_exception() chain).
        pass
                                                              
    def spider_opened(self, spider):                          
        spider.logger.info('Spider opened: %s' % spider.name)
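
To take effect, both classes must be enabled in the project's settings.py. A minimal sketch (the module path and the priority 543 follow the stock Scrapy template; adjust them to the real project layout):

SPIDER_MIDDLEWARES = {
    'newrecord.middlewares.NewrecordSpiderMiddleware': 543,
}

DOWNLOADER_MIDDLEWARES = {
    'newrecord.middlewares.NewrecordDownloaderMiddleware': 543,
}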

Reposted from www.cnblogs.com/cqq20111688/p/10100266.html