A custom distributed crawler framework.

A distributed crawler framework that is much simpler than scrapy: there is no jumping back and forth between item, pipeline, middleware, spider, settings and run files, since everything lives in one file, which saves a lot of development time. The framework is deliberately loose: you override a single method and are free to parse and store the data however you like, with no items to define and no pipelines to write. The bundled RequestClient offers simple cookie handling and lets you switch IP proxies with a single flag, so no proxy middleware is needed either.
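For reference, here is a minimal standalone sketch of how the bundled RequestClient is called, based only on the call made inside _dispacth_request further down; the proxy flag, url and headers below are placeholders, and the cookie helpers mentioned above are not shown in this file:

from app.utils_ydf import RequestClient

# one-flag proxy switching: pass None, 'kuai', 'abuyun' or 'crawlera' as the first argument
resp = RequestClient('kuai', timeout=30).request_with_proxy(method='get', url='https://www.example.com', headers={'user-agent': 'Mozilla/5.0'}, data=None)
print(resp.status_code, len(resp.text))

The full framework code follows.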

# coding=utf-8
from collections import OrderedDict
import abc
import json
import time
import queue
# noinspection PyUnresolvedReferences
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from threading import Lock
# noinspection PyUnresolvedReferences
from app.utils_ydf import LoggerMixin, MongoMixin, RedisMixin, RequestClient, decorators, RedisBulkWriteHelper, RedisOperation, MongoBulkWriteHelper, MysqlBulkWriteHelper


class BoundedThreadPoolExecutor(ThreadPoolExecutor):
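    # Bound the internal work queue: submit() blocks once 2 * max_workers tasks are pending,
    # so the redis scheduler cannot buffer the entire seed set in memory.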
    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)
        self._work_queue = queue.Queue(max_workers * 2)


class StatusError(Exception):
    pass


lock = Lock()


class BaseCustomSpider(LoggerMixin, MongoMixin, RedisMixin, metaclass=abc.ABCMeta):
    """
    一个精简的自定义的基于reids任务调度的分布式基础爬虫框架(所谓分布式就是可以水平扩展,一台机器开启多进程不需要修改代码或者多次重复启动python程序,以及多个机器都可以启动此程序,任何节点都可以是生产者或消费者)。子类只需要几行重写_request_and_extract方法,就可以快速开发并发 分布式的爬虫项目,比scrapy简单很多。
    用法BookingListPageSpider继承BaseCustomSpider,重写_request_and_extract完成解析和入库。以下为启动方式。
    BookingListPageSpider('booking:listpage_urls', threads_num=500).set_request_timeout(100).set_request_proxy('kuai').start_craw()  # start_craw是非阻塞的命令,可以直接在当前主线程再运行一个详情页的spider

    """

    def __init__(self, seed_key: str, request_method='get', threads_num=100, proxy_name='kuai'):
        """
        :param seed_key: redis的seed键
        :param request_method: 请求方式get或者post
        :param threads_num:request并发数量
        :param proxy_name:可为None, 'kuai', 'abuyun', 'crawlera',为None不使用代理
        """
        self.__check_proxy_name(proxy_name)
        self._seed_key = seed_key
        self._request_metohd = request_method
        self._proxy_name = proxy_name
        self.theadpool = BoundedThreadPoolExecutor(threads_num)
        self._initialization_count()
        self._request_headers = None
        self._request_timeout = 60
        self._max_request_retry_times = 50

    @staticmethod
    def __check_proxy_name(proxy_name):
        if proxy_name not in (None, 'kuai', 'abuyun', 'crawlera'):
            raise ValueError('invalid proxy name: must be None, "kuai", "abuyun" or "crawlera"')

    def _initialization_count(self):

        self._t1 = time.time()
        self._request_count = 0
        self._request_success_count = 0

    def set_max_request_retry_times(self, max_request_retry_times):
        self._max_request_retry_times = max_request_retry_times
        return self

    def set_request_headers(self, headers: dict):
        """
        self.request_headers = {'user-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
        """
        self._request_headers = headers
        return self  # return self so calls can be chained

    def set_request_timeout(self, timeout: float):
        self._request_timeout = timeout
        return self

    def set_request_proxy(self, proxy_name):
        self.__check_proxy_name(proxy_name)
        self._proxy_name = proxy_name
        return self

    def __calculate_count_per_minute(self, flag):
        with lock:
            if time.time() - self._t1 > 60:
                # _request_count, _request_success_count = self._request_count, self._request_success_count
                self.logger.info(f'made {self._request_count} requests in the last minute, {self._request_success_count} succeeded; the {self._seed_key} key still holds {self.redis_db7.scard(self._seed_key)} seeds')
                self._initialization_count()
            if flag == 0:
                self._request_count += 1
            if flag == 1:
                self._request_success_count += 1

    def start_craw(self):
        [self._schedu_a_task() for _ in range(20)]  # when the seeds live in a remote redis over the public network, spop adds network latency, so 20 scheduler threads are started.

    @decorators.tomorrow_threads(300)
    @decorators.keep_circulating(time_sleep=1)
    def _schedu_a_task(self):
        while True:
            seed_bytes = self.redis_db7.spop(self._seed_key)
            if seed_bytes:
                seed_dict = json.loads(seed_bytes)
                self.theadpool.submit(self.__request_and_extract, seed_dict['url'], meta=seed_dict)
            else:
                self.logger.debug(f'the redis key {self._seed_key} is empty')
                time.sleep(2)

    # @decorators.handle_exception(50, )
    def _dispacth_request(self, url, current_url_request_times=0, data: dict = None):
        # self.__calculate_count_per_minute(0)
        """
        :param url: 请求url
        :param current_url_request_times:
        :param data: post亲戚逇数据
        :return:
        """
        if current_url_request_times < self._max_request_retry_times:
            if current_url_request_times > 0:
                pass
                # self.logger.debug(current_url_request_times)
            # noinspection PyBroadException
            try:
                resp = RequestClient(self._proxy_name, timeout=self._request_timeout).request_with_proxy(method=self._request_metohd, url=url, headers=self._request_headers, data=data)  # send the request through the configured proxy
            except Exception as e:
                self.logger.error(f'network error during the request: {e}')
                self.__calculate_count_per_minute(0)
                return self._dispacth_request(url, current_url_request_times + 1, data)
            else:
                if resp.status_code == 200:
                    self.__calculate_count_per_minute(0)
                    self.__calculate_count_per_minute(1)
                    return resp
                else:
                    self.logger.critical(f'got status code {resp.status_code}  --> {url}')
                    self.__calculate_count_per_minute(0)
                    return self._dispacth_request(url, current_url_request_times + 1, data)
        else:
            self.logger.critical(f'the request for {url} still failed after reaching the maximum number of retries')
            return f'the request for {url} still failed after reaching the maximum number of retries'

    def put_seed_task_to_redis(self, redis_key: str, seed_dict: OrderedDict):
        """
        添加种子或任务到redis中
        :param redis_key: 种子/任务在redis的键
        :param seed_dict: 任务,必须是一个有序字典类型,不能用字典,否则会插入相同的任务到redis中。字典中需要至少包含一个名叫url的键,可以添加其余的键用来携带各种初始任务信息。
        :return:
        """
        seed_str = json.dumps(seed_dict)
        # self.redis_db7.sadd(redis_key, seed_str)
        RedisBulkWriteHelper(self.redis_db7, threshold=50).add_task(RedisOperation('sadd', redis_key, seed_str))

    def __request_and_extract(self, url, meta: OrderedDict):  # ThreadPoolExecutor hides exceptions when a future's result is never retrieved, so errors are caught and logged here explicitly
        # noinspection PyBroadException
        try:
            self._request_and_extract(url, meta)
        except Exception as e:
            self.logger.exception(f'parsing error for url {url}  \n {e}')

    @abc.abstractmethod
    def _request_and_extract(self, url, meta: OrderedDict):
        """
        子类需要重写此方法,完成解析和数据入库或者加入提取的url二次链接和传递的参数到redis的某个键。爬虫需要多层级页面提取的,重新实例化一个此类运行即可。
        :param url:
        :param meta:
        :return:
        """
        """
        必须使用_dispacth_request方法来请求url,不要直接使用requests,否则不能够对请求错误成自动重试和每分钟请求数量统计
        response = self._dispacth_request(url)
        print(response.text)
        """
        raise NotImplementedError

Usage: just a few simple lines are enough, and you are free to parse and store the data however you like, with no constraints imposed.

import datetime

from pymongo import UpdateOne
# re_search_group is a regex-search helper from the author's utils, not shown in this snippet


class ExpediaEnglishEdetailSpider(BaseCustomSpider):
    def _request_and_extract(self, url, meta: dict):
        response = self._dispacth_request(url)
        item = dict()
        item['_id'] = meta['_id']
        item['d_country_en'] = re_search_group(r'<span class="country">(.*?)</span>', response.text)
        item['d_adress_en'] = re_search_group(r'<h3>Location</h3>[\s\S]*?<p>(.*?)</p>', response.text)
        item['d_adress_street_en'] = re_search_group(r'<span class="street-address">(.*?)</span>', response.text)
        item.update({'update_time': datetime.datetime.now()})
        self.logger.warning(f'updating item --> {item}')
        MongoBulkWriteHelper(self.mongo_199_client.get_database('hotel').get_collection('expedia_hotel_ydf3'), 100).add_task(UpdateOne({'_id': item['_id']}, {'$set': item}, upsert=True))

 

How to run:

ExpediaEnglishEdetailSpider('expedia:tasks', threads_num=500).set_request_timeout(100).set_request_proxy('kuai').start_craw()
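
Before (or while) the spider runs, tasks are seeded into redis through put_seed_task_to_redis. A minimal sketch, reusing the 'expedia:tasks' key from the run command above; the hotel id and url are made-up placeholders:

from collections import OrderedDict

# any instance can act as a producer: push seeds under the same key the consumer reads from
producer = ExpediaEnglishEdetailSpider('expedia:tasks')
for hotel_id, detail_url in [('12345', 'https://www.expedia.com/some-hotel.h12345.Hotel-Information')]:
    seed = OrderedDict()                 # must be an OrderedDict with at least a 'url' key
    seed['url'] = detail_url
    seed['_id'] = hotel_id               # extra keys reach _request_and_extract through meta
    producer.put_seed_task_to_redis('expedia:tasks', seed)
# note: put_seed_task_to_redis buffers writes via RedisBulkWriteHelper(threshold=50)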


In testing, a single process on a single core made about 20,000 requests per minute; the actual maximum per-minute request count depends on network speed, the target site's response time and the size of the responses.
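
For crawls with several page levels, the base class suggests running one spider per level; because start_craw is non-blocking, every level can be launched from the same main thread. A rough sketch under that assumption (ExpediaListPageSpider, the 'expedia:list_tasks' key and the link regex are hypothetical; only ExpediaEnglishEdetailSpider and 'expedia:tasks' come from the example above):

import re
from collections import OrderedDict


class ExpediaListPageSpider(BaseCustomSpider):
    """Hypothetical first-level spider: pulls list pages and seeds detail urls for the second level."""

    def _request_and_extract(self, url, meta: OrderedDict):
        response = self._dispacth_request(url)
        # the regex below is a placeholder for whatever actually matches detail links on the list page
        for detail_url in re.findall(r'href="(https://www\.expedia\.com/[^"]+?\.Hotel-Information)"', response.text):
            seed = OrderedDict()
            seed['url'] = detail_url
            seed['_id'] = detail_url  # made-up choice: use the url itself as the document _id
            self.put_seed_task_to_redis('expedia:tasks', seed)


# start_craw is non-blocking, so both levels run concurrently in one process
ExpediaListPageSpider('expedia:list_tasks', threads_num=100).set_request_proxy('kuai').start_craw()
ExpediaEnglishEdetailSpider('expedia:tasks', threads_num=500).set_request_proxy('kuai').start_craw()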


Reposted from www.cnblogs.com/ydf0509/p/9787669.html