A Simple Distributed Crawler


Control Node

URL Manager

import pickle
import hashlib

class UrlManager():
    def __init__(self):
        self.new_urls=self.load_progress('new_urls.txt')
        self.old_urls=self.load_progress('old_urls.txt')

    def has_new_url(self):
        return self.new_url_size()!=0

    def get_new_url(self):
        new_url=self.new_urls.pop()
        # keep only the middle 16 characters of the MD5 digest as a compact fingerprint
        m=hashlib.md5()
        m.update(new_url.encode('utf-8'))
        self.old_urls.add(m.hexdigest()[8:-8])
        return new_url

    def add_new_url(self,url):
        if url is None:
            return
        m=hashlib.md5()
        m.update(url.encode('utf-8'))
        url_md5=m.hexdigest()[8:-8]
        if url not in self.new_urls and url_md5 not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self,urls):
        if urls is None or len(urls)==0:
            return
        for url in urls:
            self.add_new_url(url)

    def new_url_size(self):
        return len(self.new_urls)

    def old_url_size(self):
        return len(self.old_urls)

    def save_progress(self,path,data):
        with open(path,'wb') as f:
            pickle.dump(data,f)

    def load_progress(self,path):
        print('[+] Loading progress from file: %s'%path)
        try:
            with open(path,'rb') as f:
                tmp=pickle.load(f)
                return tmp
        except:
            print('[!] No progress file, creating: %s'%path)
        return set()
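The manager keeps full URLs in new_urls but only 16-character MD5 fingerprints in old_urls, and pickles both sets to disk so a crawl can resume. A minimal, standalone check of that behaviour (the example URL is illustrative only):

um=UrlManager()
um.add_new_url('https://baike.baidu.com/item/Python')
um.add_new_url('https://baike.baidu.com/item/Python')   # duplicate, ignored
print(um.new_url_size())                                 # 1
url=um.get_new_url()                                     # fingerprint moves into old_urls
print(um.old_url_size())                                 # 1
um.save_progress('new_urls.txt',um.new_urls)
um.save_progress('old_urls.txt',um.old_urls)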

Data Storage

import codecs
import time

class DataOutput():
    def __init__(self):
        self.filepath='baike_%s.html'%(time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime()))
        self.output_head(self.filepath)
        self.datas=[]

    def store_data(self,data):
        if data is None:
            return
        self.datas.append(data)
        # flush the buffer to disk once more than 10 entries have accumulated
        if len(self.datas)>10:
            self.output_html(self.filepath)

    def output_head(self,path):
        fout=codecs.open(path,'w',encoding='utf-8')
        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')
        fout.close()

    def output_html(self,path):
        fout=codecs.open(path,'a',encoding='utf-8')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>'%data['url'])
            fout.write('<td>%s</td>'%data['title'])
            fout.write('<td>%s</td>'%data['summary'])
            fout.write('</tr>')
        # clear the buffer after writing instead of removing items while iterating over the list
        self.datas=[]
        fout.close()

    def output_end(self,path):
        fout=codecs.open(path,'a',encoding='utf-8')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
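DataOutput buffers parsed records and appends them as rows of one HTML table per run. A short, hedged usage sketch (the record values are placeholders; the keys url/title/summary match what HtmlParser produces below):

output=DataOutput()
output.store_data({'url':'https://baike.baidu.com/item/Python',
                   'title':'Python',
                   'summary':'...'})
output.output_html(output.filepath)   # write whatever is still buffered
output.output_end(output.filepath)    # close the table/body/html tags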

Control Scheduler

import time
from multiprocessing import Process, Queue
from multiprocessing.managers import BaseManager

# UrlManager and DataOutput are the two classes defined above; they are assumed
# to be importable (or defined in the same file) on the control node.

class NodeManager():
    def start_Manager(self,url_q,result_q):
        # expose the task and result queues so crawler nodes can reach them over the network
        BaseManager.register('get_task_queue',callable=lambda:url_q)
        BaseManager.register('get_result_queue',callable=lambda:result_q)
        manager=BaseManager(address=('',8001),authkey=b'baike')
        return manager

    def url_manager_proc(self,url_q,conn_q,root_url):
        url_manager=UrlManager()
        url_manager.add_new_url(root_url)
        while True:
            while(url_manager.has_new_url()):
                new_url=url_manager.get_new_url()
                url_q.put(new_url)
                print('old_url=',url_manager.old_url_size())
                if (url_manager.old_url_size()>2000):
                    # enough pages crawled: tell the crawler nodes to stop and persist progress
                    url_q.put('end')
                    print('Control node sent the stop notification!')
                    url_manager.save_progress('new_urls.txt',url_manager.new_urls)
                    url_manager.save_progress('old_urls.txt',url_manager.old_urls)
                    return
            try:
                if not conn_q.empty():
                    urls=conn_q.get()
                    url_manager.add_new_urls(urls)
            except BaseException as e:
                time.sleep(0.1)

    def result_solve_proc(self,result_q,conn_q,store_q):
        while(True):
            try:
                if not result_q.empty():
                    content=result_q.get(True)
                    if content['new_urls']=='end':
                        print('Result-handling process received the stop notification, exiting!')
                        store_q.put('end')
                        return
                    conn_q.put(content['new_urls'])
                    store_q.put(content['data'])
                else:
                    time.sleep(0.1)
            except BaseException as e:
                time.sleep(0.1)

    def store_proc(self,store_q):
        output=DataOutput()
        while True:
            if not store_q.empty():
                data=store_q.get()
                if data=='end':
                    print('Store process received the stop notification, exiting!')
                    output.output_end(output.filepath)
                    return
                output.store_data(data)
            else:
                time.sleep(0.1)

if __name__=='__main__':
    url_q=Queue()
    result_q=Queue()
    store_q=Queue()
    conn_q=Queue()
    node=NodeManager()
    manager=node.start_Manager(url_q,result_q)
    url_manager_proc=Process(target=node.url_manager_proc,args=(url_q,conn_q,'https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711?fr=aladdin',))
    result_solve_proc=Process(target=node.result_solve_proc,args=(result_q,conn_q,store_q,))
    store_proc=Process(target=node.store_proc,args=(store_q,))
    url_manager_proc.start()
    result_solve_proc.start()
    store_proc.start()
    manager.get_server().serve_forever()
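Once the control node is running, a separate process (or machine) can verify that the distributed queues are reachable before starting full crawler nodes. A minimal, hedged connection check, using the same port 8001 and authkey b'baike' as above:

from multiprocessing.managers import BaseManager

BaseManager.register('get_task_queue')
BaseManager.register('get_result_queue')
m=BaseManager(address=('127.0.0.1',8001),authkey=b'baike')
m.connect()
print(m.get_task_queue().empty())   # False once the URL manager has queued the root URL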

Crawler Node

HTML Downloader

import requests

class HtmlDownloader():
    def download(self,url):
        if url is None:
            return None
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331',
            'Referer': r'https://baike.baidu.com',
            'Connection': 'keep-alive'
        }
        r=requests.get(url,headers=headers)
        if r.status_code==200:
            r.encoding='utf-8'
            return r.text
        return None
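The download call above has no timeout, so a hung connection can stall a crawler node. A hedged variant with a timeout and a simple retry loop (the retry count and timeout are my own illustrative values, not from the original post):

import time
import requests

def download_with_retry(url,headers,retries=3,timeout=10):
    # illustrative retry/timeout values; adjust to taste
    for _ in range(retries):
        try:
            r=requests.get(url,headers=headers,timeout=timeout)
            if r.status_code==200:
                r.encoding='utf-8'
                return r.text
        except requests.RequestException:
            time.sleep(1)
    return None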

HTML Parser

import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class HtmlParser():
    def parser(self,page_url,html_cont):
        if page_url is None or html_cont is None:
            # return an empty result so callers can still unpack two values
            return None,None
        soup=BeautifulSoup(html_cont,'html.parser')
        new_urls=self._get_new_urls(page_url,soup)
        new_data=self._get_new_data(page_url,soup)
        return new_urls,new_data

    def _get_new_urls(self,page_url,soup):
        new_urls=set()
        links=soup.find_all('a',href=re.compile(r'/item/'))
        for link in links:
            new_url=link['href']
            new_full_url=urljoin(page_url,new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self,page_url,soup):
        data={}
        data['url']=page_url
        title=soup.find('dd','lemmaWgt-lemmaTitle-title').find('h1')
        data['title']=title.get_text()
        summary=soup.find('div','lemma-summary')
        data['summary']=summary.get_text()
        return data
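To exercise the parser against a single page before wiring it into the distributed queues, a quick hedged check (it reuses HtmlDownloader from above and assumes the Baike page layout the selectors expect is still in place):

downloader=HtmlDownloader()
parser=HtmlParser()
url='https://baike.baidu.com/item/Python'
html=downloader.download(url)
new_urls,data=parser.parser(url,html)
if data is not None:
    print(len(new_urls),data['title'])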

Crawler Scheduler

from multiprocessing.managers import BaseManager

# HtmlDownloader and HtmlParser are the classes defined above; they are assumed
# to be importable (or defined in the same file) on the crawler node.

class SpiderWork():
    def __init__(self):
        # register the same queue names the control node exposed, then connect to it
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')
        server_addr='127.0.0.1'
        print('Connect to server %s ...'%server_addr)
        self.m=BaseManager(address=(server_addr,8001),authkey=b'baike')
        self.m.connect()
        self.task=self.m.get_task_queue()
        self.result=self.m.get_result_queue()
        self.downloader=HtmlDownloader()
        self.parser=HtmlParser()
        print('init finish')

    def crawl(self):
        while(True):
            try:
                if not self.task.empty():
                    url=self.task.get()
                    if url=='end':
                        print('Control node told the crawler node to stop working...')
                        # pass the stop notification on to the result-handling process
                        self.result.put({'new_urls':'end','data':'end'})
                        return
                    print('Crawler node is parsing: %s'%url)
                    content=self.downloader.download(url)
                    new_urls,data=self.parser.parser(url,content)
                    self.result.put({'new_urls':new_urls,'data':data})
            except EOFError as e:
                print('Failed to connect to the control node')
                return
            except Exception as e:
                print(e,'\nCrawl failed!')

if __name__=='__main__':
    spider=SpiderWork()
    spider.crawl()
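Because every crawler node pulls from the same managed task queue and pushes into the same result queue, more SpiderWork instances can be started on other machines by pointing server_addr at the control node's address (keeping port 8001 and authkey b'baike'), and they will share the workload. Shutdown is coordinated through the queues as well: the control node puts the string 'end' on the task queue, and each crawler node echoes {'new_urls':'end','data':'end'} back so the result-handling and store processes can exit cleanly.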


Reposted from blog.csdn.net/weixin_39777626/article/details/81564409