[Crawler mini-program: scraping Qiushibaike posts] XPath (multiprocessing version)

# This program has been tested and works. It is meant for practicing basic crawling concepts; corrections and feedback are welcome.
import requests
from lxml import etree
from multiprocessing import JoinableQueue as Queue
from multiprocessing import Process

"""Crawl target: http://www.qiushibaike.com/8hr/page/1
   Implemented with multiple processes
"""


class QiuShi:
    def __init__(self):
        # Base URL and request headers
        self.base_url = 'http://www.qiushibaike.com/8hr/page/{}'
        # requests expects headers as a dict keyed by header name,
        # not a bare User-Agent string
        self.headers = {
            'User-Agent': ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                           '(KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36')
        }

        # Queues that pass work between the pipeline stages
        self.url_queue = Queue()
        self.request_queue = Queue()
        self.html_queue = Queue()

    def get_url_list(self):
        """Build and enqueue every page URL."""
        for i in range(1, 14):
            target_url = self.base_url.format(i)
            print(target_url)
            # Each put() registers one pending task that a later
            # task_done() must acknowledge
            self.url_queue.put(target_url)

    def request_url(self):
        """Fetch each queued URL."""
        while True:
            target_url = self.url_queue.get()
            # headers must be passed by keyword: the second positional
            # argument of requests.get() is params, not headers
            response = requests.get(target_url, headers=self.headers)
            print(response)
            self.request_queue.put(response)
            self.url_queue.task_done()

    def get_content(self):
        """Parse the author and content out of each response."""
        while True:
            html_text = self.request_queue.get().content.decode()
            html = etree.HTML(html_text)
            div_list = html.xpath('//div[@id="content-left"]/div')
            content_list = []
            for div in div_list:
                item = {}
                item['author'] = div.xpath('.//h2/text()')[0].strip()
                item['content'] = div.xpath('.//span/text()')[0].strip()
                print(item)
                content_list.append(item)
            self.html_queue.put(content_list)
            self.request_queue.task_done()

    def save_data(self):
        """Append the parsed items to a file."""
        while True:
            data_list = self.html_queue.get()
            for data in data_list:
                # utf-8 keeps the Chinese text intact regardless of the
                # platform's default encoding
                with open('qiushi.txt', 'a+', encoding='utf-8') as f:
                    f.write(str(data))
                    f.write('\r\n')
            self.html_queue.task_done()

    def main(self):
        # Enqueue every URL up front
        self.get_url_list()

        # Collect the worker processes
        process_list = []
        p_request = Process(target=self.request_url)
        process_list.append(p_request)

        p_content = Process(target=self.get_content)
        process_list.append(p_content)

        p_save_data = Process(target=self.save_data)
        process_list.append(p_save_data)

        # Start every worker as a daemon: when the main process exits,
        # the children are terminated whether or not they have finished
        for process in process_list:
            process.daemon = True
            process.start()

        # Block until each queue reports all of its tasks done; like a
        # manager who only leaves after the staff have clocked out
        for director in [self.url_queue, self.request_queue, self.html_queue]:
            director.join()


if __name__ == '__main__':
    qiushi = QiuShi()
    qiushi.main()
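
The XPath extraction in get_content() is easiest to see on a literal snippet. The markup below is invented to mirror the structure those expressions expect; qiushibaike's real markup has changed since this post was written, so treat it as a sketch:

from lxml import etree

# Invented markup shaped like what the crawler's XPath expects
snippet = '''
<div id="content-left">
  <div>
    <h2> some-author </h2>
    <span> the joke text </span>
  </div>
</div>
'''

html = etree.HTML(snippet)
for div in html.xpath('//div[@id="content-left"]/div'):
    item = {
        'author': div.xpath('.//h2/text()')[0].strip(),
        'content': div.xpath('.//span/text()')[0].strip(),
    }
    print(item)  # {'author': 'some-author', 'content': 'the joke text'}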
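
The shutdown logic in main() is the subtle part: it never joins the processes, only the queues. Every put() registers one pending task, every task_done() retires one, and JoinableQueue.join() unblocks once the counts match; the workers are infinite loops, so they run as daemons and die with the main process. Here is a minimal standalone sketch of that handshake (the worker function and queue contents are illustrative, not from the original):

from multiprocessing import JoinableQueue, Process

def worker(q):
    # Infinite loop: the process never exits on its own,
    # which is exactly why it must be a daemon
    while True:
        item = q.get()
        print('processed', item)
        q.task_done()  # retire one pending put()

if __name__ == '__main__':
    q = JoinableQueue()
    for i in range(5):
        q.put(i)          # five pending tasks

    p = Process(target=worker, args=(q,))
    p.daemon = True       # killed when the main process exits
    p.start()

    # Unblocks once task_done() has been called five times;
    # the daemon worker is then reaped as the script returns
    q.join()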

Reposted from www.cnblogs.com/888888CN/p/10070246.html