该爬虫的目的是下载一个素材网站的装饰类素材图片保存到本地文件夹,总共有800多个分页,为了显示多线程效果只下载10页,每一页是一个url。
首先是下载类
import requests
from lxml import html
from urllib.parse import urlparse, unquote
import os
import re
import time
from threading import Thread
# 流程:
# 下载网页的html
# 从html中解析每张图片的地址
# 下载图片并保存
class Download:
    """Download every image selected by a CSS selector from one web page.

    Workflow: fetch the page HTML, extract each matched tag's ``src``
    attribute (absolutizing relative links), then download each image and
    save it under a folder derived from the page URL.

    Args:
        url: address of the page whose images should be downloaded.
        css_select: CSS selector matching the tags that carry image links.
        user_agent: optional User-Agent header value; defaults to a
            desktop Chrome UA string.
    """
    def __init__(self, url, css_select, user_agent=None):
        self.url = url
        self.css_select = css_select
        self.link_list = None  # filled in by get_url()
        self.path = None       # filled in by mark_path()
        self.user_agent = user_agent if user_agent else 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Referer': self.url,
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': self.user_agent,
        }

    def get_url(self):
        """Fetch the page and store the absolute image URLs in self.link_list."""
        parsed = urlparse(self.url)
        protocol, domain = parsed.scheme, parsed.netloc
        # Timeout so a stalled server cannot hang a worker thread forever.
        page_html = requests.get(self.url, headers=self.headers, timeout=30).text
        tree = html.fromstring(page_html)
        link_list = []
        for element in tree.cssselect(self.css_select):
            link = element.get('src')
            if not link:
                # Tag has no src attribute; previously this crashed with
                # AttributeError on None.startswith().
                continue
            if link.startswith('//'):
                # Protocol-relative link: prepend the page's scheme.
                link = f"{protocol}:{link}"
            elif link.startswith('/'):
                # Site-relative link: prepend scheme and domain.
                link = f"{protocol}://{domain}{link}"
            link_list.append(link)
        self.link_list = link_list

    def mark_path(self):
        """Create the save directory <cwd>/<domain>/<page path> and store it in self.path.

        Dots in the domain and slashes in the (percent-decoded) path are
        replaced with underscores to form filesystem-safe names.
        """
        parsed = urlparse(self.url)
        root_path_name = parsed.netloc.replace('.', '_')
        root_path = os.path.join(os.getcwd(), root_path_name)
        save_path_name = unquote(parsed.path).replace('/', '_')
        save_path = os.path.join(root_path, save_path_name)
        os.makedirs(save_path, exist_ok=True)
        self.path = save_path

    def download(self):
        """Download all images from the page and save them under self.path.

        Links whose final path segment does not contain 'jpg' or 'png' are
        skipped (previously re.search(...).group() crashed on them).
        """
        print('Parse %s' % self.url)
        self.get_url()
        self.mark_path()
        for link in self.link_list:
            match = re.search(r"[^/]*(jpg|png)", link)
            if match is None:
                continue  # not a jpg/png link; nothing sensible to name the file
            file_name = match.group()
            print('Download: %s' % link)
            with open(os.path.join(self.path, file_name), 'wb') as f:
                f.write(requests.get(link, headers=self.headers, timeout=30).content)
        print('%s all done!' % self.url)
然后是多线程下载,设置最大线程数5
if __name__ == "__main__":
    # URLs to crawl: the first 10 listing pages of the "decoration" tag.
    url_list = [f'https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/{i}.html'
                for i in range(1, 11)]
    css_select = '.egeli_pic_dl img'
    thread_list = []
    max_thread = 5  # cap on concurrently running download threads
    time_start = time.time()
    while url_list or thread_list:
        # Reap finished threads.  Iterate over a snapshot: removing items
        # from the list being iterated skips elements (the original bug).
        for thread in thread_list[:]:
            if not thread.is_alive():
                thread_list.remove(thread)
                print(thread.name, 'be over.')
        # Top up the pool until max_thread workers are running.
        while url_list and len(thread_list) < max_thread:
            t = Thread(target=Download(url_list.pop(), css_select).download)
            print(t.name, 'start...')
            t.start()
            thread_list.append(t)
        # Brief sleep so the supervisor loop does not busy-spin at 100% CPU
        # while the workers download.
        time.sleep(0.05)
    time_end = time.time()
    print('Total time %.2f second.' % (time_end - time_start))
爬虫输出(省略大部分)
Thread-1 start...
Parse https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/10.html
Thread-2 start...
Thread-3 start...
Parse https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/9.html
Thread-4 start...
Parse https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/8.html
Parse https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/7.html
Thread-5 start...
Parse https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/6.html
...
...
...
https://sc.enterdesk.com/tag-%E8%A3%85%E9%A5%B0/1.html all done!
Thread-10 be over.
Total time 10.69 second.
用时不到11秒,个人觉得作为多线程速度还是可以的。