Scraping Large Numbers of HD Wallpapers with a Crawler

Approach:

Start from the web page itself and study its overall structure, because the code in this post parses the HTML with XPath (see the previous post for an introduction to XPath parsing).
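
Before writing the full spider, it helps to confirm that the toplist page can be fetched and that the XPath selector actually matches the thumbnail links. A minimal sketch for that check, assuming the wallhaven.cc toplist URL and the //a[@class="preview"]/@href selector used in the code below; the simple User-Agent string is just for quick testing:

import requests
from lxml import etree

url = "https://wallhaven.cc/toplist?page=1"
headers = {"User-Agent": "Mozilla/5.0"}  # a plain UA string is usually enough for a quick check

html = etree.HTML(requests.get(url, headers=headers).content)
links = html.xpath('//a[@class="preview"]/@href')  # detail-page links for each wallpaper
print(len(links), links[:3])  # number of matches and a small sample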

import requests
from lxml import etree

class Spider:
    def __init__(self):
        self.toplist_image = []  # list of first-level (detail page) URLs
        self.a = 0  # running counter used to keep file names unique
        self.file_name = ""  # current output file name
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip,deflate",
            "Accept-Language": "zh-CN,zh;q=0.8"
        }  # request headers that mimic a normal browser (basic anti-scraping measure)
    def req(self):
        """Fetch the toplist pages and collect the detail-page URLs."""
        print("Fetching wallpaper list...")
        for i in range(1, 3):  # number of toplist pages to crawl
            url = f"https://wallhaven.cc/toplist?page={i}"  # the page number drives pagination
            result = requests.get(url, headers=self.headers).content  # request the toplist page
            html = etree.HTML(result)
            links = html.xpath('//a[@class="preview"]/@href')  # XPath: detail-page URLs, returned as a list
            for url1 in links:
                self.toplist_image.append(url1)  # collect the first-level URLs


    def download(self):
        """Visit each detail page, extract the full-size image URL and save it."""
        for i in self.toplist_image:  # iterate over the detail-page URLs
            res = requests.get(i, headers=self.headers).content  # request the detail page
            html = etree.HTML(res)
            img_urls = html.xpath('//div[@class="scrollbox"]/img/@src')  # second-level URLs: the full-size image(s)
            self.a += 1
            self.file_name = f"C:\\bz\\wallpaper{self.a}.jpg"  # local path and file name for this wallpaper
            print(f"Downloading wallpaper{self.a}.jpg")
            for img in img_urls:  # iterate over the image URLs
                resa = requests.get(img, headers=self.headers)  # request the image itself
                with open(self.file_name, mode="wb") as file:
                    file.write(resa.content)  # write the image bytes to disk (the with-block closes the file)
if __name__ == "__main__":
    s = Spider()
    s.req()
    s.download()
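
One limitation of the listing above is that every image is saved with a hard-coded .jpg extension into C:\bz\, which must already exist. A small sketch of how the save step could be hardened; os.makedirs and os.path.splitext are standard-library calls, while the save_image helper, its parameters, and the fallback extension are illustrative choices rather than part of the original code:

import os
import requests

def save_image(img_url, out_dir="C:\\bz", index=1, headers=None):
    """Save one image, keeping the extension found in its URL."""
    os.makedirs(out_dir, exist_ok=True)  # create the output folder if it does not exist
    ext = os.path.splitext(img_url)[1] or ".jpg"  # e.g. ".png" on some wallpapers, fall back to ".jpg"
    path = os.path.join(out_dir, f"wallpaper{index}{ext}")
    data = requests.get(img_url, headers=headers).content
    with open(path, mode="wb") as file:
        file.write(data)
    return path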