Crawling Meitulu Galleries with Python Multithreading

A while back I saw people posting Python scripts for crawling gallery sites, but they all leaned on third-party modules. Today we'll do it with the standard library only, and extend it with image screening and image deduplication. It has been tested against real sites and runs rock solid, as long as your disk is big enough.

Author's note: we're here to study the technique. Don't get carried away, it's bad for you. O(∩_∩)O~

Let's start with the simplest case. Target site: https://www.meitulu.com/. Looking at the page structure, the image URLs simply increment by one each time, starting from the second image.

On the front end each picture is wrapped in an img tag, e.g. <img src="https://mtl.gzhuibei.com/images/img/10431/5.jpg" alt= ..., so a straightforward regex match is enough.
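
For example, a quick sanity check of that regex against the sample tag above (purely illustrative; the alt text here is made up):

import re

# Try the extraction regex on the sample <img> tag shown above
sample = '<img src="https://mtl.gzhuibei.com/images/img/10431/5.jpg" alt="demo">'
print(re.findall(r'<img src="([^"]+\.jpg)"', sample))
# ['https://mtl.gzhuibei.com/images/img/10431/5.jpg']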

First, generate the page links. Code as follows:

# Fill the page URL template with a range of numbers and return the list of page links
def SplicingPage(page,start,end):
    url = []
    for each in range(start,end):
        temporary = page.format(each)
        url.append(temporary)
    return url

Next, fetch the pages with the built-in urllib library:

# Fetch the page source using the built-in urllib library
def GetPageURL(page):
    head = GetUserAgent(page)
    req = request.Request(url=page,headers=head,method="GET")
    respon = request.urlopen(req,timeout=3)
    if respon.status == 200:
        html = respon.read().decode("utf-8")
        return html

Finally, regex-match the image URLs and download them, and we're done. Study the code for a minute and it explains itself; it's that simple:

    page_list = SplicingPage(str(args.url),2,100)   # args.url: the page URL template passed in from the command line
    for item in page_list:
        respon = GetPageURL(str(item))
        subject = re.findall(r'<img src="([^"]+\.jpg)"',respon,re.S)
        for each in subject:
            img_name = each.split("/")[-1]
            img_type = each.split("/")[-1].split(".")[1]
            save_name = str(random.randint(1111111,99999999)) + "." + img_type
            print("[+] Original name: {} saved as: {} path: {}".format(img_name,save_name,each))
            urllib.request.urlretrieve(each,save_name,None)

You can also extract the links with an external library:

from lxml import etree

# 'response' here is assumed to be the result of an earlier HTTP request, e.g. requests.get(url, headers=headers)
html = etree.HTML(response.content.decode())
src_list = html.xpath('//ul[@id="pins"]/li/a/img/@data-original')
alt_list = html.xpath('//ul[@id="pins"]/li/a/img/@alt')

A pool of request headers, used to get around basic anti-crawler checks:

user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6:
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]

headers = {'User-Agent': random.choice(user_agent)}

# Or wrap it in a helper that returns a random User-Agent from the pool
def get_user_agent():
    return random.choice(user_agent)

That's what a run looks like. Class, pants back on and keep studying!

Now let's extend this with a bit of knowledge: automatic image screening in Python. The basic idea for detecting NSFW images is to read every pixel of the image into memory, mark skin-colored pixels as white and clothing as black, work out the total pixel area of the person, and then compare the ratio of skin to clothing; if it exceeds a predefined range the image is flagged. That's the principle; a real implementation needs supporting algorithms, and Python has a library for it: pip install Pillow porndetective. Detection looks like this:

>>> from porndetective import PornDetective
>>> test=PornDetective("c://1.jpg")
>>> test.parse()
c://1.jpg JPEG 1600×2400: result=True message='Porn Pic!!'
<porndetective.PornDetective object at 0x0000021ACBA0EFD0>
>>>
>>> test=PornDetective("c://2.jpg")
>>> test.parse()
c://2.jpg JPEG 1620×2430: result=False message='Total skin percentage lower than 15 (12.51)'
<porndetective.PornDetective object at 0x0000021ACBA5F5E0>
>>> test.result
False

The verdicts are shown above. The accuracy isn't great; the first picture isn't strictly NSFW. Still, you could crawl an entire site and run every image through this library, keeping the hits and deleting the rest, so only the "quality" material survives.

The algorithm this library uses has real problems: by this logic a belly-dance photo would be flagged too. Machine-learning approaches generally give much higher accuracy; a hard-coded heuristic like this is merely passable and can't go any deeper. Whether an image is actually NSFW can't be judged from exposed skin alone; you have to weigh pose, degree of exposure, clothing type, and so on.
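
To make the principle above concrete, here is a minimal sketch of the skin-ratio idea using Pillow. The RGB skin heuristic and the 30% cutoff are illustrative assumptions of mine, not the algorithm porndetective actually uses:

from PIL import Image

def skin_ratio(path, threshold=0.30):
    # Count pixels that fall inside a crude RGB "skin" range and compare against the whole image
    img = Image.open(path).convert("RGB")
    pixels = list(img.getdata())
    skin = 0
    for r, g, b in pixels:
        # Rough skin heuristic (assumption): reddish, not too dark, red dominates green and blue
        if r > 95 and g > 40 and b > 20 and r > g and r > b and abs(r - g) > 15:
            skin += 1
    ratio = skin / len(pixels)
    return ratio, ratio > threshold

print(skin_ratio("1.jpg"))  # e.g. (0.42, True) would be flagged under this toy rule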

A bonus for everyone: as many pictures as you could want.

Finished archive (a surprise every time): https://pan.baidu.com/s/1sEdYWi_slGPjBRltiMStAw extraction code: 8t4e

Image deduplication, code below. This one took me a while; I had no idea at first, then it came to me. The principle: compute a CRC32 hash of each image, compare the hashes, map each file name to its hash, then locate the duplicates and delete the extra copies while keeping one. The code below sketches the idea.

import zlib,os

def Find_Repeat_File(file_path,file_type):
    Catalogue = os.listdir(file_path)
    CatalogueDict = {}  # Lookup dict: file name -> CRC32, used for the final query
    for each in Catalogue:
        path = (file_path + each)
        if os.path.splitext(path)[1] == file_type:
            with open(path,"rb") as fp:
                crc32 = zlib.crc32(fp.read())
                # print("[*] File: {} CRC32 checksum: {}".format(path,str(crc32)))
                CatalogueDict[each] = str(crc32)

    # Pull the CRC32 values out of the dict into the list CatalogueList
    CatalogueList = []
    for value in CatalogueDict.values():
        CatalogueList.append(value)

    # Count how many times each CRC32 value occurs and store the counts in CountDict
    CountDict = {}
    for each in CatalogueList:
        CountDict[each] = CatalogueList.count(each)

    # Any count greater than 1 marks a duplicated file; collect those hashes in RepeatFileFeatures
    RepeatFileFeatures = []
    for key,value in CountDict.items():
        if value > 1:
            print("[-] File hash: {} occurrences: {}".format(key,value))
            RepeatFileFeatures.append(key)

    # Map the duplicated hashes back to their locations
    for key,value in CatalogueDict.items():
        if value in RepeatFileFeatures:
            print("[*] Duplicate file located at: {}".format(file_path + key))

if __name__ == "__main__":
    Find_Repeat_File("D://python/",".jpg")

Come on, kid, let's go talk shop. Master the craft and every day is a feast.

The full script:

import os,re,random,urllib,argparse
from urllib import request,parse

# Build a random request header
def GetUserAgent(url):
    # UsrAgent is any User-Agent string, e.g. picked from the pool listed earlier; UsrRefer is the target page
    UsrAgent = random.choice(user_agent)   # user_agent: the UA list shown above
    UsrRefer = url
    UserAgent = {"User-Agent": UsrAgent,"Referer":UsrRefer}
    return UserAgent

# Fetch the page source using the built-in urllib library
def GetPageURL(page):
    head = GetUserAgent(page)
    req = request.Request(url=page,headers=head,method="GET")
    respon = request.urlopen(req,timeout=3)
    if respon.status == 200:
        html = respon.read().decode("utf-8") # or "gbk", depending on the page encoding
        return html

# Fill the page URL template with a range of numbers and return the list of page links
def SplicingPage(page,start,end):
    url = []
    for each in range(start,end):
        temporary = page.format(each)
        url.append(temporary)
    return url
 
if __name__ == "__main__":

    urls = "https://www.meitulu.com/item/{}_{}.html".format(str(random.randint(1000,20000)),"{}")
    
    page_list = SplicingPage(urls,2,100)
    for item in page_list:
        try:
            respon = GetPageURL(str(item))
            subject = re.findall(r'<img src="([^"]+\.jpg)"',respon,re.S)
            for each in subject:
                img_name = each.split("/")[-1]
                img_type = each.split("/")[-1].split(".")[1]
                save_name = str(random.randint(11111111,999999999)) + "." + img_type
                print("[+] Original name: {} saved as: {} path: {}".format(img_name,save_name,each))
                #urllib.request.urlretrieve(each,save_name,None)  # plain download without a request header
                head = GetUserAgent(str(urls))                     # random request header
                ret = urllib.request.Request(each,headers=head)    # each = image URL
                respons = urllib.request.urlopen(ret,timeout=10)   # open the image URL
                with open(save_name,"wb") as fp:
                    fp.write(respons.read())
        except Exception:
            exit(1)

The end result is high-concurrency downloading, with a clear division of labor in the code: one part cleans up duplicates, one deletes anything under 150 KB, one crawls. You get to be the foreman. All-nighter tonight.
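
The "delete anything under 150 KB" helper mentioned above isn't shown in the original code; a possible sketch of it (the 150 KB threshold comes from the sentence above, everything else is my assumption):

import os

def Remove_Small_Files(file_path, min_size=150 * 1024):
    # Drop thumbnails and broken downloads below the size threshold
    for name in os.listdir(file_path):
        path = os.path.join(file_path, name)
        if os.path.isfile(path) and name.endswith(".jpg") and os.path.getsize(path) < min_size:
            print("[-] Too small, deleting: {}".format(path))
            os.remove(path)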

Crawlers for two other sites, published here: those addresses aren't suitable to post in the clear, so they're base64-encoded. Decode them yourself; you know what I mean.

import os,urllib,random,argparse,sys
from urllib import request,parse
from bs4 import BeautifulSoup

def GetUserAgent(url):
    # Same header builder as in the main script: random User-Agent (from the pool shown earlier) plus the page as Referer
    return {"User-Agent": random.choice(user_agent),"Referer":url}

def GetPageURL(page):
    head = GetUserAgent(page)
    req = request.Request(url=page,headers=head,method="GET")
    respon = request.urlopen(req,timeout=30)
    if respon.status == 200:
        html = respon.read().decode("utf-8")
        return html

if __name__ == "__main__":
    runt = []
    waibu = GetPageURL("https://aHR0cHM6Ly93dXNvLm1lL2ZvcnVtLnBocD9tb2Q9Zm9ydW1kaXNwbGF5JmZpZD00OCZ0eXBlaWQ9MTE0JmZpbHRlcj10eXBlaWQmdHlwZWlkPTExNA==")
    soup1 = BeautifulSoup(waibu,"html.parser")
    ret1 = soup1.select("div[id='threadlist'] ul[id='waterfall'] a")
    for x in ret1:
        runt.append(x.attrs["href"])
    
    for ss in runt:
        print("[+] 爬行: {}".format(ss))
        try:
            resp = []
            respon = GetPageURL(str(ss))
            soup = BeautifulSoup(respon,"html.parser")
            ret = soup.select("div[class='pct'] div[class='pcb'] td[class='t_f'] img")
            try:
                for i in ret:
                    url = "https://aHR0cHM6Ly93dXNvLm1lLw== " + str(i.attrs["file"])
                    print(url)
                    resp.append(url)
            except Exception:
                pass
                
            for each in resp:
                try:
                    img_name = each.split("/")[-1]
                    print("down: {}".format(img_name))
                    head=GetUserAgent("https://aHR0cHM6Ly93dXNvLm1lLw== ")
                    ret = urllib.request.Request(each,headers=head)
                    respons = urllib.request.urlopen(ret,timeout=60)
                    with open(img_name,"wb") as fp:
                        fp.write(respons.read())
                except Exception:
                    pass
        except Exception:
            pass

The second crawler: this one runs multithreaded, and a separate script launches it across multiple processes. Crawling is extremely fast, 100% CPU utilization.

import os

# lis.log contains one model name per line
with open("lis.log","r") as fp:
    aaa = fp.readlines()

for i in aaa:
    nam = i.replace("\n","")
    cmd = "python thread.py " + nam
    os.popen(cmd)   # launch one worker process per name without waiting for it

The multithreaded worker (thread.py):

import requests,random
from bs4 import BeautifulSoup
import os,re,random,urllib,argparse
from urllib import request,parse
import threading,sys

def GetUserAgent(url):
    refer = url
    agent = random.choice(user_agent)   # user_agent: the UA pool shown earlier
    UserAgent = {"User-Agent": agent,"Referer":refer}
    return UserAgent

def run(user):
    head = GetUserAgent("https://aHR0cHM6Ly93d3cuYW1ldGFydC5jb20v")
    ret = requests.get("https://aHR0cHM6Ly93d3cuYW1ldGFydC5jb20vbW9kZWxzL3t9Lw==".format(user),headers=head,timeout=3)
    scan_url = []
    if ret.status_code == 200:
        soup = BeautifulSoup(ret.text,"html.parser")
        a = soup.select("div[class='thumbs'] a")
        for each in a:
            url = "https://aHR0cHM6Ly93d3cuYW1ldGFydC5jb20v" + str(each["href"])
            scan_url.append(url)

    rando = random.choice(scan_url)
    print("随机编号: {}".format(rando))

    try:
        ret = requests.get(url=str(rando),headers=head,timeout=10)
        if ret.status_code == 200:
            soup = BeautifulSoup(ret.text,"html.parser")
            img = soup.select("div[class='container'] div div a")
            try:
                for each in img:
                    head = GetUserAgent(str(each["href"]))
                    down = requests.get(url=str(each["href"]),headers=head)
                    img_name = str(random.randint(100000000,9999999999)) + ".jpg"
                    print("[+] 图片解析: {} 保存为: {}".format(each["href"],img_name))
                    with open(img_name,"wb") as fp:
                        fp.write(down.content)
            except Exception:
                pass
    except Exception:
        exit(1)

if __name__ == "__main__":
    args = sys.argv
    user = str(args[1])
    try:
        os.mkdir(user)
        os.chdir("D://python/save/" + user)
        for item in range(100):
            t = threading.Thread(target=run,args=(user,))
            t.start()
    except FileExistsError:
        exit(0)

With 20 processes, each carrying 100 threads, that's roughly 1,500 concurrent requests per second. Because the deduplication program keeps scanning in the background, no image is stored twice and only the best copy is kept. Funny thing: once you have this many pictures, none of them look good any more, hahaha.

After crawling these sites we end up with tens of thousands of images, but what if we only want to look at one of them? Time for the face-detection squad, which filters down to the pictures we actually want to see.

import cv2
import numpy as np

def DisplayFace(img_path):
    img = cv2.imread(img_path)  # read the image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")  # load the cascade classifier model
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # Draw the bounding box on the original color image (blue, 3 px wide)
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
        cv2.namedWindow("img", 0)
        cv2.resizeWindow("img", 300, 400)
        cv2.imshow('img', img)
        cv2.waitKey()

def detect_face(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
    if len(faces) == 0:
        return None, None
    # Return the grayscale face region (rows = y..y+h, columns = x..x+w) and its rectangle
    (x, y, w, h) = faces[0]
    return gray[y:y + h, x:x + w], faces[0]

DisplayFace("c:\\1.jpg")

To be continued...


Reposted from www.cnblogs.com/LyShark/p/12607853.html