多线程爬取妹子图 python 爬虫

多线程的时候, 控制台输出"开始下载第几张图片"的时候会乱, 我也没啥好的处理方法, 

还有就是我运行了一次来着, 最后不出意料地崩了。好像是爬的图片太多了, 最后就莫名其妙地不继续运行了, 也没报错。每次少爬几页、多爬几次, 还是没问题的。

import requests
from bs4 import BeautifulSoup
from urllib import request
import urllib.request
from urllib import error
import os
import random
import time
from multiprocessing import Pool
# HTTP request headers shared by every page / image request.
# The User-Agent mimics a desktop Chrome browser so the site does not
# reject the crawler outright.
headers = {
'Cache-Control': 'max-age=43200',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'

}
def download(url, base_dir='F://heiheihei'):
    """Fetch one gallery page and download every image it contains.

    Args:
        url: gallery page URL, e.g. 'http://www.meizitu.com/a/5507.html'.
        base_dir: root folder; one sub-folder per gallery title is created
            under it (default keeps the original hard-coded location).

    Images are saved as 1.jpg, 2.jpg, ... inside the gallery folder.
    Per-image failures are printed and skipped so one bad image does not
    abort the whole gallery.
    """
    res = requests.get(url, headers=headers, timeout=30)
    res.encoding = 'gb2312'  # site serves GBK-family encoded pages
    soup = BeautifulSoup(res.text, 'html.parser')
    img = soup.select('#picture p img')
    # Guard against a blocked request or changed page layout: the original
    # [0] index raised IndexError when the selector matched nothing.
    title_nodes = soup.select('.metaRight h2 a')
    if not title_nodes:
        print('页面解析失败, 跳过: ' + url)
        return
    header = title_nodes[0].text
    path = '{}//{}'.format(base_dir, header)
    if not os.path.isdir(path):
        os.makedirs(path)
    print(path)
    for x, k in enumerate(img, start=1):
        try:
            print('开始下载第{}张图片'.format(x) + k['src'])
            # urlretrieve sends no User-Agent and has no timeout, so it is
            # easily blocked or hangs forever; fetch with requests + the
            # shared headers instead.
            img_res = requests.get(k['src'], headers=headers, timeout=30)
            img_res.raise_for_status()
            with open('{}//{}.jpg'.format(path, x), 'wb') as f:
                f.write(img_res.content)
            print('第{}张图片下载完成'.format(x))
            time.sleep(random.random())  # polite random pause between images
        except (requests.RequestException, urllib.error.HTTPError) as e:
            print(e)

# Single-gallery URL kept for manual testing of download().
url = 'http://www.meizitu.com/a/5507.html'
# download(url)

# List pages look like http://www.meizitu.com/a/more_1.html, more_2.html, ...
same_url = 'http://www.meizitu.com/a/more_'
if __name__ == '__main__':
    # NOTE: multiprocessing.Pool is a *process* pool, not a thread pool.
    # 5 workers download galleries concurrently.
    pool = Pool(5)
    # apply_async silently swallows worker exceptions unless the
    # AsyncResult is collected and .get() is called — which is why the
    # crawl could "stop without any error". Keep the results to surface
    # failures after join().
    results = []
    for n in range(1, 6):  # crawl list pages 1..5
        each_url = same_url + '{}.html'.format(n)
        print(each_url)
        start_url = requests.get(each_url, headers=headers)
        soup = BeautifulSoup(start_url.text, 'html.parser')
        all_a = soup.select('.wp-item .con .pic a')
        for k in all_a:
            results.append(pool.apply_async(download, args=(k['href'],)))
    pool.close()
    pool.join()
    for r in results:
        try:
            r.get()  # re-raise any exception the worker hit
        except Exception as e:
            print(e)
    print('所有图片已下完')

爬完的效果 嘤嘤嘤

猜你喜欢

转载自blog.csdn.net/weixin_40889558/article/details/84625433