Scraping photo galleries from mzitu with Python 2


This is much the same as the previous post; since it targets a different site, I'm putting it on the blog as a record as well.

# coding=utf-8
import os

import requests
from bs4 import BeautifulSoup

all_url = 'http://www.mzitu.com'
# HTTP request headers; the Referer header is what defeats the site's
# hotlink protection for the image host
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://www.mzitu.com'
}
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net'
}

start_html = requests.get(all_url, headers=Hostreferer)
# local save directory
path = 'D:/mzitu/'
# find the largest page number of the gallery list
soup = BeautifulSoup(start_html.text, "html.parser")
page = soup.find_all('a', class_='page-numbers')
max_page = page[-2].text

same_url = 'http://www.mzitu.com/page/'
index = 1
for n in range(1, int(max_page) + 1):
    ul = same_url + str(n)
    start_html = requests.get(ul, headers=Hostreferer)
    soup = BeautifulSoup(start_html.text, "html.parser")
    # every gallery link on a list page opens in a new tab (target="_blank")
    all_a = soup.find('div', class_='postlist').find_all('a', target='_blank')

    for a in all_a:
        # keep the title as unicode so it can be matched against the
        # <img alt="..."> attribute later; only encode it when printing
        title = a.get_text()
        if title != '':
            print('About to scrape: ' + title.encode('utf-8'))
            # directories are named by a running index, which avoids titles
            # containing characters such as '?' that Windows rejects
            curr_path = path + str(index)
            index = index + 1
            if os.path.exists(curr_path):
                flag = 1
            else:
                os.makedirs(curr_path)
                flag = 0
            os.chdir(curr_path)
            href = a['href']
            html = requests.get(href, headers=Hostreferer)
            mess = BeautifulSoup(html.text, "html.parser")
            # the 11th <span> on a gallery page holds the number of pictures
            pic_max = mess.find_all('span')
            pic_max = pic_max[10].text
            # skip galleries that have already been saved completely
            if flag == 1 and len(os.listdir(curr_path)) >= int(pic_max):
                print('Already saved, skipping')
                continue
            for num in range(1, int(pic_max) + 1):
                # picture pages are gallery_url/1, gallery_url/2, ...
                pic = href + '/' + str(num)
                html = requests.get(pic, headers=Hostreferer)
                mess = BeautifulSoup(html.text, "html.parser")
                pic_url = mess.find('img', alt=title)
                if pic_url is None:
                    continue
                print(pic_url['src'])
                # fetch the actual image with the anti-hotlink Referer header
                html = requests.get(pic_url['src'], headers=Picreferer)
                file_name = pic_url['src'].split('/')[-1]
                f = open(file_name, 'wb')
                f.write(html.content)
                f.close()
                print('Done')
    print('Page %s done' % n)
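
As the comments note, the Referer header is what gets past the image host's hotlink protection. A quick way to convince yourself, assuming the host really does check the Referer (the image URL below is made up purely for illustration), is to compare a request with and without it:

# coding=utf-8
import requests

# hypothetical image URL, just for illustration
img_url = 'http://i.meizitu.net/2018/12/example.jpg'

plain = requests.get(img_url)                      # no Referer header
spoofed = requests.get(img_url, headers={
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net',
})
# if the host enforces hotlink protection, expect something like 403 vs 200
print('%s vs %s' % (plain.status_code, spoofed.status_code))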

In fact, you will notice that every gallery's URL follows the same pattern:

https://www.mzitu.com/7427 (only the number changes)

So all you need to do is request that address, read the gallery's total picture count from the page, and then request and download the pictures one by one, as sketched below.
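
Here is a minimal sketch of that approach. It assumes the same page layout the full script above relies on (the picture count in the 11th <span>, images served from the meizitu image host) and reuses its request headers; the gallery id 7427 and the download_gallery helper are purely illustrative.

# coding=utf-8
import os
import re

import requests
from bs4 import BeautifulSoup

Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://www.mzitu.com'
}
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net'
}

def download_gallery(gallery_id, save_dir):
    # request the gallery's landing page directly by its numeric id
    href = 'https://www.mzitu.com/' + str(gallery_id)
    mess = BeautifulSoup(requests.get(href, headers=Hostreferer).text, "html.parser")
    # same assumption as the full script above: the 11th <span> holds the picture count
    pic_max = int(mess.find_all('span')[10].text)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for num in range(1, pic_max + 1):
        page_html = requests.get(href + '/' + str(num), headers=Hostreferer).text
        page = BeautifulSoup(page_html, "html.parser")
        # assumption: the photo is the first <img> served from the meizitu image host
        img = page.find('img', src=re.compile('meizitu'))
        if img is None:
            continue
        data = requests.get(img['src'], headers=Picreferer).content
        with open(os.path.join(save_dir, img['src'].split('/')[-1]), 'wb') as f:
            f.write(data)

download_gallery(7427, 'D:/mzitu/7427')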

