A crawler that downloads girl pictures

import urllib.request
import os
import random

count = 0  # running total of images saved

def url_open(url):
    # fetch a URL and return the raw bytes; send a browser-like
    # User-Agent, since some sites reject urllib's default one
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib.request.urlopen(req)
    return response.read()
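If the crawl stalls on a slow connection, urlopen also accepts a timeout. A minimal variant sketch (the name url_open_with_timeout and the 10-second value are illustrative, not part of the original script):

def url_open_with_timeout(url, timeout=10):
    # same as url_open, but bound each request so one stalled
    # connection cannot hang the whole crawl
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req, timeout=timeout) as response:
        return response.read()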

def get_page(url):
    # pull the current page number out of the pagination markup:
    # 'page-numbers current' plus 22 characters lands just past the
    # closing '">' of the span, and the number runs up to the next '<'
    # (not called by download_mm below; see the note after the listing)
    html = url_open(url).decode('utf-8')
    a = html.find('page-numbers current') + 22
    b = html.find('<', a)
    return html[a:b]

def find_imgs(page_url):
    # scan the page for img src="....jpg" and collect the addresses;
    # a+9 skips the literal 'img src="' so the slice starts at the URL
    html = url_open(page_url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)  # look for '.jpg' within 255 chars
        if b != -1:
            img_addrs.append(html[a + 9:b + 4])
        else:
            b = a + 9  # no .jpg nearby; resume the search past this tag
        a = html.find('img src=', b)
    return img_addrs
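Scanning with find() and hand-counted offsets is brittle if the markup shifts. A regex does the same extraction in one pass; a sketch, assuming the page embeds images as img src="...jpg" (find_imgs_re is an illustrative name, not in the original):

import re

def find_imgs_re(page_url):
    # equivalent to find_imgs: return every quoted .jpg address that
    # follows an img src= attribute
    html = url_open(page_url).decode('utf-8')
    return re.findall(r'img src="([^"]*?\.jpg)"', html)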
def save_imgs(folder, img_addrs):
    global count
    for each in img_addrs:
        filename = each.split('/')[-1]
        try:
            # download before opening the file, so a failed fetch
            # does not leave an empty file behind
            img = url_open(each)
        except OSError:  # urllib errors are OSError subclasses; skip and move on
            continue
        with open(filename, 'wb') as f:
            f.write(img)
        count += 1
        print(count)


def download_mm(folder=None, pages=367):
    # pick a random folder name inside the function so each call gets
    # a fresh one (a default argument is evaluated only once)
    if folder is None:
        folder = str(random.randint(0, 999))
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)
    print(folder)

    url = 'http://www.mzitu.com/zipai/'
    # walk the comment pages from newest to oldest
    for page_num in range(pages, 0, -1):
        page_url = url + 'comment-page-' + str(page_num) + '/#comments'
        try:
            img_addrs = find_imgs(page_url)
            save_imgs(folder, img_addrs)
        except Exception:  # skip pages that fail to download or parse
            continue

if __name__ == '__main__':
    download_mm()
    print('OK')
    input('Press Enter to exit')
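get_page above is defined but never called. Since the pagination on the front page marks the newest comment page as 'current', it could replace the hardcoded pages=367; a sketch under that assumption (latest_page is a hypothetical helper, and it only works if the markup still matches get_page's offsets):

def latest_page(url='http://www.mzitu.com/zipai/'):
    # the 'current' page number on the front page is also the newest
    # comment page
    return int(get_page(url))

# download_mm(pages=latest_page())

If the markup has changed, int() fails loudly here rather than letting the crawler silently walk the wrong page range.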


Reposted from www.cnblogs.com/nnty/p/9927150.html