Python爬虫案例:爬取百度图片

纠结于爬取百度图片,竟然花费了一天的时间才让程序顺利跑起来。其中踩坑无数。而且还发现公司电脑实在是比较差劲……

import os
import re
import time
import urllib
import urllib.error
import urllib.parse
import urllib.request
from os.path import join

import requests
 1 def getPages(keyword,pages=5):  
 2     params = []  
 3     for i in range(30, 30*pages+30, 30):  
 4         # 通过网上资料,可以使用 requests.get() 解析 json 数据,能够得到对应 url
 5         # 其中一个坑是,原来并不是以下的每个数据都是需要的,某些不要也可以!
 6         # Python学习交流群125240963每天更新资料视频,大牛指导
 7         params.append({  
 8                       'tn': 'resultjson_com',  
 9                       'ipn': 'rj',  
10                       'ct': 201326592,  
11                       'is': '',  
12                       'fp': 'result',  
13                       'queryWord': keyword,  
14                       'cl': 2,  
15                       'lm': -1,  
16                       'ie': 'utf-8',  
17                       'oe': 'utf-8',  
18                       'adpicid': '',  
19                       'st': -1,  
20                       'z': '',  
21                       'ic': 0,  
22                       'word': keyword,  
23                       's': '',  
24                       'se': '',  
25                       'tab': '',  
26                       'width': '',  
27                       'height': '',  
28                       'face': 0,  
29                       'istype': 2,  
30                       'qc': '',  
31                       'nc': 1,  
32                       'fr': '', 
33                       'pn': i,  
34                       'rn': 30,  
35                       #'gsm': '1e',  
36                       #'1488942260214': '' 
37                   })  
38     url = 'https://image.baidu.com/search/acjson'  
39     urls = []  
40     for param in params: 
41         # url 与 param 合成完整 url  
42         urls.append(requests.get(url,param,headers = headers,timeout = 3).url)     # 
43     #print (urls)
44     return urls  
 1 def get_Img_url(keyword,pages=5):
 2     # 每页的 URL 集合
 3     pageUrls = getPages(keyword,pages)             
 4     # 图片url : "thumbURL":"https://ss0.bdstatic.com/70cFuHSh_Q1YnxGkpoWK1HF6hhy/it/u=1789805203,3542215675&fm=27&gp=0.jpg"
 5     # 正则写的很差!
 6     exp = re.compile(r'"thumbURL":"[\:\,\_\w\d\=\.\+\s\/\%\$\&]+\.jpg')
 7     imgUrls = []
 8     for url in pageUrls:
 9         # 逐个读取每页 URL
10         # Python学习交流群125240963 每天更新资料大牛指导
11         try:
12             with urllib.request.urlopen(url,timeout = 3) as pageUrl:
13                 imgUrl = pageUrl.read().decode('utf-8') 
14                 urls = re.findall(exp,imgUrl)
15                 for url in urls:
16                     # 除去 thumbURL":"
17                     imgUrls.append(url.replace('"thumbURL":"',''))
18         # 正则提取 ImgUrl
19         except:
20             print('SomePage is not opened!')           
21             continue
22     # 所有照片的 urls
23     return imgUrls
 1 def getImg(urlList,localPath):  
 2     if not os.path.exists(localPath):  
 3         os.makedirs(localPath)  
 4     x = 1 
 5     for url in urlList:
 6         # 将 for 循环写在 try 外面
 7         try: 
 8             # 什么时候应该转义?这点还没弄明白
 9             # 没有打开特定文件夹!
10             with open(keyword+str(x)+'.jpg','wb') as f:        # 原本写 ‘\.jpg’ 会出错,打印 \\.jpg
11                 img = urllib.request.urlopen(url,timeout = 3).read()
12                 f.write(img)
13             print('%d.jpg is downloaded!' % x)
14             x += 1
15         except Exception:  
16             print("\n  Failed downloading NO. %d image\n" % x)
if __name__ == '__main__':
    # Script entry point.  Configuration lives in module-level globals:
    # NOTE(review): `keyword` is read as a global by getImg() for output
    # filenames, and `headers` is read as a global by getPages() for the
    # HTTP request -- renaming either here would break those functions.
    keyword = '美女'  # search term
    pages = 500  # number of result pages to fetch (30 images per page)
    localPath = 'F:/pythonCode/day1001/'  # download directory (Windows path)
    # Browser-like User-Agent so Baidu serves the normal JSON responses.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
    urlList = get_Img_url(keyword,pages)
    getImg(urlList,localPath)

猜你喜欢

转载自www.cnblogs.com/huohuohuo1/p/9061973.html