Scraping high-resolution original images from Baidu Image Search

The code is adapted from this post:
https://blog.csdn.net/m0_52521883/article/details/120232250

It supports downloading either the original images or the thumbnails. The full script is below; the only change relative to the referenced code is the added block that decodes the obfuscated objURL values.

import os
import urllib.request
from urllib.parse import quote
import re
import urllib.error
import requests
import time

get_cookie_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/92.0.4515.159 Safari/537.36"
}
get_cookie_html = "https://www.baidu.com/"
get_cookie_target = requests.session()
cookie_target = get_cookie_target.get(get_cookie_html, headers=get_cookie_headers)
cookie = requests.utils.dict_from_cookiejar(cookie_target.cookies)
#print(cookie)
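# The homepage request above only serves to collect Baidu's session cookies;
# they are flattened into a "key=value;" header string below.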

key = []
value = []
result_cookie = ""

for i in cookie.keys():
    key.append(i)

for i in cookie.values():
    value.append(i)

for i in range(len(key)):
    result_cookie += key[i] + '=' + value[i] + ";"

print(result_cookie)
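# Equivalent one-liner (my shorthand, not in the original code):
# result_cookie = "".join(k + "=" + v + ";" for k, v in cookie.items())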

myheaders = {
    "Cookie": result_cookie,
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/92.0.4515.159 Safari/537.36"
}

print("提示:选择爬取原图保存在good_images下,小图保存在images下")
word = input("输入想要爬取的主题:")
quality = input("爬取原始图还是网页小图?(0=原始图,1=小图):")

if quality == '0':
    base_dir = "./good_images/"
    key = r'objURL":"(.*?)"'    # objURL holds the obfuscated original-image link
else:
    base_dir = "./images/"
    key = r'thumbURL":"(.*?)"'  # thumbURL is a plain thumbnail link

pic_url = re.compile(key)  # precompile the pattern
pic_dir = base_dir + word

if not os.path.exists(base_dir):
    os.mkdir(base_dir)
if not os.path.exists(pic_dir):
    os.mkdir(pic_dir)

keyword = quote(word, encoding='utf-8')

start_number = 0
base_url = ("https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&queryWord="
            + keyword + "&word=" + keyword)

# Tables for decoding the obfuscated objURL values (a simple substitution cipher)
str_table = {
    '_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
char_table = {
    'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e', 'u': 'f', '2': 'g', 'i': 'h',
    't': 'i', '3': 'j', 'h': 'k', 's': 'l', '4': 'm', 'g': 'n', '5': 'o', 'r': 'p',
    'q': 'q', '6': 'r', 'f': 's', 'p': 't', '7': 'u', 'e': 'v', 'o': 'w', '8': '1',
    'd': '2', 'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7', 'b': '8', 'l': '9',
    'a': '0'}
# str.translate() expects a mapping of character ordinals
char_table = {ord(key): ord(value) for key, value in char_table.items()}

def get_ori_url(i):
    # Undo the substitution: multi-character tokens first, then single characters
    for key, value in str_table.items():
        i = i.replace(key, value)
    return i.translate(char_table)
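# Example (my own test string, not a real Baidu objURL; it was built by inverting the
# tables above): get_ori_url("ipprf_z2C$qAzdH3FAzdH3Fw_z&e3Bv54AzdH3F8_z&e3B3r2")
# returns "https://a.com/1.jpg".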

number = 1
while start_number < 1800:  # up to 30 pages of 60 results each
    page_url = base_url + "&pn=" + str(start_number) + "&rn=60"
    request = urllib.request.Request(page_url, headers=myheaders)
    result = urllib.request.urlopen(request).read().decode("utf-8")
    for i in re.findall(pic_url, result):
        # objURL values are obfuscated and need decoding; thumbURL values are plain
        i = i if quality != '0' else get_ori_url(i)
        print(str(number) + " : " + i)
        try:
            request = urllib.request.Request(i, headers=myheaders)
            pic_result = urllib.request.urlopen(request).read()
            with open(pic_dir + "/" + word + str(number) + ".jpg", "wb") as f:
                f.write(pic_result)
            number += 1
            time.sleep(0.5)
        except urllib.error.URLError:
            print("download failed")
    start_number += 60
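
As a quick way to convince yourself the decoder works, here is a small self-contained round-trip check (my own addition, not part of the referenced post). It reuses the same two tables, and the encoded test string is simply "https://a.com/1.jpg" passed through the inverse mapping by hand:

# Standalone sanity check for the objURL decoder (same tables as in the script above)
str_table = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
char_table = {ord(k): ord(v) for k, v in {
    'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e', 'u': 'f', '2': 'g', 'i': 'h',
    't': 'i', '3': 'j', 'h': 'k', 's': 'l', '4': 'm', 'g': 'n', '5': 'o', 'r': 'p',
    'q': 'q', '6': 'r', 'f': 's', 'p': 't', '7': 'u', 'e': 'v', 'o': 'w', '8': '1',
    'd': '2', 'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7', 'b': '8', 'l': '9',
    'a': '0'}.items()}

def get_ori_url(i):
    for key, value in str_table.items():
        i = i.replace(key, value)
    return i.translate(char_table)

encoded = "ipprf_z2C$qAzdH3FAzdH3Fw_z&e3Bv54AzdH3F8_z&e3B3r2"  # "https://a.com/1.jpg" encoded by hand
assert get_ori_url(encoded) == "https://a.com/1.jpg"
print("objURL decoder round-trip OK")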
