Scraping Baidu Images

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 11/23/2019 4:06 PM
# @Author  : DeltaT
# @File    : 百度图片爬虫.py

"""爬虫下载百度图片"""
import re
import os
import urllib.parse  # import the submodule explicitly; `import urllib` alone does not expose urllib.parse
import requests

search_kw = input('Enter the kind of images to download: ')
search_kw = urllib.parse.quote(search_kw, safe='/')  # URL-encode the keyword once, up front; quoting inside the loop would double-encode it
begin_page_num = 0  # index of the page currently being requested
end_page_num = 30  # number of results per page (step size for the pn parameter)
page_num = 1  # highest page index to fetch (0-based, so two pages in total)
all_pic_urls = list()  # collects the image URLs found on every page
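# Worked example of the paging arithmetic: with end_page_num = 30, page 0 is
# requested with pn=0 and page 1 with pn=30, i.e. results 1-30 and 31-60.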

# Fetch each results page and collect its image URLs
while True:
    if begin_page_num > page_num:
        break
    print("Sending request #{}".format(begin_page_num + 1))
    current_page_num = begin_page_num * end_page_num  # result offset for this page; plugged into the URL as pn to flip pages
    url_begin = "http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word="
    url = url_begin + search_kw + "&pn=" + str(current_page_num) + "&gsm=" + hex(current_page_num) + \
          "&ct=&ic=0&lm=-1&width=0&height=0"

    # Extract the image URLs embedded in the current page
    pic_urls = list()
    try:
        resp = requests.get(url)
        html = resp.text
        pic_urls = re.findall('"objURL":"(.*?)",', html, re.S)  # pull out each image URL with a regex
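        # Illustrative only: the page source contains JSON-like fields of the form
        #   "objURL":"http://img.example.com/photo.jpg",
        # and the capture group above grabs the URL between the quotes.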
    except Exception as e:
        print(e)

    begin_page_num += 1
    all_pic_urls.extend(pic_urls)  # append this page's URLs to the overall list

# Create the output folder if it does not exist yet
dir_path = './baidu_images'
if not os.path.exists(dir_path):
    os.mkdir(dir_path)

# Download every image
pic_url_list = list(set(all_pic_urls))  # de-duplicate the URLs
for index, pic_url in enumerate(pic_url_list):
    try:
        resp = requests.get(pic_url, timeout=5)
        pic_data = resp.content  # raw bytes of the image
        pic_path = dir_path + '/' + str(index) + '.jpg'  # path to save the image under
        # Write the image bytes to disk
        with open(pic_path, 'wb') as f:
            f.write(pic_data)
            print('Downloaded image {}: {}'.format(index + 1, pic_url))
    except Exception as e:
        print('Failed to download image {}: {}'.format(index + 1, pic_url))
        print(e)
        continue
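One practical caveat: Baidu sometimes serves a blocked or empty page to clients that send no browser-like User-Agent, in which case the regex above finds nothing. Below is a minimal sketch of the page request with headers added; the helper name fetch_page and the UA string are illustrative assumptions, not part of the original script.

import requests

# Example browser-like User-Agent; any mainstream UA string should work
HEADERS = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/78.0.3904.108 Safari/537.36')
}


def fetch_page(url):
    """Fetch a results page with browser-like headers and a timeout."""
    resp = requests.get(url, headers=HEADERS, timeout=5)
    resp.raise_for_status()  # fail loudly instead of silently parsing an error page
    return resp.text

Swapping requests.get(url) in the loop above for fetch_page(url) keeps the rest of the script unchanged.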

Reposted from www.cnblogs.com/llflifei/p/11934246.html