Quickly crawl image data from web pages

1. Code Implementation

import os
import re
import time
from selenium import webdriver
from bs4 import BeautifulSoup
from urllib.request import urlretrieve

def selenium_test(url, save_path, num):
    driver = webdriver.Chrome()
    driver.get(url)

    # Scroll to the bottom of the page `num` times so that lazily loaded images appear
    for i in range(num):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(1)

    # Parse the rendered page and collect every <img> tag whose src is an http(s) URL
    html = driver.page_source
    bs_obj = BeautifulSoup(html, "html.parser")
    find_imgs = bs_obj.find_all("img", {'src': re.compile(r'http[^\s]*')})
    print(find_imgs)

    # Download each image and save it as a numbered .jpg file
    i = 1
    for img in find_imgs:
        img_url = img.attrs['src']
        path = os.path.join(save_path, "%s.jpg" % i)
        urlretrieve(img_url, path)
        print("Downloading image {}".format(i))
        i += 1
    driver.quit()


if __name__ == '__main__':
    # URL of the image search results page to crawl
    url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1607916436860_R&pv=&ic=&nc=1&z=&hd=&latest=&copyright=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E9%A9%BE%E9%A9%B6%E8%AF%81'
    # Directory where the downloaded images are stored
    save_path = "驾驶证"
    # Number of times to scroll the page (each scroll loads more results)
    num = 50

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    selenium_test(url, save_path, num)
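
One caveat: urlretrieve raises an exception as soon as a single image URL is unreachable, which aborts the whole run. The helper below is a minimal sketch of a more tolerant download loop (the function name download_images is my own; it takes the same find_imgs list and save_path as the function above and simply skips failed downloads):

import os
from urllib.request import urlretrieve

def download_images(find_imgs, save_path):
    # More tolerant variant of the download loop above: a failed download
    # is logged and skipped instead of stopping the whole crawl.
    for i, img in enumerate(find_imgs, start=1):
        img_url = img.attrs['src']
        path = os.path.join(save_path, "%s.jpg" % i)
        try:
            urlretrieve(img_url, path)
            print("Downloaded image {}".format(i))
        except Exception as e:
            print("Skipping image {}: {}".format(i, e))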

2. Possible Problems

If running the code fails with the error Message: 'chromedriver' executable needs to be in PATH, please refer to: Blog
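
As a quick workaround, you can also point Selenium at the chromedriver binary explicitly instead of relying on PATH. The snippet below is a minimal sketch assuming Selenium 4 and that chromedriver has already been downloaded; the path C:/tools/chromedriver.exe is only a placeholder and must match your own system and Chrome version:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

# Placeholder path to the downloaded chromedriver binary; adjust for your machine.
# The chromedriver version must match the installed Chrome version.
driver_path = "C:/tools/chromedriver.exe"

driver = webdriver.Chrome(service=Service(driver_path))
driver.get("https://image.baidu.com")
print(driver.title)
driver.quit()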

 

Comment: a crawler like this is still a very handy tool!

Origin: blog.csdn.net/Guo_Python/article/details/111166336