Python爬虫之xpath 开启多线程爬取素材网图片-xpath应用、多线程应用、批量下载

下面开始写代码。
注意运行在python3.x环境下
注意:由于时效性,这里的URL可能会在后期被素材网站(站长素材 sc.chinaz.com)所修改,请届时查看对应URL是否可用。

from lxml import etree
import os,threading
import urllib.request

# Browser-style request headers so the site does not reject us as a bot.
headers = {
    'User-Agent':'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-US) AppleWebKit/530.9 (KHTML, like Gecko) Chrome/ Safari/530.9 '
}
# Listing-page base URL: page 1 is url + '.html', page N is url + '_N.html'.
url = 'http://sc.chinaz.com/tupian/meinvtupian'

def download(image_url_list, image_name_list, image_fenye_path):
    """Download every image in image_url_list into directory image_fenye_path.

    Args:
        image_url_list: list of image URLs to fetch.
        image_name_list: base file names, parallel to image_url_list.
        image_fenye_path: per-page save directory (created if absent).
    """
    # exist_ok=True replaces the former try/except-pass around os.mkdir,
    # which silently swallowed real errors (e.g. a missing parent directory);
    # makedirs also creates intermediate directories when needed.
    os.makedirs(image_fenye_path, exist_ok=True)
    # Walk the two parallel lists in lockstep instead of range(len(...)).
    for image_url, image_name in zip(image_url_list, image_name_list):
        # Keep the file extension from the URL (e.g. '.jpg').
        houzhui = os.path.splitext(image_url)[-1]
        save_path = os.path.join(image_fenye_path, image_name + houzhui)
        try:
            # Best effort: one broken image must not abort the whole page.
            urllib.request.urlretrieve(image_url, save_path)
            print('%s 下载完毕' % save_path)
        except Exception:
            print('%s xxxxxxxx图片丢失' % save_path)


def read(url, image_fenye_path):
    """Fetch one listing page, pull image URLs and names, hand off to download()."""
    req = urllib.request.Request(url=url, headers=headers)
    resp = urllib.request.urlopen(req)
    page = resp.read().decode('utf-8')
    tree = etree.HTML(page)
    # Lazy-loaded thumbnails keep the real address in @src2 rather than @src.
    image_url_list = tree.xpath('//div[@id="container"]/div/div/a/img/@src2')
    # NOTE(review): alt is taken from the <a> element while src2 comes from
    # <img>; confirm the site really puts alt on the anchor, not the image.
    image_name_list = tree.xpath('//div[@id="container"]/div/div/a/@alt')
    download(image_url_list, image_name_list, image_fenye_path)

if __name__ == '__main__':
    start_page = int(input('请输入起始页面:'))
    end_page = int(input('请输入结束页面:'))
    # Root save directory next to this script. exist_ok replaces the former
    # try/except that printed the error and continued anyway; the pointless
    # module-level `global father_path` declaration is dropped as well.
    father_path = os.path.dirname(os.path.abspath(__file__))
    save_root = os.path.join(father_path, 'imgefiles')
    os.makedirs(save_root, exist_ok=True)
    print('开始下载...')
    t_list = []
    # One thread per listing page — downloads are I/O-bound, so threads overlap.
    for page_num in range(start_page, end_page + 1):
        # Page 1 of this site carries no numeric suffix in its URL.
        if page_num == 1:
            sure_url = url + '.html'
        else:
            sure_url = url + '_' + str(page_num) + '.html'
        image_fenye_path = os.path.join(save_root, '第%s页' % page_num)
        t = threading.Thread(target=read, args=(sure_url, image_fenye_path))
        t.start()
        t_list.append(t)
    # Wait for every page thread before declaring completion.
    for j in t_list:
        j.join()
    print('所有图片下载完毕!')

猜你喜欢

转载自blog.csdn.net/haeasringnar/article/details/80078405