Python crawler - downloading images

import urllib.request
import urllib.parse
import re
import os
import time

def handle_request(url, page):
    # Append the page number to build the URL of the page to fetch
    url += str(page) + '/'
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64;'
                            ' x64) AppleWebKit/537.36 (KHTML, like'
                            ' Gecko) Chrome/71.0.3578.98 Safari/537.36'}
    request = urllib.request.Request(url=url, headers=header)
    return request

def parse_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf8')
    # Save the raw page locally for inspection/debugging
    with open('F.html', 'w', encoding='utf8') as fp:
        fp.write(content)

    # Parse the content and extract all image links on this page
    pattern = re.compile(r'<div class="thumb">.*?<a href=".*?" target="_blank">.*?<img src="(.*?)" alt="(.*?)" />.*?</div>', re.S)

    ret = pattern.findall(content)

    download(ret)

def download(ret):
    for image_info in ret:
        # Extract the image link
        image_src = image_info[0]
        # Extract the image title (alt text)
        image_alt = image_info[1]
        # Build the full URL (the src in the page is protocol-relative)
        image_src = 'https:' + image_src
        dirName = 'tupian'
        # Create the output directory if it doesn't exist
        if not os.path.exists(dirName):
            os.mkdir(dirName)

        # Get the file extension
        suffix = image_src.split('.')[-1]
        # Build the file name from the alt text and the extension
        fileName = image_alt + '.' + suffix
        filePath = os.path.join(dirName, fileName)
        print(filePath)

        print("Downloading %s....." % fileName)
        # Download the image to the local path
        urllib.request.urlretrieve(image_src, filePath)
        print("Finished downloading %s" % fileName)
        time.sleep(2)

def main():
    start_page = int(input('Enter the start page: '))
    end_page = int(input('Enter the end page: '))
    # Base URL of the site to scrape
    url = 'https://www.qiushibaike.com/pic/page/'
    for page in range(start_page, end_page + 1):
        print('Downloading page %s.....' % page)
        # Build the request object
        request = handle_request(url, page)
        # Send the request, get the response, and parse the content
        parse_content(request)
        print("Finished downloading page %s" % page)
        time.sleep(1)


if __name__ == '__main__':
    main()
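
A note on robustness: image_alt comes straight from the page and may contain characters that are not legal in file names (slashes, quotes, and so on), and urllib.request.urlretrieve raises an exception on any failed request, which would abort the whole loop. The helper below is a minimal sketch of a hardened download step; the name safe_download and the character pattern it strips are my own additions, not part of the original script.

import os
import re
import urllib.request

def safe_download(image_src, image_alt, dirName='tupian'):
    # Replace characters that are unsafe in file names with underscores
    safe_alt = re.sub(r'[\\/:*?"<>|\s]+', '_', image_alt)
    suffix = image_src.split('.')[-1]
    os.makedirs(dirName, exist_ok=True)
    filePath = os.path.join(dirName, safe_alt + '.' + suffix)
    try:
        urllib.request.urlretrieve(image_src, filePath)
    except Exception as e:
        # Skip this image instead of crashing the whole crawl
        print("Failed to download %s: %s" % (image_src, e))

It can replace the direct urlretrieve call inside download() above.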
    
Scraping beauty photos

import urllib.request
import urllib.parse
import time
import os
from lxml import etree

def handle_request(page):
    # Page 1 has a different URL from the later, numbered pages
    if page == 1:
        url = 'http://sc.chinaz.com/tupian/xingganmeinvtupian.html'
    else:
        url = 'http://sc.chinaz.com/tupian/xingganmeinvtupian_' + str(page) + '.html'
    #print(url)
    header = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64;'
                            ' zh-CN; rv:1.9.2.10) Gecko/20100922'
                            ' Ubuntu/10.10 (maverick) Firefox/3.6.10'}
    request = urllib.request.Request(url = url, headers = header)
    return request

def parse_content(content):
    tree = etree.HTML(content)

    div_list = tree.xpath('//div[@id="container"]/div')
    #print(div_list,'\n',len(div_list))
    for div in div_list:
        # Alt texts of the images (collected here but not used when naming files below)
        name_list = div.xpath('.//div/a/img/@alt')
        # The page lazy-loads images, so the real URL lives in the src2 attribute
        images_list = div.xpath('.//div/a/img/@src2')
        # Download each image
        for img_src in images_list:
            download_image(img_src)

def download_image(img_src):
    dir_path = 'xinggan'
    # Create the output directory if it doesn't exist
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    # Name the local file after the last path segment of the URL
    fileName = os.path.basename(img_src)
    filePath = os.path.join(dir_path, fileName)
    print(filePath)
    header = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64;'
                            ' zh-CN; rv:1.9.2.10) Gecko/20100922'
                            ' Ubuntu/10.10 (maverick) Firefox/3.6.10'}
    request = urllib.request.Request(url=img_src, headers=header)
    response = urllib.request.urlopen(request)
    with open(filePath, 'wb') as fp:
        fp.write(response.read())

def main():
    start_page = int(input("Enter the start page: "))
    end_page = int(input("Enter the end page: "))
    for page in range(start_page, end_page + 1):
        request = handle_request(page)
        content = urllib.request.urlopen(request).read().decode()
        parse_content(content)
        time.sleep(1)


if __name__ == "__main__":
    main()
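
In parse_content() the alt texts are collected into name_list but never used, so every file ends up named after the last segment of the image URL. If you prefer human-readable file names, one small variation is to zip the alt texts with the lazy-load URLs and pass both to the downloader. The sketch below reuses the XPath expressions from above; the names HEADER and download_named_image are my own, and it assumes the alt text is safe to use as a file name.

import os
import urllib.request
from lxml import etree

HEADER = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64;'
                        ' zh-CN; rv:1.9.2.10) Gecko/20100922'
                        ' Ubuntu/10.10 (maverick) Firefox/3.6.10'}

def parse_content(content):
    tree = etree.HTML(content)
    for div in tree.xpath('//div[@id="container"]/div'):
        names = div.xpath('.//div/a/img/@alt')
        srcs = div.xpath('.//div/a/img/@src2')  # lazy-loaded real URLs
        # Pair each alt text with its image URL
        for name, img_src in zip(names, srcs):
            download_named_image(img_src, name)

def download_named_image(img_src, name, dir_path='xinggan'):
    os.makedirs(dir_path, exist_ok=True)
    suffix = img_src.split('.')[-1]
    filePath = os.path.join(dir_path, name + '.' + suffix)
    request = urllib.request.Request(url=img_src, headers=HEADER)
    with open(filePath, 'wb') as fp:
        fp.write(urllib.request.urlopen(request).read())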
    