Street Photography Crawler

I've recently been getting serious about learning web scraping, so I started with an image crawler as practice. This article is based on Cui Qingcai's book Python3 网络爬虫实战.

However, Toutiao recently changed its imagelist, so this took some extra effort. It turns out the Toutiao developers tuck the high-resolution images away in an obscure corner of the page source, so make good use of Ctrl+F to find the image URLs. The overall method is much the same as before; the difference is that extracting information from JSON turns out to be far more convenient than the HTML parsing we used earlier. One last reminder: make good use of regular expressions!
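For example, searching the detail page's source with Ctrl+F for "gallery" turns up a script fragment along the lines of gallery: JSON.parse("..."). The sketch below, using a hypothetical and heavily trimmed page fragment, shows how a regular expression plus json.loads recovers the image URLs from it; parse_page_detail in the full code does the same thing:

import re, json

# Hypothetical, trimmed fragment of a Toutiao detail page
html = 'gallery: JSON.parse("{\\"sub_images\\": [{\\"url\\": \\"http://p3.pstatp.com/origin/abc\\"}]}")'

pattern = re.compile(r'gallery: JSON\.parse\("(.*?)"\)', re.S)
res = re.search(pattern, html)
if res:
    # Strip the escaping backslashes so json.loads can parse the payload
    data = json.loads(res.group(1).replace('\\', ''))
    print(data['sub_images'][0]['url'])  # http://p3.pstatp.com/origin/abc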

Without further ado, here's the code.

import requests
import re, json, os
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
from hashlib import md5
from multiprocessing import Pool

def get_page_index(offset):
    # Query parameters for Toutiao's search API
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'from': 'search_tab',
        'aid': '24'
    }
    try:
        url = 'https://www.toutiao.com/api/search/content/?' + urlencode(params)
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Index page request failed')
        return None

def parse_page_index(html):
    # The search API returns JSON; yield the detail-page URL of each result
    data = json.loads(html)
    if data and 'data' in data.keys():
        for item in data.get('data'):
            # Some entries (e.g. ads) carry no article_url, so skip them
            if item and item.get('article_url'):
                yield item.get('article_url')

def get_page_detail(url):
    try:
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Detail page request failed!')
        return None

def parse_page_detail(html):
    soup = BeautifulSoup(html, 'lxml')
    title = soup.select('title')[0].get_text()
    # Strip characters that are illegal in Windows directory names
    title = re.sub(r'[\\/:*?"<>|]', '', title)
    # The gallery data is embedded in the page as a JSON.parse("...") call
    pattern = re.compile(r'gallery: JSON\.parse\("(.*?)"\)', re.S)
    res = re.search(pattern, html)
    if res:
        # Remove the escaping backslashes, then parse the JSON
        data = json.loads(res.group(1).replace('\\', ''))
        if data and 'sub_images' in data.keys():
            sub_images = data.get('sub_images')
            # Each sub_images entry carries a full-size image URL
            images = [item.get('url') for item in sub_images]
            for img in images:
                down_load_img(title, img)

# Save an image under a per-title directory; the MD5 of the image
# bytes doubles as the filename, so duplicates are skipped for free
def save_image(title, result):
    directory_path = os.path.join(os.getcwd(), str(title))
    file_path = os.path.join(directory_path, md5(result).hexdigest() + '.jpg')
    if not os.path.exists(file_path):
        if not os.path.exists(directory_path):
            os.mkdir(directory_path)
        with open(file_path, 'wb') as f:
            f.write(result)
def down_load_img(title, url):
    try:
        print('Downloading', url)
        r = requests.get(url)
        if r.status_code == 200:
            save_image(title, r.content)
    except RequestException:
        print('Image request failed!')

def main(offset):
    html = get_page_index(offset)
    if not html:
        return
    for url in parse_page_index(html):
        html = get_page_detail(url)
        if html:
            parse_page_detail(html)

if __name__ == '__main__':
    pool = Pool()
    # Offsets 20, 40, ..., 180: nine result pages crawled in parallel
    group = [x * 20 for x in range(1, 10)]
    pool.map(main, group)
    pool.close()
    pool.join()
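For reference, parse_page_index expects the search API to return JSON shaped roughly like the sample below (a hypothetical, heavily trimmed response; Toutiao changes this structure from time to time, which is exactly what broke the old imagelist approach):

# Hypothetical, trimmed shape of the search API response
sample = {
    "count": 20,
    "offset": 20,
    "data": [
        {
            "title": "街拍",
            "article_url": "https://www.toutiao.com/a0000000000000000000/"  # hypothetical URL
        }
        # ... up to `count` entries; some entries carry no article_url
    ]
}

When the script runs, it creates one directory per article title under the current working directory and names each image after the MD5 of its bytes, so re-running it skips images that were already downloaded.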


Reprinted from blog.csdn.net/PythonstartL/article/details/87299289