Scraping Ajax-loaded pages with Python --- Toutiao (今日头条)

I started out by following other people's tutorials, but found that a lot of their code had pitfalls.

Why the pitfalls? Mainly that Toutiao has changed both its search API parameters and the way detail pages embed their image data since those tutorials were written, so the old code often silently comes back empty. The fixes are reflected in the code below.

Here is the scraping process and result, using today's news keyword 赵丽颖 (Zhao Liying):
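For reference, the Ajax request the spider sends looks roughly like this (it is exactly the query string that get_page_index below builds with urlencode; the keyword gets percent-encoded, and offset pages through the results 20 at a time):

https://www.toutiao.com/search_content/?offset=0&format=json&keyword=赵丽颖&autoload=true&count=20&cur_tab=3&from=gallery

The response is JSON whose data array holds one object per search result, and each object carrying an article_url points at a gallery detail page.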


import requests
from urllib.parse import urlencode
from requests.exceptions import RequestException
import json
import re
from bs4 import BeautifulSoup
from toutiaofengjing.config import *  # author's config: MONGO_URL, MONGO_DB, MONGO_TABLE, KEY_WORD, GROUP_START, GROUP_END
import pymongo
import os
from hashlib import md5
from multiprocessing import Pool

# connect=False defers the actual MongoDB connection until first use, so each
# worker process in the Pool opens its own connection after the fork
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]


# browser-like headers; Toutiao tends to reject requests without a real User-Agent
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9'
}
def get_page_index(offset, keyword):
    # cur_tab=3 with from='gallery' targets the image-gallery tab of the
    # search results; older tutorials used different parameters (one of the pitfalls)
    data = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': 20,
        'cur_tab': 3,
        'from': 'gallery'
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(data)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('index page request failed')
        return None


def parse_page_index(html):
    data = json.loads(html)
    if data and 'data' in data.keys():
        for item in data.get('data'):
            # extra filter: skip entries that have no detail-page URL
            if item.get('article_url'):
                yield item.get('article_url')

def get_page_detail(url):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('detail page request failed', url)
        return None


def parse_page_detail(html, url):
    soup = BeautifulSoup(html, features='lxml')
    title = soup.select('title')[0].get_text()
    print(title)
    # the detail page embeds its image list as gallery: JSON.parse("...");
    # capture the escaped JSON string, then strip the backslash escapes
    image_pattern = re.compile(r'gallery: JSON\.parse\("(.*?)"\)', re.S)
    result = re.search(image_pattern, html)
    if result:
        data = json.loads(result.group(1).replace('\\', ''))
        if data and 'sub_images' in data.keys():
            sub_images = data.get('sub_images')
            images = [item.get('url') for item in sub_images]
            for image in images:
                download_image(image)
            return {
                'title': title,
                'image': images,
                'url': url
            }
    return None
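# For reference, the decoded gallery JSON is shaped roughly like
#   {"sub_images": [{"url": "..."}, ...], ...}
# parse_page_detail above relies only on the sub_images[*].url fields.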

def save_to_mongo(result):
    # insert_one is the pymongo 3.x replacement for the deprecated insert()
    if db[MONGO_TABLE].insert_one(result):
        return True
    return False

def download_image(url):
    print('downloading...', url)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # response.content is the raw image bytes
            save_image(response.content)
        return None
    except RequestException:
        print('image request failed', url)
        return None


def save_image(result):
    # name the file by the MD5 of its content, which also deduplicates images
    file_path = '{0}/{1}.{2}'.format(os.getcwd(), md5(result).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(result)

def main(offset):
    html = get_page_index(offset, KEY_WORD)
    if not html:  # index request failed; nothing to parse
        return
    for url in parse_page_index(html):
        html = get_page_detail(url)
        if html:
            result = parse_page_detail(html, url)
            if result:
                save_to_mongo(result)


if __name__ == '__main__':
    # one offset per page of 20 results, fetched in parallel
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool = Pool()
    pool.map(main, groups)
    pool.close()
    pool.join()
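
The post never shows toutiaofengjing/config.py, which the script star-imports at the top. Here is a minimal sketch of that module, assuming only the constant names the code actually uses; the values are placeholders to adapt:

# toutiaofengjing/config.py -- hypothetical example values; only the
# names are fixed by the star-import in the spider above
MONGO_URL = 'localhost'     # MongoDB host, or a full mongodb:// URI
MONGO_DB = 'toutiao'        # database name
MONGO_TABLE = 'toutiao'     # collection the results go into
KEY_WORD = '赵丽颖'          # search keyword passed to get_page_index
GROUP_START = 0             # first page -> offset 0
GROUP_END = 10              # last page -> offset 200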

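Once a run finishes, a quick way to sanity-check what landed in MongoDB (a sketch, reusing the same config module):

import pymongo
from toutiaofengjing.config import MONGO_URL, MONGO_DB, MONGO_TABLE

client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
# how many galleries were saved, plus one sample record
print(db[MONGO_TABLE].count_documents({}))
print(db[MONGO_TABLE].find_one())
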
That's it, job done.

Reposted from blog.csdn.net/OYY_90/article/details/83070740