Python crawler: scraping Toutiao street-photography images

This script queries the Toutiao search API for street-photography galleries, downloads every image in each gallery, and saves each gallery's title, URL, and image list to MongoDB, spreading the index pages across a process pool.

import json
import os
from urllib.parse import urlencode
import requests
from hashlib import md5
from bs4 import BeautifulSoup
from base import Tools  # local helper module; an assumed sketch is given at the end of this post
from requests.exceptions import RequestException
import re
import pymongo
from config import *  # crawl settings; an example config.py is sketched at the end of this post
from multiprocessing import Pool
from json.decoder import JSONDecodeError
# Connect to MongoDB; configure MONGO_URL / MONGO_DB in config.py for your setup.
# connect=False defers the actual connection until first use, avoiding
# fork-related issues when the client is shared across multiprocessing workers.
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]

class Spider(Tools):
    # Request the search index page (offset: paging offset, keyword: search term).
    @staticmethod
    def get_page_index(offset, keyword):
        data = {
            'aid': '24',
            'app_name': 'web_search',
            'offset': offset,
            'format': 'json',
            'keyword': keyword,
            'autoload': 'true',
            'count': '20',
            'en_qc': 1,
            'cur_tab': 1,
            'from': 'search_tab',
            'pd': 'synthesis',
            'timestamp': 1553044615675,
        }
        url = 'https://www.toutiao.com/api/search/content/?'+urlencode(data)
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.text
            return None
        except RequestException:
            print('Error requesting index page')
            return None
    # Parse the index-page JSON and yield each article's detail-page URL.
    @staticmethod
    def parse_page_index(html):
        try:
            data = json.loads(html)
        except JSONDecodeError:
            return
        if data and 'data' in data.keys():
            for item in data.get('data'):
                yield item.get('article_url')
    # Fetch the detail page for one article.
    def get_page_detail(self, url):
        # get_text is inherited from the Tools helper class.
        return self.get_text(url, encoding="utf-8")
    # Parse the detail page: extract the gallery title and image URLs.
    def parse_page_detail(self, html, url):
        soup = BeautifulSoup(html, 'lxml')
        title = soup.select('title')[0].get_text()
        # The gallery data is embedded in the page as a JSON.parse("...") call.
        images_pattern = re.compile(r'gallery: JSON.parse\("(.*)"\)', re.S)
        result = re.search(images_pattern, html)
        if result:
            # Strip the escape backslashes so the embedded JSON string parses.
            json_str = result.group(1).replace('\\', '')
            try:
                data = json.loads(json_str)
            except JSONDecodeError:
                return None
            if data and 'sub_images' in data.keys():
                sub_images = data.get('sub_images')
                images = [item.get('url') for item in sub_images]
                for image in images:
                    self.download_image(image)
                return {
                    'title': title,
                    'url': url,
                    'images': images
                }
    # Save one result record to MongoDB.
    @staticmethod
    def save_to_mongo(result):
        if db[MONGO_TABLE].insert_one(result):
            print('Saved to MongoDB', result)
            return True
        return False
    # Download a single image.
    def download_image(self, url):
        print('Downloading', url)
        try:
            response = requests.get(url)
            if response.status_code == 200:
                self.save_image(response.content)
        except RequestException:
            print('Error requesting image', url)
    # Save image bytes to disk, named by the MD5 of the content to avoid duplicates.
    @staticmethod
    def save_image(content):
        file_path = '{0}/{1}.{2}'.format(os.getcwd(), md5(content).hexdigest(), 'jpg')
        print(file_path)
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(content)
    # Crawl one index page; offset is the paging offset.
    def run(self, offset):
        index_html = self.get_page_index(offset, KEYWORD)
        if index_html is None:
            return
        for url in self.parse_page_index(index_html):
            if url is None:
                continue
            detail_html = self.get_page_detail(url)
            if detail_html:
                result = self.parse_page_detail(detail_html, url)
                if result is not None:
                    self.save_to_mongo(result)

if __name__ == '__main__':
    spider = Spider()
    pool = Pool()
    # Each index page returns 20 results, so offsets step by 20.
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool.map(spider.run, groups)
    pool.close()
    pool.join()
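Note: the original post does not include the base and config modules it imports. Below is a minimal, assumed sketch of the Tools helper, written just so the inherited self.get_text(url, encoding=...) call above works; the author's real base.py may differ.

# base.py - assumed minimal Tools helper (not shown in the original post)
import requests
from requests.exceptions import RequestException

class Tools:
    def get_text(self, url, encoding='utf-8'):
        # Fetch a page and return its decoded text, or None on any failure.
        try:
            response = requests.get(url)
            if response.status_code == 200:
                response.encoding = encoding
                return response.text
            return None
        except RequestException:
            return None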

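Likewise, config.py only needs the constants referenced above. This is an example sketch: the names match the code, but every value is a placeholder to adjust for your own MongoDB setup and search keyword.

# config.py - example settings (values are placeholders, not from the original post)
MONGO_URL = 'localhost'   # MongoDB connection string or host
MONGO_DB = 'toutiao'      # database name
MONGO_TABLE = 'toutiao'   # collection name
KEYWORD = '街拍'          # search keyword ("street shots")
GROUP_START = 1           # first page group (offset = 20 * group)
GROUP_END = 20            # last page group, inclusive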

Reposted from blog.csdn.net/smsmtiger/article/details/88995230