Douban Reading: crawling the new-book listing

# -*- coding: utf-8 -*-
import requests
import urllib
import sys
import re
import json
from requests.exceptions import RequestException  #异常代码模块

def get_one_page(url):
    """Fetch *url* and return its HTML text, or None on any failure.

    Returns the decoded response body only for HTTP 200; any other status
    code or any requests-level error yields None so the caller can skip
    the page gracefully.
    """
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                # Advertise only encodings requests can always decode itself.
                # The original listed 'br' too, which makes the server send
                # brotli that requests cannot decompress unless the optional
                # brotli package is installed.
                'Accept-Encoding':'gzip, deflate',
                'Accept-Language':'zh-CN,zh;q=0.9',
                'Cache-Control':'max-age=0',
                'Connection':'keep-alive',
                'Host':'book.douban.com',
                'Referer':'https://www.baidu.com/s?ie=utf8&oe=utf8&wd=%E8%B1%86%E7%93%A3&tn=98012088_5_dg&ch=1',
                'Upgrade-Insecure-Requests':'1',
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5083.400 QQBrowser/10.0.988.400'
               }
    try:
        # Without a timeout, a stalled connection would hang the crawler forever.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:  # success: hand back the page source
            return response.text
        return None  # any non-200 status: treat the page as unavailable
    except RequestException:  # base class of all requests errors; subclasses too numerous to enumerate
        return None

def pare_one_page(html):
    """Yield one dict per book entry found in the Douban front-page HTML.

    Each dict has the keys href, title, img, author, time (publication
    year) and Press (publisher); the last three values are stripped of
    surrounding whitespace.
    """
    # re.S lets '.' span newlines, since each <li> block covers several lines.
    book_pattern = re.compile(
        '<li.*?class="cover">.*?<a href="(.*?)" title="(.*?)">'
        '.*?src="(.*?)".*?class="author">(.*?)</div>'
        '.*?class="year">(.*?)</span>.*?class="publisher">(.*?)</span>.*?</li>',
        re.S)
    for match in book_pattern.finditer(html):
        href, title, img, author, year, publisher = match.groups()
        yield {
            'href': href,
            'title': title,
            'img': img,
            'author': author.strip(),   # drop surrounding whitespace/newlines
            'time': year.strip(),
            'Press': publisher.strip()
            }
def write_to_file(content):
    """Append *content* (a dict) to result.txt as one JSON line, UTF-8 encoded."""
    with open('result.txt','a',encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese characters human-readable in the file.
        f.write(json.dumps(content,ensure_ascii=False)+'\n')
        # The explicit f.close() was removed: the `with` statement already
        # closes the file when the block exits.

def main():
    """Crawl the Douban Books front page and dump each new book to result.txt."""
    url = 'https://book.douban.com/'
    # Map every non-BMP code point (>= U+10000) to U+FFFD so printing to a
    # narrow/legacy console does not raise UnicodeEncodeError on rare symbols.
    non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
    # Reuse the requests-based fetcher defined above instead of duplicating
    # the download with urllib (get_one_page was otherwise never called).
    html = get_one_page(url)
    if html is None:
        return  # fetch failed or non-200 status: nothing to parse
    html = html.translate(non_bmp_map)  # replace unmappable symbols
    for item in pare_one_page(html):  # iterate regex-parsed book entries
        print(item)
        write_to_file(item)  # append each entry to result.txt

if __name__ == '__main__':
    main()

Guess you like

Origin http://43.154.161.224:23101/article/api/json?id=325980406&siteId=291194637
Recommended