Crawler (2020-03-12): downloading Baidu Wenku documents in txt format

Something really annoyed me today: a search result pointed into Baidu Wenku, but when I clicked through, the answer couldn't actually be read. Annoyed enough, I wrote this crawler.


import requests
from lxml import etree
import re

def get_text(url):
    # Fetch the document page and write its text out.
    page_url = url
    # Request headers: a mobile user agent plus a Baidu Wenku referer.
    page_headers = {
        "User-Agent": "Mozilla/5.0 (Linux; U; en-us; KFAPWI Build/JDQ39) AppleWebKit/535.19 (KHTML, like Gecko) Silk/3.13 Safari/535.19 Silk-Accelerated=true",
        "Referer": "https://wk.baidu.com/?pcf=2",
        "Accept-Encoding": "gzip, deflate, br",
    }
    # Send a requests.get request to the URL held in page_url, using the
    # headers prepared above, and keep the response in html_code.
    html_code = requests.get(url=page_url, headers=page_headers)
    # Decode the page as gb2312.
    html_code.encoding = "gb2312"
    # Parse the page into an lxml Element tree.
    html_etree = etree.HTML(html_code.text)
    # Pull the text of every script tag whose type attribute is
    # text/javascript out of the tree via XPath.
    info = html_etree.xpath('//script[@type="text/javascript"]/text()')
    # Cut the WkInfo.htmlUrls payload out from between the two markers,
    # then peel the markers off and undo the JavaScript escaping. Note
    # that str.strip() removes any of the listed characters from both
    # ends, which happens to trim the surrounding markers here.
    find_need_infos_reg = re.compile('WkInfo.htmlUrls(.*)WkInfo.verify_user_info')
    find_need_infos = find_need_infos_reg.search(str(info))
    need_infos = (find_need_infos.group()
                  .strip("WkInfo.htmlUrls = ")
                  .strip(r";\n        WkInfo.verify_user_i")
                  .replace(r"\\x22", "")
                  .replace(r"\\\\\\/", "\\")
                  .replace("'", '"'))
    # Extract the per-page content URLs.
    url_find_reg = re.compile(r'pageLoadUrl:(.+?)}')
    url_lists = url_find_reg.findall(need_infos)

    for info_url in url_lists:
        # Turn the leftover backslashes back into URL slashes.
        info_url = info_url.replace("\\", "/")
        text_html = requests.get(url=info_url, headers=page_headers)
        # Each "c" field holds one fragment of the document text.
        text_find_reg = re.compile(r',{"c":(.*?),')
        text_lists = text_find_reg.findall(text_html.text)

        for text in text_lists:
            try:
                # eval() turns the captured string literal back into text.
                text_str = eval(text)
                if text_str != "\n":
                    text_str = text_str.replace('\n', '')
                    DownWrite(text_str)
            except Exception:
                print("error while decoding a fragment")


# Text output module: append one line to the output file.
def DownWrite(line):
    global down_times
    down_times = down_times + 1
    writeLine = line + "\n"

    with open('百度文库.txt', 'a+', encoding='utf8') as f:
        f.write(writeLine)
        print("txt record %d written successfully!" % down_times)

if __name__ == '__main__':
    url = 'https://wenku.baidu.com/view/0cfd025e0875f46527d3240c844769eae109a34a.html'
    down_times = 0
    get_text(url)
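
Running the script as-is appends each recovered line to 百度文库.txt in the working directory; point the url in __main__ at the document you want first.

To make the URL-extraction step more concrete, here is a minimal sketch of the pageLoadUrl regex running on a made-up, already-cleaned payload. The real string comes out of the strip/replace chain in get_text; the host, path, and query parameters below are illustrative stand-ins, not an actual Baidu response.

import re

# Hypothetical sample shaped like the cleaned WkInfo payload after the
# quote and slash-escape replacements in get_text.
need_infos = (r'{pageLoadUrl:https:\\wkretype.bdimg.com\retype\text\demo?pn=1},'
              r'{pageLoadUrl:https:\\wkretype.bdimg.com\retype\text\demo?pn=2}')

url_find_reg = re.compile(r'pageLoadUrl:(.+?)}')
for info_url in url_find_reg.findall(need_infos):
    # The leftover backslashes are what remains of the escaped "\/"
    # separators, so replacing them restores an ordinary URL.
    print(info_url.replace("\\", "/"))

# Output:
# https://wkretype.bdimg.com/retype/text/demo?pn=1
# https://wkretype.bdimg.com/retype/text/demo?pn=2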
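
The eval(text) call in get_text only exists to turn a captured string literal back into plain text. Assuming each "c" fragment really is a JSON string literal, json.loads does the same job without executing arbitrary code; this is a sketch of that swap, not part of the original script, and the sample fragment is made up.

import json

# Safer stand-in for eval(text): parse the fragment as a JSON string
# literal instead of executing it.
def decode_fragment(fragment):
    try:
        return json.loads(fragment)
    except json.JSONDecodeError:
        return None

print(decode_fragment('"hello\\nworld"'))  # prints hello and world on two lines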

Reposted from: blog.csdn.net/Captain_DUDU/article/details/104828851