# 爬虫百度贴吧下载20页html — Baidu Tieba crawler: download 20 pages of HTML
# 根据搜索内容爬取前20页的html — fetch the first 20 result pages for a search term

import os

import requests

# Prompt for the Tieba forum name; it doubles as the output directory name.
search_name = input('请输入搜索内容:')
dir_name = search_name

# Resolve the output directory relative to this script's location.
# BUG FIX: the original checked existence under the script's directory but
# created/wrote under the current working directory — now both agree.
save_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), dir_name)
# makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair.
os.makedirs(save_dir, exist_ok=True)

for page in range(20):
    # Tieba paginates 50 posts per page via the `pn` offset.
    # BUG FIX: the original used (i - 1) * 50, which requested pn=-50 on the
    # first iteration; page * 50 covers pages 1..20 correctly.
    # Passing `params` lets requests URL-encode the non-ASCII search term.
    params = {'kw': search_name, 'ie': 'utf-8', 'pn': page * 50}
    response = requests.get('https://tieba.baidu.com/f', params=params)
    # Tieba serves UTF-8; set it explicitly instead of relying on requests'
    # charset guess, so the text we write matches the declared encoding.
    response.encoding = 'utf-8'
    # os.path.join instead of hard-coded '\\' keeps the script portable.
    file_path = os.path.join(save_dir, '%d.html' % page)
    with open(file_path, 'w', encoding='utf-8') as fp:
        fp.write(response.text)

# Source: blog.csdn.net/sinat_38068807/article/details/89514147