Python 爬虫:爬取诗词名句网《三国演义》全文

  • 使用 requests 库发起请求,lxml 的 etree 模块进行 XPath 解析
import requests
import time
from lxml import etree


# 去请求页面的函数
def request_Header(url):
    """Fetch *url* with a browser-like User-Agent and return the Response.

    Args:
        url: Absolute URL to request.

    Returns:
        requests.Response: the raw response; callers read ``.content``.

    Raises:
        requests.RequestException: on network failure, timeout, or an
            HTTP error status (4xx/5xx).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    }
    # A timeout keeps the crawler from hanging forever on a stalled server.
    response = requests.get(url=url, headers=headers, timeout=10)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    return response


def get_content_text(all_href):
    """Download a single chapter page and return its body text.

    Args:
        all_href: Absolute URL of one chapter page.

    Returns:
        str: the chapter paragraphs joined with newlines ('' if the
        page layout did not match the XPath and nothing was found).
    """
    request = request_Header(all_href)
    etrees = etree.HTML(request.content)
    # Each paragraph of the chapter body is a separate <p> under this div,
    # so the XPath yields a list of text fragments.
    content_text = etrees.xpath('//div[@class="layui-col-md8 layui-col-sm7"]/div/div/p/text()')
    # join() builds the string in one O(n) pass instead of the quadratic
    # repeated '+=' concatenation; it also drops the stray leading space
    # the original accumulator started with.
    strs_cont = '\n'.join(content_text)
    print(strs_cont)
    return strs_cont




def main():
    """Crawl the chapter index of the book and save every chapter to a text file.

    Fetches the table of contents, then downloads each chapter in order and
    appends "title + body" to 三国演义.txt.
    """
    url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
    req = request_Header(url)
    # Parse the index page.
    etrees = etree.HTML(req.content)
    # Each <a> in the book menu carries a chapter title (text) and link (@href).
    chapters = etrees.xpath('//div[@class="book-mulu"]/ul/li/a')
    # 'with' guarantees the file is closed even if a mid-loop request fails
    # (the original open()/close() pair leaked the handle on any exception).
    with open('三国演义.txt', 'w', encoding='utf8') as fp:
        for a in chapters:
            # Chapter title shown to the user and written as a heading.
            title = a.xpath('text()')[0]
            print('正在下载>>>%s' % title)
            # Chapter links are site-relative; prepend the host.
            href = a.xpath('@href')[0]
            all_href = 'http://www.shicimingju.com' + str(href)
            # Fetch and extract this chapter's body text.
            get_content = get_content_text(all_href)
            fp.write(title + '\n' + str(get_content) + '\n')
            # time.sleep(2)  # optionally throttle requests to be polite
            print('结束下载%s' % title)

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

原文出处(转载自):blog.csdn.net/qq_42815634/article/details/84862924