Scraping a web novel from a website with a Python crawler

# Load the required modules
import requests
from bs4 import BeautifulSoup

# Define a function that collects every chapter title and its link
def get_novel_chapters():
    url = "https://www.89wxw.com/read/1037/"
    r = requests.get(url)
    main_page = BeautifulSoup(r.text, "html.parser")
    list1 = []
    # Each chapter on the index page is a <dd> element wrapping an <a> link
    for dd in main_page.find_all("dd"):
        link = dd.find("a")
        if not link:
            continue
        list1.append(("https://www.89wxw.com" + link["href"], link.get_text()))
    return list1
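
Note: some novel sites reject requests that lack a browser-style User-Agent, and requests can mis-detect the encoding of GBK/GB2312 pages, which garbles the chapter titles. If either problem shows up, a minimal adjustment (an assumption on my part, not part of the original script; the header value and timeout are arbitrary choices) could look like this, and both helper functions could then call fetch_page(url) instead of building the soup themselves:

import requests
from bs4 import BeautifulSoup

HEADERS = {"User-Agent": "Mozilla/5.0"}  # assumed header value; adjust if the site still blocks requests

def fetch_page(url):
    # Send a browser-like header and let requests infer the page's real encoding
    r = requests.get(url, headers=HEADERS, timeout=10)
    r.encoding = r.apparent_encoding
    return BeautifulSoup(r.text, "html.parser")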

# Define a function that fetches the text of a single chapter
def get_chapters_content(url):
    r = requests.get(url)
    main_page = BeautifulSoup(r.text, "html.parser")
    # The chapter body sits inside <div id="content">
    data = main_page.find("div", id="content").get_text()
    return data
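
main_page.find("div", id="content") returns None when a chapter page fails to load or uses a different layout, and .get_text() would then raise AttributeError. A defensive variant is sketched below (the empty-string fallback is just one possible choice, not something the original post specifies):

def get_chapters_content_safe(url):
    r = requests.get(url)
    main_page = BeautifulSoup(r.text, "html.parser")
    content_div = main_page.find("div", id="content")
    if content_div is None:
        # Layout changed or the page did not load as expected; skip this chapter
        return ""
    return content_div.get_text()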

# Loop over the two helper functions above and write each chapter title and its content to a text file
j = 0
f = open("都市超级医圣.txt", "w", encoding="utf-8")  # UTF-8 keeps the Chinese text intact
alist = get_novel_chapters()
for i in alist:
    j += 1
    url, title = i
    print("Fetching chapter {} and writing it to the file, title: {}".format(j, title))
    f.write(title + "\n" + get_chapters_content(url) + "\n")
f.close()
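
The loop above fires one request per chapter back to back. If the site throttles rapid requests, an alternative main loop (a sketch; the one-second pause is an arbitrary value) adds a short delay and lets a with block close the file automatically:

import time

with open("都市超级医圣.txt", "w", encoding="utf-8") as f:
    for j, (url, title) in enumerate(get_novel_chapters(), start=1):
        print("Fetching chapter {}: {}".format(j, title))
        f.write(title + "\n" + get_chapters_content(url) + "\n")
        time.sleep(1)  # brief pause between chapter requests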

Reposted from blog.csdn.net/sunjiaxing_1/article/details/115427596