http://www.allitebooks.org/
This is one of the most generous websites I've seen — every book is free to download. Out of weekend boredom, I tried to collect all of the PDF books on this site.
Technologies used:
- python3.5
- Beautiful Soup
Shared code:
This is the simplest possible crawler, with little thought given to fault tolerance. If you try it out, please be gentle — don't hammer such a generous site with requests.
# www.qingmiaokeji.cn 30
from bs4 import BeautifulSoup
import requests
import json
# Base URL of the site being scraped — TODO confirm the domain is still reachable
siteUrl = 'http://www.allitebooks.org/'
def category():
    """Fetch the site's home page and collect its category menu links.

    Returns:
        list[dict]: one ``{'name': ..., 'href': ...}`` entry per anchor
        matched by the ``.sub-menu li a`` selector on the home page.
    """
    response = requests.get(siteUrl)
    soup = BeautifulSoup(response.text, "html.parser")
    # Idiom: build the result with a comprehension instead of append-in-a-loop.
    return [
        {'name': a.get_text(), 'href': a.get("href")}
        for a in soup.select('.sub-menu li a')
    ]
def bookUrlList(url):
    """Walk every results page of one category and scrape each page.

    Args:
        url: a ``{'name': ..., 'href': ...}`` dict as produced by category().

    Side effects:
        Calls bookList() once per results page, which in turn fetches and
        records every book on that page.
    """
    response = requests.get(url['href'])
    soup = BeautifulSoup(response.text, "html.parser")
    # The pagination's "Last Page" anchor text carries the total page count.
    anchors = soup.select(".pagination a[title='Last Page →']")
    # Bug fix: a category with only one page has no "Last Page" link at all;
    # the old code left the count at 0 and silently skipped the category.
    nums = int(anchors[-1].get_text()) if anchors else 1
    for i in range(1, nums + 1):
        bookList(url['href'] + "page/" + str(i))
def bookList(url):
    """Scrape one listing page and process the detail page of every book on it."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "html.parser")
    # Each article's entry title anchors the book's detail page.
    for anchor in soup.select(".main-content-inner article .entry-title a"):
        getBookDetail(anchor.get("href"))
def getBookDetail(url, outfile='d:/booklist.txt'):
    """Scrape one book's detail page and append a markdown-style row to a file.

    Args:
        url: the book detail page URL.
        outfile: path of the text file rows are appended to; defaults to the
            original hard-coded location for backward compatibility.

    Side effects:
        Appends one ``title | ![](img) | download-url`` line to *outfile*.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    title = soup.select_one(".single-title")
    img = soup.select_one(".entry-body-thumbnail .attachment-post-thumbnail")
    link = soup.select_one(".download-links a")
    # Robustness: the old ``select(...)[0]`` raised IndexError on any page
    # missing one of these elements, killing the whole crawl; skip instead.
    if title is None or img is None or link is None:
        return
    with open(outfile, 'a+', encoding='utf-8') as f:
        f.write(title.text + " | ![](" + img.get("src") + ") | " + link.get("href") + "\n")
if __name__ == '__main__':
    # Fix: the original bound the category list to 'list', shadowing the builtin.
    for cat in category():
        bookUrlList(cat)