A web crawler that scrapes a Chinese university ranking page and stores the results in a database.

#CrawUnivRanjingA.py
import requests
from bs4 import BeautifulSoup
import bs4
import pymysql

# Module-level DB connection: executed at import time; `db` and `cursor`
# are used as globals by fillUnivList below.
# NOTE(review): credentials are hard-coded — move to config/env in real use.
db=pymysql.connect(host="localhost",user="root",password="admin",db="test",port=3306)
print('数据库连接成功')
cursor=db.cursor()
# One-time table creation (run once, then keep commented out).
# Column names are Chinese and must match the INSERT in fillUnivList:
#   排名 = rank, 学校名称 = school name, 总分 = total score, 省市 = province/city
# sql = """CREATE TABLE Daxue (
# 排名 int(3) NOT NULL,
# 学校名称 CHAR(10),
# 总分 float (2),
# 省市 varchar(10))"""





def getHTMLtEXT(url):
    """Fetch *url* and return the decoded page text, or "" on any request failure.

    The encoding is reset to the apparent (content-sniffed) encoding because
    the target site serves Chinese text whose charset header is unreliable.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`: only network/HTTP errors are
        # treated as "page unavailable"; programming errors still surface.
        return ""

def fillUnivList(ulist, html):
    """Parse the ranking table out of *html* into *ulist* and persist the top 20.

    Each appended row is [rank, school name, total score, province/city]
    (columns 0, 1, 3, 2 of the source table).  The first 20 rows are also
    inserted into the `Daxue` table via the module-level `cursor`/`db`.
    """
    stored = 0  # rows inserted so far; original garbled counter `a` / `A. 1 = +`
    soup = BeautifulSoup(html, "html.parser")
    for tr in soup.find('tbody').children:
        # `.children` yields NavigableString whitespace too; keep only tags.
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')
            ulist.append([tds[0].string, tds[1].string, tds[3].string, tds[2].string])
            paiming = tds[0].text.strip()          # rank
            xuexiaomingcheng = tds[1].text.strip()  # school name
            zongfeng = tds[3].text.strip()          # total score
            shengshi = tds[2].text.strip()          # province/city
            if stored < 20:
                # Parameterized INSERT — values are bound by the driver,
                # never interpolated into the SQL string.
                insert_into = ("INSERT INTO Daxue(排名,学校名称,总分,省市)"
                               "VALUES(%s,%s,%s,%s)")
                data_into = (paiming, xuexiaomingcheng, zongfeng, shengshi)
                cursor.execute(insert_into, data_into)
                db.commit()
                stored += 1  # reconstructed from garbled `A. 1 = +`

def printUnivList(ulist, num):
    """Print the first *num* rows of *ulist* as an aligned four-column table.

    ulist : list of [rank, name, score, province] string rows
    num   : number of rows to print (caller guarantees num <= len(ulist))

    chr(12288) is the full-width CJK space, used as the fill character so
    Chinese school names align in fixed-width columns.
    """
    tplt = "{0:<10}\t{1:{4}<10}\t{2:<10}\t{3:<10}"
    print(tplt.format("排名", "学校名称", "总分", "省市", chr(12288)))
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], u[3], chr(12288)))

def main():
    """Crawl the 2019 best-universities ranking page, store rows, print top 20."""
    uinfo = []
    # URL reconstructed from the garbled source line (spaces/casing removed).
    url = "http://www.zuihaodaxue.com/zuihaodaxuepaiming2019.html"
    html = getHTMLtEXT(url)
    fillUnivList(uinfo, html)
    printUnivList(uinfo, 20)


main()


After the data has been stored in the database:

 

Guess you like

Origin www.cnblogs.com/doudouhaha521/p/11522076.html