# 利用MySQL数据库存储维基百科信息 — store Wikipedia article info in a MySQL database

from bs4 import  BeautifulSoup
from urllib  import  urlopen
from bs4 import  BeautifulSoup
import re
import datetime
import  random
import  pymysql

# Database connection for the scraped Wikipedia pages.
# NOTE(review): credentials are hard-coded; move to config/env for real use.
# charset='utf8mb4' rather than 'utf8': MySQL's 'utf8' is a 3-byte subset
# that rejects 4-byte UTF-8 characters (emoji, rare CJK) which Wikipedia
# article text can legitimately contain.
conn = pymysql.connect(host='localhost', user='root', passwd='001',
                       db='mysql', charset='utf8mb4')

cur = conn.cursor()
cur.execute('USE scraping')  # switch to the database that holds the `pages` table
random.seed(datetime.datetime.now())  # seed RNG used for random link selection

def store(title, content):
    """Insert one page's title and first-paragraph text into `pages` and commit.

    Fix: the placeholders must NOT be wrapped in quote characters — pymysql
    quotes and escapes the parameter values itself, so the original
    `VALUES ("%s","%s")` stored every value with an extra literal pair of
    quotes around it. Bare %s placeholders are the correct (and
    injection-safe) parameterized form.
    """
    cur.execute("INSERT INTO pages (title, content) VALUES (%s, %s)",
                (title, content))
    cur.connection.commit()
	
def getLinks(articleUrl):
    """Fetch a Wikipedia article, store its title and first paragraph,
    and return the internal article links found in the page body.

    articleUrl: site-relative path, e.g. "/wiki/Kevin_Bacon".
    Returns a list of <a> Tag objects whose href starts with /wiki/.
    """
    html = urlopen("http://en.wikipedia.org" + articleUrl)
    # Name the parser explicitly: BeautifulSoup(html) picks whichever parser
    # happens to be installed, which varies across machines and emits a
    # GuessedAtParserWarning.
    bsObj = BeautifulSoup(html, "html.parser")
    title = bsObj.find("h1").get_text()
    # First paragraph of the article body.
    # NOTE(review): .find("p") can return None on unusual pages — would raise
    # AttributeError here; confirm whether that should be handled.
    content = bsObj.find("div", {"id": "mw-content-text"}).find("p").get_text()
    store(title, content)
    # Article links only: href starts with /wiki/ and contains no ":" — this
    # excludes namespace pages such as File:, Category:, Talk:.
    return bsObj.find("div", {"id": "bodyContent"}).findAll(
        "a", href=re.compile("^(/wiki/)((?!:).)*$"))
	
# Random walk over Wikipedia: from each page, follow one randomly chosen
# internal link, storing every page visited, until a page with no article
# links is reached. The cursor/connection are always closed on exit.
try:
    # The initial fetch is inside the try so that cur/conn are closed even
    # if the very first request or insert fails (the original leaked them).
    links = getLinks("/wiki/Kevin_Bacon")
    while links:
        newArticle = random.choice(links).attrs["href"]
        print(newArticle)
        links = getLinks(newArticle)
finally:
    cur.close()
    conn.close()

# 转载自 (reposted from): blog.csdn.net/lzh_86/article/details/79547541