# Python web scraper: fetch a CSDN blog article and extract its read count

import re
import urllib.request

from bs4 import BeautifulSoup

def getHtml(url, headers):
    """Download and return the raw HTML of *url*.

    Args:
        url: address of the page to fetch.
        headers: dict of HTTP headers (e.g. referer, User-Agent) attached
            to the request so the server treats it as a normal browser.

    Returns:
        The response body as bytes.
    """
    # urllib2 does not exist on Python 3; urllib.request is its replacement.
    req = urllib.request.Request(url, headers=headers)
    # Context manager closes the connection even if read() raises,
    # fixing the resource leak in the original (urlopen never closed).
    with urllib.request.urlopen(req) as page:
        return page.read()

def parse(data):
    """Turn raw HTML into a BeautifulSoup document tree (lxml parser)."""
    return BeautifulSoup(data, 'lxml')

def getReadNums(data, st):
    """Return all non-overlapping matches of the regex pattern *st* in *data*.

    Args:
        data: text to search.
        st: regular-expression pattern string.

    Returns:
        List of matched substrings (or tuples, if the pattern has groups).
    """
    pattern = re.compile(st)
    return pattern.findall(data)

# Target article, plus browser-like headers so CSDN serves the normal page.
url = 'http://blog.csdn.net/marksinoberg/article/details/51493318'
headers = {
    'referer':'http://blog.csdn.net/',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36'
}

# Fetch the article 24 times, locating the read-count <span> on each pass.
# NOTE(review): repeated fetches of the same URL look intended to inflate the
# article's view counter; the parsed count itself is only printed (and the
# print is commented out) — confirm intent before relying on this.
for _ in range(24):  # idiomatic replacement for the manual while/i counter
    html = getHtml(url, headers)
    content = parse(html)
    result = content.find_all('span', class_='link_view')
    # print(result[0].get_text())

# (scraped-page boilerplate: "猜你喜欢" = "You may also like")

# Source: reposted from blog.csdn.net/qq_36958104/article/details/81298356