Python crawler learning, day 2: scraping a website with the BeautifulSoup and Requests libraries

At first I planned to work from the tags inside the div, using their class attributes to locate the content, but content found that way can be duplicated. So instead I went straight for the whole table (the HTML contains only one table), searched inside it for the td tags we need, and looped over them to pull out the content. (I started with t.string, but when a td is empty it returns None, so I switched to t.text, which returns an empty string instead.)
Since this returns the td contents directly, I didn't modify the response encoding.
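A minimal sketch of the .string vs .text difference described above (the HTML snippet is made up for illustration):

from bs4 import BeautifulSoup

html = "<table><tr><td>value</td><td></td></tr></table>"  # toy example
soup = BeautifulSoup(html, "html.parser")
cells = soup.find_all("td")
print(repr(cells[0].string))  # 'value'
print(repr(cells[1].string))  # None -- an empty td has no string
print(repr(cells[1].text))    # ''   -- .text always returns a str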

import requests
from bs4 import BeautifulSoup
import time
import random

def getip():
    # NOTE: these entries carry no ports, so requests will assume port 80;
    # this may be the proxy problem mentioned at the end of the post
    proxy_list = [
        "115.196.48.33",
        "111.177.189.202",
        "163.204.240.4",
        "123.163.97.249",
        "121.13.252.58"
    ]
    proxy = random.choice(proxy_list)  # pick one IP at random
    proxies = {
        'http': 'http://' + proxy,
        'https': 'https://' + proxy
    }
    return proxies

# Return a random request-header dict
def getheaders():
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    UserAgent = random.choice(user_agent_list)  # pick one User-Agent at random
    headers = {'User-Agent': UserAgent}
    return headers

for num in range(1, 20):
    url = "http://www.credithebei.gov.cn:8082/was5/web/detail?record={}&channelid=270264".format(num)
    # res = urllib.request.urlopen(url)
    try:
        res = requests.get(url, timeout=30, headers=getheaders(), proxies=getip()).text
        soup = BeautifulSoup(res, 'html.parser')
        table = soup.find("table")  # the page has exactly one table
        td = table.find_all("td")
        time.sleep(1)
        for t in td:
            print(t.text)
    except Exception:
        print("Fetch failed")

There is still one unsolved problem (the proxy IP issue); I'll update this post once it's fixed!
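In case it helps later: the entries in proxy_list carry no ports (so requests assumes port 80), and free proxies are often simply dead. A minimal sketch of a pre-flight check, assuming proxies in "host:port" form and using httpbin.org as the echo endpoint (both are assumptions, not part of the original script):

import requests

def check_proxy(proxy):
    # proxy is assumed to be "host:port"; the bare IPs above default to port 80
    proxies = {'http': 'http://' + proxy, 'https': 'https://' + proxy}
    try:
        # httpbin.org/ip echoes the caller's IP, confirming the proxy is actually used
        r = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        return False

getip() could then filter proxy_list with this check before choosing one at random.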

Reposted from blog.csdn.net/Forrest_Gump_sad/article/details/89359347