Scraping car reviews from PCauto (太平洋汽车) with a crawler
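The script below walks through every page of the user-review listing for one car model on price.pcauto.com.cn, collects the 优点 (pros), 缺点 (cons) and remaining comment blocks from each page, and appends their text to a local file.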

# encoding: utf-8
import urllib.request
import random
import time
from bs4 import BeautifulSoup
# Note: the html5lib and lxml parsers used below are passed to
# BeautifulSoup by name; they only need to be installed, not imported.

# Set the target URL and build the request with urllib.request.Request
url0 = "http://price.pcauto.com.cn/comment/sg3225/"
req0 = urllib.request.Request(url0)

# Use add_header to set a browser-like User-Agent so the request
# is not rejected as coming from a script
req0.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0")

# Open the page with urllib.request.urlopen (the HTML is read() below)
html0 = urllib.request.urlopen(req0)

# Build a BeautifulSoup instance from the HTML, stored as soup0
soup0 = BeautifulSoup(html0.read(), "html5lib")
# Get the last page number (see the previous section, which walks
# through extracting the last page, for details)
total_page = int(soup0.find("div", class_="pcauto_page").find_all("a")[-2].get_text())
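# Assumption: the pager's final <a> is the "next page" control, so the
# second-to-last link ([-2]) carries the highest page number.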
# Append output to a local file; utf_8_sig writes a BOM so the file
# displays correctly in Windows tools such as Excel
myfile = open("langyi.txt", "a", encoding="utf_8_sig")

for i in range(1, total_page + 1):
    # Pause for a random 1-3 seconds so requests don't hammer the server
    stop = random.uniform(1, 3)
    time.sleep(stop)
    url = "http://price.pcauto.com.cn/comment/sg3225/p" + str(i) + ".html"
    print(url)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36")
    html = urllib.request.urlopen(req).read()
    soup = BeautifulSoup(html, "lxml")
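    # (Parser note: lxml is used here for speed, while html5lib above is
    # more forgiving and parses like a browser; both are third-party
    # packages that must be installed separately.)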

    # Pros (优点), cons (缺点), and the remaining comment blocks
    contents = soup.find_all('div', class_="conLit youdian")
    contents1 = soup.find_all('div', class_="conLit quedian")
    # Caution: class_="conLit" matches any div whose class list contains
    # "conLit", so contents2 also picks up the two lists above and that
    # text ends up written twice
    contents2 = soup.find_all('div', class_="conLit")
    # Append the text of every matched block to the output file
    for k in contents + contents1 + contents2:
        myfile.write(k.get_text() + '\n')



myfile.close()
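
To reuse the scraper for other models, the same steps can be wrapped in a function that takes the model code (sg3225 above) as a parameter. The sketch below assumes every model's review pages follow the same /comment/<code>/pN.html pattern and markup; scrape_model, max_pages and the seen set are illustrative additions, not part of the original script. It also dedupes text to compensate for the overlapping conLit selectors.

def scrape_model(sg_code, outfile, max_pages=None):
    # Minimal sketch: scrape all review text for one model into outfile,
    # assuming the same pager markup and conLit blocks as above
    base = "http://price.pcauto.com.cn/comment/" + sg_code + "/"
    ua = ("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
          "(KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36")

    # Read the last page number from the pager on the first page
    req = urllib.request.Request(base, headers={"User-Agent": ua})
    soup = BeautifulSoup(urllib.request.urlopen(req).read(), "html5lib")
    last = int(soup.find("div", class_="pcauto_page").find_all("a")[-2].get_text())
    if max_pages is not None:
        last = min(last, max_pages)

    seen = set()  # drop duplicates caused by the overlapping selectors
    with open(outfile, "a", encoding="utf_8_sig") as f:
        for i in range(1, last + 1):
            time.sleep(random.uniform(1, 3))
            page = base + "p" + str(i) + ".html"
            req = urllib.request.Request(page, headers={"User-Agent": ua})
            soup = BeautifulSoup(urllib.request.urlopen(req).read(), "lxml")
            # class_="conLit" already matches the youdian/quedian variants
            for div in soup.find_all("div", class_="conLit"):
                text = div.get_text()
                if text not in seen:
                    seen.add(text)
                    f.write(text + "\n")

# Example call:
# scrape_model("sg3225", "langyi.txt", max_pages=2)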





Reposted from blog.csdn.net/weixin_38987362/article/details/81737889