[Car Word-of-Mouth Analysis] 3. Crawling Car Review Data

Environment

  • Ubuntu 16.04
  • Python 3.5

Framework

  • Scrapy

Objective

This project is a car word-of-mouth analysis; the first step is to crawl review data for different car models.

The car model categories on 58che are used as the entry point for crawling the review data.

Crawling Workflow

  1. First, collect the link of each car model, taking the models in the red box of the screenshot below as an example.

    [screenshot]

  2. Open each link and scrape the overall score shown in the red box below, writing it to a file.

    [screenshot]

  3. After writing the overall score, build the link to that model's user review page by concatenation.

    Appending list_s1_p1.html to the link obtained in step 1 gives the URL of the user review page.

    [Note] This is the link to the first page only; if there are further pages, step 5 below describes how they are handled.

    [screenshot]

  4. Scrape the data on the review page, such as user IDs, ratings, and comments.

    [screenshot]

  5. If the review page has a next page, continue scraping the review data on that page.

    [Method]

    Check whether the page contains a "next page" element; if it does, issue a request whose callback is the same review-page parsing method.

  6. Save the scraped data to files.

Detailed Steps

Create a New Project

First, create the project directory:

cd /home/t/dataset/
mkdir carSpider

Then create the Scrapy project:

scrapy startproject carSpider
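
If the command succeeds, the generated scaffolding should look roughly like this (the exact files depend on the Scrapy version):

carSpider/
    scrapy.cfg            # deploy configuration
    carSpider/
        __init__.py
        items.py          # item definitions, edited below
        middlewares.py    # spider/downloader middlewares (left untouched here)
        pipelines.py      # item pipelines, edited below
        settings.py       # project settings, edited below
        spiders/
            __init__.py   # carSpider.py (the spider itself) will be added here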

Edit items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy
class CarspiderItem(scrapy.Item):
    file=scrapy.Field() # output file name
    car=scrapy.Field() # car model
    score=scrapy.Field() # overall score
    u_id=scrapy.Field() # user ID
    u_score=scrapy.Field() # user rating
    u_merit=scrapy.Field() # pros from the user review
    u_demerit=scrapy.Field() # cons from the user review
    u_summary=scrapy.Field() # summary of the user review
    u_flower=scrapy.Field() # number of "flowers" (likes) on the review
    u_brick=scrapy.Field() # number of "bricks" (dislikes) on the review
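
Scrapy items behave like dictionaries: fields are set and read by key, and an item converts cleanly to a dict, which the pipeline below relies on. A quick sketch with made-up values:

from carSpider.items import CarspiderItem

item = CarspiderItem()
item['file'] = '大众朗逸'   # made-up car model name
item['u_id'] = 'some_user'  # made-up user ID
item['u_score'] = 4
print(dict(item))           # the assigned fields as a plain dict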

Write carSpider.py

import scrapy
from carSpider.items import CarspiderItem

baseDir = '/home/t/dataset/carRemark/'
startUrl='http://www.58che.com/brand.html'

class CarSpider(scrapy.Spider):

    name='spider' # spider name
    def __init__(self):
        self.start_urls=[startUrl] 

    # First-level parse method
    def parse(self,response):
        # Locate the car model elements on the brand page
        subclasses=response.css('body > div.fltop > div.marcenter > div > div > div.r > ul > li > dl > dt > a')
        for subclass in subclasses:
            subclass_name=subclass.xpath('text()').extract_first() # get the car model name text
            subclass_link=subclass.xpath('@href').extract_first() # get the car model link
            yield scrapy.Request(url=subclass_link,callback=self.parse_car_subclass,meta={'file':subclass_name}) # call back the next-level parse method, passing the model name along as the output file name

    # Second-level parse method
    def parse_car_subclass(self,response):
        infos=response.css('#line1 > div.cars_line2.l > div.dianpings > div.d_div1.clearfix > font') # locate the overall score element
        for info in infos:
            score=info.xpath('text()').extract_first() # get the overall score text
            file=response.meta['file'] # get meta['file'] passed along by the previous Request
            self.writeScore(file,score) # write the overall score to the file
            link=response.url+'list_s1_p1.html' # build the link to the first page of user reviews
            yield scrapy.Request(url=link,callback=self.parse_remark,meta={'file':file}) # call back the next-level parse method, again passing the model name along as the file name

    # Third-level parse method
    def parse_remark(self,response):
        # Locate the user review elements
        infos=response.css('body > div.newbox > div > div.xgo_cars_w760.l > div.xgo_dianping_infos.mb10 > div.xgo_cars_dianping > div > dl')
        for info in infos:
            uid=info.xpath('dd[1]/strong/a/text()')[0].extract() # get the user ID
            score=info.xpath('dd[1]/div/div/@style')[0].extract() # get the user star rating (a width style)
            score=self.getScore(score) # convert the star rating to a 5-point score

            try:
                # Check whether a 'pros' label element exists; if so, its next sibling node holds the pros text, otherwise leave it empty
                node=info.xpath('dd[2]/div/div[contains(@class,"l redc00")]')[0]
                if node is not None:
                    merit=node.xpath('following-sibling::*[1]/text()')[0].extract()
                else:
                    merit=''
            except:
                merit=''


            try:
                # Check whether a 'cons' label element exists; if so, its next sibling node holds the cons text, otherwise leave it empty
                node=info.xpath('dd[2]/div/div[contains(@class,"l hei666")]')[0]
                if node is not None:
                    demerit=node.xpath('following-sibling::*[1]/text()')[0].extract()
                else:
                    demerit=''
            except:
                demerit=''

            try:
                # Check whether a 'summary' label element exists; if so, its next sibling node holds the summary text, otherwise leave it empty
                node=info.xpath('dd[2]/div/div[contains(@class,"l")]')[0]
                if node is not None:
                    summary=node.xpath('following-sibling::*[1]/text()')[0].extract()
                else:
                    summary=''
            except:
                summary=''

            flower=info.xpath('dd[2]/div[contains(@class,"apply")]/a[3]/span/text()')[0].extract() # get the number of flowers (likes)
            brick=info.xpath('dd[2]/div[contains(@class,"apply")]/a[4]/span/text()')[0].extract() # get the number of bricks (dislikes)

            # Create the item
            item=CarspiderItem()
            item['file']=response.meta['file']
            item['u_id']=uid
            item['u_score']=score
            item['u_merit']=merit
            item['u_demerit']=demerit
            item['u_summary']=summary
            item['u_flower']=flower
            item['u_brick']=brick

            # Yield the item
            yield item

        # If there is a `next page` element, call back `parse_remark` (this third-level parse method) to keep scraping review data from the next page
        # Locate the `next page` element
        next_pages=response.css('body > div.newbox > div > div.xgo_cars_w760.l > div.xgo_dianping_infos.mb10 > div.xgo_cars_dianping > div > div > a.next')
        for next_page in next_pages:
            # if a `next page` element exists, build its link and call back the third-level parse method to scrape the next page of review data
            if next_page is not None:
                next_page_link=next_page.xpath('@href')[0].extract()
                next_page_link='http://www.58che.com'+next_page_link
                file=response.meta['file']
                yield scrapy.Request(url=next_page_link, callback=self.parse_remark, meta={'file': file})


    # Write the overall score to a file
    def writeScore(self,file,score):
        with open(baseDir+file+'.json','a+') as f:
            f.write(score+'\n')

    # Convert the star-rating width to a 5-point score, similar to a switch statement
    def getScore(self,text):
        text=text.split(':')[1] # the raw text looks like `width:100%`; split on ':' and keep the part after the colon
        return {
            '100%':5,
            '80%':4,
            '60%':3,
            '40%':2,
            '20%':1,
            '0%':0
        }.get(text)

[Explanation]

        # Locate the user review elements
        infos=response.css('body > div.newbox > div > div.xgo_cars_w760.l > div.xgo_dianping_infos.mb10 > div.xgo_cars_dianping > div > dl')

This selector locates the element shown in the screenshot below: the whole block of each individual review on the review page.

[screenshot]

        for info in infos:
            uid=info.xpath('dd[1]/strong/a/text()')[0].extract() # get the user ID
            score=info.xpath('dd[1]/div/div/@style')[0].extract() # get the user star rating
            score=self.getScore(score) # convert the star rating to a 5-point score

The element located by uid is shown in the screenshot below.

[screenshot]

The element located by score is shown in the screenshot below; its style attribute is read, whose value looks like width:80%, and the getScore() method converts it into a score out of 5.

[screenshot]
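
For illustration, here is the mapping getScore() applies, shown as a standalone sketch (not part of the spider itself):

style = 'width:80%'
score = {'100%':5,'80%':4,'60%':3,'40%':2,'20%':1,'0%':0}.get(style.split(':')[1])
print(score)  # prints 4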

try:
    # Check whether a 'pros' label element exists; if so, its next sibling node holds the pros text, otherwise leave it empty
    node=info.xpath('dd[2]/div/div[contains(@class,"l redc00")]')[0]
    if node is not None:
        merit=node.xpath('following-sibling::*[1]/text()')[0].extract()
    else:
        merit=''
except:
    merit=''

First check whether a 'pros' label element exists, as shown in the red box of the screenshot below. If it does, take the content of its next sibling node, shown in the blue box; otherwise leave the field empty.

[screenshot]
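
To make the following-sibling step concrete, here is a standalone sketch against a made-up HTML fragment that only approximates the real page markup (the fragment and its texts are assumptions, not the actual page):

from scrapy.selector import Selector

# hypothetical fragment: a 'pros' label div followed by a sibling div holding the actual comment text
html = '<dd><div><div class="l redc00">优点</div><div>空间大,油耗低</div></div></dd>'
node = Selector(text=html).xpath('//div[contains(@class,"l redc00")]')[0]
print(node.xpath('following-sibling::*[1]/text()').extract_first())  # 空间大,油耗低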

# If there is a `next page` element, call back `parse_remark` (the third-level parse method) to keep scraping review data from the next page
# Locate the `next page` element
next_pages=response.css('body > div.newbox > div > div.xgo_cars_w760.l > div.xgo_dianping_infos.mb10 > div.xgo_cars_dianping > div > div > a.next')
for next_page in next_pages:
    # if a `next page` element exists, build its link and call back the third-level parse method to scrape the next page of review data
    if next_page is not None:
        next_page_link=next_page.xpath('@href')[0].extract()
        next_page_link='http://www.58che.com'+next_page_link
        file=response.meta['file']
        yield scrapy.Request(url=next_page_link, callback=self.parse_remark, meta={'file': file})

After parsing the content above, check whether the review page is paginated by locating a "next page" element, shown in the red box of the screenshot below; if it exists, take its link, shown in the orange box.

After obtaining the link, call back the parse_remark method to parse the next page of reviews.

[screenshot]
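
As a side note, instead of hard-coding the domain when building the absolute link, Scrapy's response.urljoin() can resolve the relative href against the current page URL; a small variant of the same logic:

next_page_link = response.urljoin(next_page.xpath('@href').extract_first())
yield scrapy.Request(url=next_page_link, callback=self.parse_remark, meta={'file': response.meta['file']})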

Edit pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
import codecs

baseDir = '/home/t/dataset/carRemark/'
class CarspiderPipeline(object):
    def process_item(self, item, spider):
        print(item['file'])
        with codecs.open(baseDir+item['file']+'.json','a+',encoding='utf-8') as f:
            line=json.dumps(dict(item),ensure_ascii=False)+'\n'
            f.write(line)

        return item
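
Because writeScore() first appends the bare overall score and the pipeline then appends one JSON object per review, each output file is effectively in JSON Lines form. A hypothetical reader for one such file (the file name here is just an example) could look like this:

import json

records = []
with open('/home/t/dataset/carRemark/example.json', encoding='utf-8') as f:  # hypothetical file name
    for line in f:
        try:
            obj = json.loads(line)
        except ValueError:
            continue  # skip the bare overall-score line if it is not valid JSON
        if isinstance(obj, dict):
            records.append(obj)
print(len(records))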

Edit settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for carSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'carSpider'

SPIDER_MODULES = ['carSpider.spiders']
NEWSPIDER_MODULE = 'carSpider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'carSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'carSpider.middlewares.CarspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'carSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'carSpider.pipelines.CarspiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = False
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

[Explanation]

ROBOTSTXT_OBEY = False

Change the default True to False, so the spider does not skip pages disallowed by the site's robots.txt.

ITEM_PIPELINES = {
    'carSpider.pipelines.CarspiderPipeline': 300,
}

Uncomment this block to register the pipeline; otherwise the pipeline will not be used.

Run the Spider

Create a new file entrypoint.py in the project root directory.

[screenshot]

from scrapy.cmdline import execute
execute(['scrapy','crawl','spider'])
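
entrypoint.py simply invokes the Scrapy command line from Python, which is convenient for running or debugging the spider from an IDE. Equivalently, the spider can be launched from the project root in a terminal:

scrapy crawl spider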

Project Source Code

GitHub repository

Reposted from blog.csdn.net/tiweeny/article/details/79277889