Scraping the Yangguang (阳光政务) government complaints site with Scrapy

A quick note up front: everything on this blog is just my own practice, nothing commercial. If you want something removed, message me privately and I'll take it down as soon as I see it; no lawyer letters, please. Thanks. 鸡你太美

yg.py

# -*- coding: utf-8 -*-
import scrapy
from yangguang.items import YangguangItem    # the item fields are defined in items.py, included below

class YgSpider(scrapy.Spider):
    name = 'yg'                               # spider name
    allowed_domains = ['sun0769.com']         # restrict crawling to this domain
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=0']    # initial URL

    def parse(self, response):
        tr_list = response.xpath('//div[@class="greyframe"]/table[2]//tr//tr')
        for tr in tr_list:
            item = YangguangItem()            # instantiate the item
            item["title"] = tr.xpath('./td[2]/a[2]/@title').extract_first()
            item["href"] = tr.xpath('./td[2]/a[2]/@href').extract_first()
            item["publish_data"] = tr.xpath('./td[5]/text()').extract_first()

            yield scrapy.Request(             # Request object: hand item["href"] and the meta dict to parse_href
                item["href"],
                callback=self.parse_href,     # callback is the method that will handle this response
                meta={"item": item}           # meta must be a dict; it carries data to the next callback
            )

        # next_url: follow the pagination link so the list pages keep getting crawled
        next_url = response.xpath("//a[text()='>']/@href").extract_first()    # link to the next page
        if next_url is not None:
            yield scrapy.Request(             # feed next_url back into parse
                next_url,
                callback=self.parse
            )


    def parse_href(self, response):
        item = response.meta["item"]          # receive the item passed through meta
        item["content"] = response.xpath('//td[@class="txt16_3"]//text()').extract()
        item["content_img"] = response.xpath('//td[@class="txt16_3"]//img/@src').extract()
        item["content_img"] = ['http://wz.sun0769.com' + i for i in item["content_img"]]
        yield item
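Passing the half-filled item through meta works, but on Scrapy 1.7+ there is a cleaner alternative: cb_kwargs delivers data to the callback as a named argument. Below is a minimal sketch of the same hand-off, not from the original post; the spider name yg_kwargs is made up, and the XPaths are copied from above.

# A minimal sketch, assuming Scrapy 1.7+: cb_kwargs replaces the meta dict.
import scrapy
from yangguang.items import YangguangItem

class YgKwargsSpider(scrapy.Spider):
    name = 'yg_kwargs'    # hypothetical name, just for this sketch
    allowed_domains = ['sun0769.com']
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=0']

    def parse(self, response):
        for tr in response.xpath('//div[@class="greyframe"]/table[2]//tr//tr'):
            item = YangguangItem()
            item["title"] = tr.xpath('./td[2]/a[2]/@title').extract_first()
            item["href"] = tr.xpath('./td[2]/a[2]/@href').extract_first()
            yield scrapy.Request(
                item["href"],
                callback=self.parse_href,
                cb_kwargs={"item": item},    # delivered to the callback as a keyword argument
            )

    def parse_href(self, response, item):    # item arrives directly, no response.meta lookup
        item["content"] = response.xpath('//td[@class="txt16_3"]//text()').extract()
        yield item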

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class YangguangItem(scrapy.Item):
    title = scrapy.Field()          # complaint title
    href = scrapy.Field()           # link to the detail page
    publish_data = scrapy.Field()   # publish date ("data" kept as-is to match the spider)
    content_img = scrapy.Field()    # image URLs inside the complaint body
    content = scrapy.Field()        # complaint text
    _id = scrapy.Field()            # reserved for a database id (e.g. MongoDB)
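The `_id` field suggests the items are meant to end up in MongoDB, where `_id` is the document key. The post doesn't show that pipeline, but a minimal sketch with pymongo might look like the following; the database name yangguang and collection name complaints are assumptions, as is a MongoDB instance on localhost.

# Hypothetical MongoDB storage pipeline (not from the original post).
from pymongo import MongoClient

class MongoPipeline(object):
    def open_spider(self, spider):
        self.client = MongoClient('localhost', 27017)       # assumed local instance
        self.collection = self.client["yangguang"]["complaints"]    # assumed names

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.collection.insert_one(dict(item))    # MongoDB fills in _id automatically
        return item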

pipelines.py

import re

class YangguangPipeline(object):
    def process_item(self, item, spider):
        item["content"] = self.process_content(item["content"])    # normalize the raw content strings
        return item

    def process_content(self, content):
        content = [re.sub(r"\r\n|\xa0|\t|\s*", "", i) for i in content]    # strip \r\n, \xa0, \t and other whitespace
        while '' in content:
            content.remove('')               # drop the strings left empty
        content = ''.join(content)           # join the pieces into one string
        return content
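One thing the post leaves implicit: a pipeline only runs if it is enabled in settings.py. A minimal sketch, assuming the default layout that scrapy startproject yangguang generates:

# settings.py (sketch): enable the pipeline; the number is its run order, lower runs first
ITEM_PIPELINES = {
    'yangguang.pipelines.YangguangPipeline': 300,
}

With that in place, scrapy crawl yg from the project root runs the spider; adding -o items.json would also dump the cleaned items to a file.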


Reposted from blog.csdn.net/dh0805dh/article/details/90029799