Using scrapy_redis with Python 3 to save the POS back-end data (price, specification, minimum order quantity, sales area, etc.) to Excel

1. Create the Scrapy project

scrapy startproject PosProductRedis

2. Enter the project directory and create a Spider with the genspider command

scrapy genspider posproductredis XXXX.com
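
After these two commands the project should look roughly like this (the exact files vary slightly by Scrapy version; middlewares.py appears in Scrapy 1.x and later):

PosProductRedis/
    scrapy.cfg
    PosProductRedis/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            posproductredis.py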

3. Define the data to scrape (items.py)

# -*- coding: utf-8 -*-
import scrapy

class PosproductredisItem(scrapy.Item):
    # serial number in the listing
    number_list = scrapy.Field()
    # product ID
    id_list = scrapy.Field()
    # merchant name
    qiye_list = scrapy.Field()
    # category
    product_list = scrapy.Field()
    # product name
    product_name_list = scrapy.Field()
    # sales status
    sale_list = scrapy.Field()
    # sales title (may contain whitespace)
    sales_title = scrapy.Field()
    # sales area
    sales_area = scrapy.Field()
    # specification
    product_size = scrapy.Field()
    # minimum order quantity
    product_quantity = scrapy.Field()
    # retail price
    retail_price = scrapy.Field()
    # retail promotion price
    promotion_price = scrapy.Field()
    # sku id, optional
    # skuid = scrapy.Field()

4. Write the Spider that extracts the item data (spiders/posproductredis.py)

# -*- coding: utf-8 -*-
# Use scrapy_redis to save the POS back-end data (price, specification,
# minimum order quantity, sales area, etc.) to Excel.
import scrapy
from PosProductRedis.items import PosproductredisItem
from scrapy_redis.spiders import RedisSpider
import re

class PosproductredisSpider(RedisSpider):
    name = 'posproductredis'
    allowed_domains = ['XXXX.com']
    redis_key = "PosproductredisSpider:start_urls"
    # lpush PosproductredisSpider:start_urls https://pos.XXXX.com/item/itemonlist.html?d-49489-p=1

    login_page = "https://pos.XXXX.com/login.html"

    def start_requests(self):
        yield scrapy.Request(url=self.login_page, callback=self.login)

    def login(self, response):
        self.username = input("Account: ")
        self.password = input("Password: ")
        yield scrapy.FormRequest.from_response(
            response,
            formdata={"j_username": self.username, "j_password": self.password},
            callback=self.parse_page
        )

    # Check whether the login succeeded before visiting pages that require it.
    def parse_page(self, response):
        if "loginerror" in response.body.decode('utf-8'):
            print("Login failed: wrong phone number or password!")
        # "首页" ("home page") only appears in the page after a successful login
        if "</span>首页" in response.body.decode('utf-8'):
            print("Welcome '%s', you are logged in to the POS management system!" % self.username)
            print("On the slaver side (where the crawler runs), enter: lpush %s <list-page URL>" % self.redis_key)
            # After logging in you could also fetch the product list page
            # directly and hand it to parse():
            # yield scrapy.Request(response.url, callback=self.parse)
    def parse(self, response):
        items = []
        # Links to the other pages of the listing; they need to be joined
        # with "https://pos.XXXX.com/item/itemonlist.html".
        next_url_list = response.xpath('//body//div//div/span/span[@class="paginate_button"]/a/@href').extract()
        for each in response.xpath('//div[@class="dataTables_wrapper"]'):
            # serial number
            number_list = each.xpath('.//td[1]/text()').extract()
            # product ID
            id_list = each.xpath('.//tbody//tr//td//input[@onclick="homeShow(this)"]/@value').extract()
            # merchant name
            qiye_list = each.xpath('.//td[2]/text()').extract()
            # category
            product_list = each.xpath('.//td[4]/text()').extract()
            # product name
            product_name_list = each.xpath('.//td[3]/a/text()').extract()
            for i in range(len(id_list)):
                item = PosproductredisItem()
                item['number_list'] = number_list[i].strip()
                item['id_list'] = id_list[i]
                item['qiye_list'] = qiye_list[i].strip()
                item['product_list'] = product_list[i].strip()
                item['product_name_list'] = product_name_list[i].strip()
                items.append(item)
        # Request every product's detail page, carrying the item along in meta.
        for item in items:
            id_url = "https://pos.XXXX.com/item/showitem.html?item.id=" + item['id_list']
            yield scrapy.Request(url=id_url, meta={'meta_1': item}, callback=self.parse_id)
        # The pagination links look like "?d-49489-p=2"; pull out the page number.
        pattern = re.compile(r"d-49489-p=(\d+)")
        for url in next_url_list:
            i = pattern.search(url).group(1)
            print("Processing page %s..." % i)
            fullurl = 'https://pos.XXXX.com/item/itemonlist.html' + str(url)
            yield scrapy.Request(url=fullurl, callback=self.parse)
    # Follow each id link to get the price, specification, minimum order quantity, etc.
    def parse_id(self, response):
        # data carried over from parse() via meta
        meta_1 = response.meta['meta_1']
        # sales title
        sales_title = response.xpath('//div[@id="tabs-1"]/p[8]/span[@class="field"]/text()').extract()
        # sales status (may contain whitespace)
        sale_list = response.xpath('//div[@id="tabs-1"]/p[6]/span/text()').extract()
        # sales area
        sales_area = response.xpath('//div[@id="tabs-6"]/table/tbody[@id="review_list"]/tr/td[2]/text()').extract()
        # specification
        product_size = response.xpath('//div[@id="tabs"]/div[@id="tabs-5"]/table/tbody/tr/td[1]/text()').extract()
        # minimum order quantity
        product_quantity = response.xpath('//div[@id="tabs"]/div[@id="tabs-5"]/table/tbody/tr/td[8]/text()').extract()
        # sku id of each specification, taken from links like javascript:show('688')
        skuid_list = response.xpath('//div[@id="tabs"]/div[@id="tabs-5"]/table/tbody/tr/td[9]/a/@href').extract()
        # regex that pulls the number out of each skuid_list entry
        pattern = re.compile(r"\d+")
        # A specification always comes with a minimum order quantity: both are
        # required fields, so every product that is online has them.
        for i in range(len(product_size)):
            # Create a fresh item per specification; reusing one instance would
            # make all the pending requests share (and overwrite) the same object.
            item = PosproductredisItem()
            item['product_size'] = product_size[i]
            item['product_quantity'] = product_quantity[i]
            # multiple sales areas are joined with semicolons
            if len(sales_area) > 1:
                item['sales_area'] = ";".join(sales_area)
            elif len(sales_area) == 1:
                # all mainland provinces, with the "新疆" / "新疆省" spelling variants
                area_list = "北京市,天津市,河北省,山西省,内蒙古,辽宁省,吉林省,黑龙江省,上海市,江苏省,浙江省,安徽省,福建省,江西省,山东省,河南省,湖北省,湖南省,广东省,广西,海南省,重庆市,四川省,贵州省,云南省,西藏,陕西省,甘肃省,青海省,宁夏,新疆"
                area_list2 = "北京市,天津市,河北省,山西省,内蒙古,辽宁省,吉林省,黑龙江省,上海市,江苏省,浙江省,安徽省,福建省,江西省,山东省,河南省,湖北省,湖南省,广东省,广西,海南省,重庆市,四川省,贵州省,云南省,西藏,陕西省,甘肃省,青海省,宁夏,新疆省"
                full_area_list = "北京市,天津市,河北省,山西省,内蒙古,辽宁省,吉林省,黑龙江省,上海市,江苏省,浙江省,安徽省,福建省,江西省,山东省,河南省,湖北省,湖南省,广东省,广西,海南省,重庆市,四川省,贵州省,云南省,西藏,陕西省,甘肃省,青海省,宁夏,新疆,台湾省,香港,澳门"
                if sales_area[0] == area_list or sales_area[0] == area_list2:
                    item['sales_area'] = "全国(不含港澳台)"  # nationwide, excluding HK/Macao/Taiwan
                elif sales_area[0] == full_area_list:
                    item['sales_area'] = "全国"  # nationwide
                else:
                    item['sales_area'] = sales_area[0]
            else:
                item['sales_area'] = "无区域"  # no sales area

            item['sales_title'] = sales_title[0].strip()
            item['sale_list'] = sale_list[0].strip()
            item['number_list'] = meta_1['number_list']
            item['id_list'] = meta_1['id_list']
            item['qiye_list'] = meta_1['qiye_list']
            item['product_list'] = meta_1['product_list']
            item['product_name_list'] = meta_1['product_name_list']
            # pull the sku id number out of javascript:show('688')
            skuid_number = pattern.search(skuid_list[i]).group()
            # the sku id could be stored as well, but it is not needed here
            # item['skuid'] = skuid_number
            skuid_url = "https://pos.XXXX.com/item/showitemprice.html?sku.id=" + skuid_number
            yield scrapy.Request(url=skuid_url, meta={'meta_2': item}, callback=self.parse_skuid)

    def parse_skuid(self, response):
        # data carried over from parse_id() via meta
        meta_2 = response.meta['meta_2']
        item = PosproductredisItem()
        # retail prices; use set() to drop duplicates, then convert back to a list
        retail_price_list = response.xpath('//div[@id="tabs-1"]/table[@id="item"]/tbody/tr/td[2]/text()').extract()
        retail_price = list(set(retail_price_list))
        for i in range(len(retail_price)):
            if retail_price[i] == "0.0":
                retail_price[i] = '零售价待定'  # retail price to be determined
            elif retail_price[i] == "0.00":
                retail_price[i] = '零售价数据0.00有误'  # 0.00 looks like bad data
        # multiple prices are joined with semicolons
        if len(retail_price) > 1:
            item['retail_price'] = ";".join(retail_price)
        elif len(retail_price) == 1:
            item['retail_price'] = retail_price[0]
        else:
            # without this the field would stay unset and the pipeline would raise a KeyError
            item['retail_price'] = ""

        # retail promotion prices; deduplicate with set() the same way
        promotion_price_list = response.xpath('//div[@id="tabs-1"]/table[@id="item"]/tbody/tr/td[3]/text()').extract()
        promotion_price = list(set(promotion_price_list))
        for i in range(len(promotion_price)):
            if promotion_price[i] == "0.0":
                promotion_price[i] = '无促销价'  # no promotion price
            elif promotion_price[i] == "0.00":
                promotion_price[i] = '促销价数据0.00有误'  # 0.00 looks like bad data
        # multiple promotion prices are joined with semicolons
        if len(promotion_price) > 1:
            item['promotion_price'] = ";".join(promotion_price)
        elif len(promotion_price) == 1:
            item['promotion_price'] = promotion_price[0]
        else:
            item['promotion_price'] = ""

        item['number_list'] = meta_2['number_list']
        # item['skuid'] = meta_2['skuid']
        item['id_list'] = meta_2['id_list']
        item['qiye_list'] = meta_2['qiye_list']
        item['product_list'] = meta_2['product_list']
        item['product_name_list'] = meta_2['product_name_list']
        item['sales_title'] = meta_2['sales_title']
        item['sale_list'] = meta_2['sale_list']
        item['product_size'] = meta_2['product_size']
        item['product_quantity'] = meta_2['product_quantity']
        item['sales_area'] = meta_2['sales_area']
        yield item

5. Save the scraped data in the item pipeline (pipelines.py)

# -*- coding: utf-8 -*-
import json
from openpyxl import Workbook
import time

# bytes-to-str helper for json.dumps; not used by the Excel pipeline below,
# but handy if you also want to dump items as JSON
class MyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, bytes):
            return str(o, encoding='utf-8')
        return json.JSONEncoder.default(self, o)

class PosproductredisPipeline(object):
    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        # header row: No., ID, merchant name, category, product name,
        # sales title, sales status, retail price, promotion price,
        # specification, minimum order quantity, sales area
        self.ws.append(['序号', 'ID', '商家名称', '产品分类',
                        '产品名称', '销售标题', '销售情况', '零售价',
                        '促销价', '规格', '起订量', '销售区域'
                        ])

    def process_item(self, item, spider):
        text = [item['number_list'], item['id_list'], item['qiye_list'], item['product_list'],
                item['product_name_list'], item['sales_title'], item['sale_list'], item['retail_price'],
                item['promotion_price'], item['product_size'], item['product_quantity'], item['sales_area']]
        self.ws.append(text)
        return item

    def close_spider(self, spider):
        # append today's date (YYYY-MM-DD) to the file name
        file_end_name = time.strftime("%Y-%m-%d", time.localtime())
        self.wb.save("pos_product_redis" + file_end_name + '.xlsx')
        print("All data processed. Done!")
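
Because scrapy_redis's RedisPipeline is also enabled in step 6, every item additionally lands in redis. A minimal sketch of reading them back with the redis-py package, assuming the scrapy_redis default items key "<spider name>:items" and the example host from the settings comments:

import json
import redis

# host/port are examples; use your Master's redis address
r = redis.StrictRedis(host="192.168.0.109", port=6379)
# RedisPipeline serializes each item to JSON and pushes it onto a redis list
for raw in r.lrange("posproductredis:items", 0, -1):
    print(json.loads(raw))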

6. Configure settings (settings.py)

# Use scrapy-redis's dedup component instead of Scrapy's default dupefilter
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use scrapy-redis's scheduler instead of Scrapy's default
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Allow pausing: the request records in redis are not lost
SCHEDULER_PERSIST = True

# If omitted, scrapy-redis connects to redis on the local machine
# REDIS_HOST = "192.168.0.109"
# REDIS_PORT = 6379

# Default scrapy-redis request queue: a priority queue
SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
# FIFO queue; in the author's tests this raised: Unhandled error in Deferred
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
# LIFO (stack) queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"

# Configure item pipelines: enable the project pipeline and add scrapy-redis's RedisPipeline
ITEM_PIPELINES = {
    'PosProductRedis.pipelines.PosproductredisPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}

# Obey robots.txt rules; for details see https://blog.csdn.net/z564359805/article/details/80691677
ROBOTSTXT_OBEY = False

# Override the default request headers: add a User-Agent
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);',
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # 'Accept-Language': 'en',
}

# Optionally write the log to a local file
LOG_FILE = "posproductredis.log"
LOG_LEVEL = "DEBUG"
# Redirect stdout (including print output) into the log
LOG_STDOUT = True
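
When the slaver (crawler) machines are separate from the Master, uncomment REDIS_HOST/REDIS_PORT on each slaver and point them at the Master's redis; the IP below is just the example from the comments above:

# settings.py on each slaver machine (example address)
REDIS_HOST = "192.168.0.109"
REDIS_PORT = 6379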

7. Start the redis database (the following link walks through it):

https://blog.csdn.net/z564359805/article/details/80808155
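
A minimal sketch of what that boils down to (paths and config flags depend on your install; by default redis only listens on localhost, so for a multi-machine setup the Master's redis.conf needs a reachable bind address):

redis-server                      # start redis on the Master (pass a redis.conf path if needed)
redis-cli                         # open a local client on the Master
redis-cli -h 192.168.0.109        # from a slaver, connect to the Master's redis (example IP)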

8. With everything configured, start crawling. From the spiders folder, launch the Spider (after logging in, it will sit idle until a start URL is pushed in step 9):

scrapy runspider posproductredis.py

9. On the Master side (the core server), issue the push command in redis-cli, in this format:

lpush PosproductredisSpider:start_urls https://pos.XXXX.com/item/itemonlist.html?d-49489-p=1
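
Once the URL is pushed, the idle spider picks it up. You can watch progress from redis-cli; the key names below assume scrapy_redis defaults ("<spider name>:requests" for the scheduler, "<spider name>:items" for RedisPipeline):

keys *
zcard posproductredis:requests    # pending requests (the priority queue is a sorted set)
llen posproductredis:items        # items collected so far by RedisPipeline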
