# Taobao/Tmall multi-threaded scraper (淘宝天猫多线程爬虫)

# -*- coding:utf-8 -*-
import re
import threading
import time
from queue import Empty, Queue

import pandas as pd
import requests


class Thread_crawl(threading.Thread):
    """Crawler thread: pulls page offsets from a queue, downloads the
    corresponding Taobao search page, and feeds the raw HTML to the
    module-level ``data_queue`` for the parser threads."""

    def __init__(self, threadID, q):
        threading.Thread.__init__(self)
        self.threadID = threadID  # human-readable label used in log output
        self.q = q  # Queue of page offsets (0, 44, 88, ...)

    def run(self):
        print("Starting " + self.threadID)
        self.taobao_spider()
        print("Exiting ", self.threadID)

    def taobao_spider(self):
        """Download search pages until the offset queue is exhausted.

        Up to 4 attempts are made per page; if all fail, the URL is
        logged and that page is skipped.
        """
        # One session per thread, created once: reuses the TCP connection
        # across pages instead of rebuilding it per page.
        session = requests.Session()
        session.headers['User-Agent'] = (
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
        )
        while True:
            # get_nowait() is atomic; the original empty()-then-get()
            # pattern could race with the other crawler threads.
            try:
                page = self.q.get_nowait()
            except Empty:
                break
            print('taobao_spider=', self.threadID, ',page=', str(page))
            url = ('https://s.taobao.com/search?q=T%E6%81%A4%E7%94%B7'
                   '&filter_tianmao=tmall&s=' + str(page))
            for _ in range(4):
                try:
                    # Explicit timeout: a stalled socket must not hang the
                    # thread forever.
                    content = session.get(url, timeout=10)
                    data_queue.put(content.text)
                    break
                except requests.RequestException as e:
                    print('taobao_spider', e)
            else:
                # All 4 attempts failed.  (The original `if timeout < 0`
                # check was unreachable — its loop stopped at 0.)
                print('timeout', url)


class Thread_Parser(threading.Thread):
    """Parser thread: takes raw HTML pages off a queue, extracts the
    embedded "auctions" JSON array, and appends the rows to the shared
    ``taobao_files`` DataFrame."""

    def __init__(self, threadID, queue, lock):
        threading.Thread.__init__(self)
        self.threadID = threadID  # human-readable label used in log output
        self.queue = queue  # queue of raw HTML pages (fed by the crawlers)
        self.lock = lock  # guards the shared DataFrame and counter

    def run(self):
        print('starting ', self.threadID)
        while not exitFlag_Parser:
            # Block briefly instead of spinning at full speed; Empty just
            # means "nothing yet, check the exit flag and retry".
            try:
                item = self.queue.get(timeout=0.1)
            except Empty:
                continue
            try:
                self.parse_data(item)
            except Exception as e:
                # Log the failure instead of the original bare
                # `except: pass`, which silently dropped pages and also
                # skipped task_done() for them.
                print('Thread_Parser error', e)
            finally:
                self.queue.task_done()
            print('Thread_Parser=', self.threadID, ',total=', total)
        print('Exiting ', self.threadID)

    def parse_data(self, item):
        """Extract the "auctions" JSON blob from one page's HTML and merge
        it into the module-level ``taobao_files`` DataFrame."""
        global total, taobao_files
        matches = re.findall(r'"auctions":(.*?),"recommendAuctions"', item)
        if not matches:
            # Page carried no listings (blocked / captcha page?) — skip it
            # instead of raising IndexError as the original [0] did.
            return
        json_file = matches[0]
        with self.lock:
            if json_file:
                table = pd.read_json(json_file)
                taobao_files = pd.concat([taobao_files, table],
                                         axis=0, ignore_index=True)
            total += 1


# Shared state between crawler threads, parser threads, and main().
data_queue = Queue()  # raw HTML pages: crawler threads -> parser threads
exitFlag_Parser = False  # set True by main() to tell parser threads to stop
lock = threading.Lock()  # guards taobao_files and total across parser threads
total = 0  # number of pages successfully parsed so far
taobao_files = pd.DataFrame()  # accumulated auction rows; written to Excel at the end


def main():
    """Seed the page-offset queue, run the crawler and parser threads,
    then write the accumulated auction data to taobao.xlsx."""
    # Taobao paginates 44 items per page, so the `s` offsets for the
    # first 10 pages are 0, 44, 88, ...
    pageQueue = Queue(50)
    for page in range(10):
        pageQueue.put(page * 44)

    # Start the download threads.
    crawlthreads = []
    for threadID in ["crawl-1", "crawl-2", "crawl-3"]:
        thread = Thread_crawl(threadID, pageQueue)
        thread.start()
        crawlthreads.append(thread)

    # Start the parser threads.
    parserthreads = []
    for threadID in ["parser-1", "parser-2", "parser-3"]:
        thread = Thread_Parser(threadID, data_queue, lock)
        thread.start()
        parserthreads.append(thread)

    # Crawlers exit on their own once pageQueue drains, so joining them
    # replaces the original `while not pageQueue.empty(): pass` busy-wait.
    for t in crawlthreads:
        t.join()

    # Poll with a short sleep instead of burning a CPU core spinning.
    while not data_queue.empty():
        time.sleep(0.1)

    # Tell the parser threads to leave their loops, then wait for them.
    global exitFlag_Parser
    exitFlag_Parser = True
    for t in parserthreads:
        t.join()
    print("Exiting Main Thread")

    # index=False (not None) is the documented way to omit the row index.
    taobao_files.to_excel('taobao.xlsx', index=False)


if __name__ == '__main__':
    main()

# Source: blog.csdn.net/weixin_40958757/article/details/80339318