Web Scraping Notes, Part 18: Scraping Taobao Product Information with Selenium and Headless Chrome (Asynchronously Loaded Pages)

Open Taobao and inspect the page structure with F12: Taobao is another asynchronously loaded site. Reverse-engineering the underlying API calls for this kind of site is not always easy, so here we use Selenium with headless Chrome instead. Many tutorials online pair Selenium with PhantomJS, but recent versions of Selenium have dropped PhantomJS support, so headless Chrome is used here; the approach is essentially the same. Because Selenium drives a real browser, asynchronously loaded sites become much easier to scrape. This experiment simulates visiting Taobao in a browser and searching for lipstick (口红); the scraped fields include the product name, link, shop, price, number of buyers, and shipping location, and the results are stored in MongoDB. The code is as follows:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
import time
import pymongo

# connect to a local MongoDB instance and select the target database/collection
client = pymongo.MongoClient('localhost', 27017)
mydb = client['mydb']
taobao = mydb['taobao']

# driver = webdriver.PhantomJS()  # no longer supported by recent Selenium releases
# run Chrome in headless mode instead
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(executable_path='E:/Pyhton_lib/chromedriver_win32/chromedriver.exe',
                          chrome_options=chrome_options)
driver.maximize_window()

def get_info(url, page):
    page = page + 1
    driver.get(url)
    driver.implicitly_wait(10)
    # parse the fully rendered page source with lxml
    selector = etree.HTML(driver.page_source)
    infos = selector.xpath('//div[@class="item J_MouserOnverReq  "]')
    for info in infos:
        # pull each field from the current item with relative XPaths
        goods_url = info.xpath('div[1]/div/div/a/@href')[0]
        goods = info.xpath('div[1]/div/div/a/img/@alt')[0]
        print("goods:%s" % goods)
        price = info.xpath('div[2]/div/div/strong/text()')[0]
        sell = info.xpath('div[2]/div/div[@class="deal-cnt"]/text()')[0]
        shop = info.xpath('div[2]/div[3]/div[1]/a/span[2]/text()')[0]
        address = info.xpath('div[2]/div[3]/div[2]/text()')[0]
        commodity = {
            'goods_url': goods_url,
            'goods': goods,
            'price': price,
            'sell': sell,
            'shop': shop,
            'address': address,
        }
        taobao.insert_one(commodity)
    # move to the next page only after every item on this page has been saved
    if page <= 100:
        NextPage(url, page)

def NextPage(url, page):
    driver.get(url)
    driver.implicitly_wait(10)
    # click the "next page" link at the bottom of the results
    driver.find_element_by_xpath('//a[@trace="srp_bottom_pagedown"]').click()
    time.sleep(4)  # give the next page time to render
    driver.get(driver.current_url)
    driver.implicitly_wait(10)
    get_info(driver.current_url, page)

if __name__ == "__main__":
    page = 1
    url = 'https://www.taobao.com'
    driver.get(url)
    driver.implicitly_wait(10)
    # type the query into the search box and submit
    driver.find_element_by_id('q').clear()
    driver.find_element_by_id('q').send_keys('口红')
    driver.find_element_by_class_name('btn-search').click()
    get_info(driver.current_url, page)
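
Note that the listing above uses the Selenium 3 API. In Selenium 4 the find_element_by_* helpers and the executable_path/chrome_options keyword arguments have been removed, so the setup looks slightly different on a recent install. A minimal equivalent sketch, assuming chromedriver is available on your PATH:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options

# minimal Selenium 4 setup; assumes chromedriver is on PATH
options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=options)

driver.get('https://www.taobao.com')
driver.implicitly_wait(10)
box = driver.find_element(By.ID, 'q')    # By locators replace find_element_by_*
box.clear()
box.send_keys('口红')
driver.find_element(By.CLASS_NAME, 'btn-search').click()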

[Screenshot of partial results omitted.]
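
To check what was actually written, the stored documents can be read back with pymongo, using the same database and collection names as above:

import pymongo

client = pymongo.MongoClient('localhost', 27017)
taobao = client['mydb']['taobao']

print(taobao.count_documents({}))     # number of products saved so far
for doc in taobao.find().limit(3):    # peek at a few records
    print(doc['goods'], doc['price'], doc['address'])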

Reposted from blog.csdn.net/cskywit/article/details/81237359