PYTHON爬虫(实战二)

用selenium爬取淘宝的商品信息

# -*- coding: utf-8 -*-
import re
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import config
import pymongo


options = Options()
# Run Chrome without a visible window. '--headless' is the documented
# Chromium switch; the original single-dash '-headless' is the Firefox
# spelling (Chromium tolerates it, but the canonical form is clearer).
options.add_argument('--headless')
# NOTE(review): 'chrome_options=' is deprecated since Selenium 3.8 in
# favour of 'options='; kept here for compatibility with the Selenium
# version this script was written against — confirm before upgrading.
brower = webdriver.Chrome(chrome_options=options)
wait = WebDriverWait(brower, 10)  # shared explicit-wait helper, 10 s timeout
client = pymongo.MongoClient(config.MONGO_URL)
db = client[config.MONGO_DB]


def search(retries=3):
    """Open the Taobao home page, search for '美食' and wait for the pager.

    Args:
        retries: how many additional attempts to make after a timeout.
            The original version retried forever via unbounded recursion,
            which could hit the recursion limit on a flaky network.

    Returns:
        The WebElement containing the total-page-count text of the
        result pager.

    Raises:
        TimeoutException: if the page still times out after ``retries``
            attempts.
    """
    try:
        brower.get('https://www.taobao.com/')
        # Search input box and the form's submit button.
        element = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#q"))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#J_TSearchForm > div.search-button > button"))
        )
        element.send_keys('美食')
        submit.click()
        # Wait for the result pager so the caller can read the page total.
        page = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.total"))
        )
        return page
    except TimeoutException:
        if retries <= 0:
            raise
        return search(retries - 1)


def next_page(page_number, retries=3):
    """Jump to result page ``page_number`` and scrape its products.

    Types the page number into the pager's input box, submits, waits for
    the highlighted page indicator to confirm the navigation, then calls
    get_products() on the new page.

    Args:
        page_number: 1-based page index to navigate to.
        retries: additional attempts after a timeout (the original
            retried forever via unbounded recursion).

    Raises:
        TimeoutException: if navigation still times out after ``retries``
            attempts.
    """
    try:
        # 'page_input' instead of 'input' — don't shadow the builtin.
        page_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > input"))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit"))
        )
        page_input.clear()
        page_input.send_keys(page_number)
        submit.click()
        # The active page marker must show the requested number before
        # the page is considered loaded.
        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > ul > li.item.active > span"), str(page_number)))
        get_products()
    except TimeoutException:
        if retries <= 0:
            raise
        next_page(page_number, retries - 1)


def get_products():
    """Parse every product card on the current result page and persist it.

    Waits until at least one item is rendered, parses the page source
    with pyquery, and hands each extracted record to save_to_mongo().
    """
    wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-itemlist .items .item"))
    )
    doc = pq(brower.page_source)
    for item in doc('#mainsrp-itemlist .items .item').items():
        img = item.find('.pic .img')
        # Lazily-loaded cards keep the real URL in 'data-src'; fall back
        # to 'src', then to '' so the concatenation below can never be
        # 'https:' + None (TypeError in the original).
        pic = img.attr('data-src') or img.attr('src') or ''
        product = {
            'pic': 'https:' + pic,
            'price': item.find('.price').text().replace('\n', ''),
            # [:-3] strips the trailing suffix of the deal count
            # (presumably the three characters "人付款" — verify).
            'count': item.find('.deal-cnt').text()[:-3],
            'title': item.find('.title').text().replace('\n', ''),
            'shop': item.find('.shop').text(),
            'city': item.find('.location').text()
        }
        save_to_mongo(product)


def save_to_mongo(result):
    """Insert one product document into the configured collection.

    Persistence is best-effort: failures are printed together with the
    offending record but never abort the crawl.

    Args:
        result: dict produced by get_products().
    """
    try:
        # Collection.insert() was deprecated and removed in PyMongo 4;
        # insert_one() is the supported replacement.
        db[config.MONGO_TABLE].insert_one(result)
        print('save to mongo success')
    except Exception:
        print('fail to save to mongo', result)


def main():
    """Run the crawl: search, scrape page 1, then walk every other page."""
    try:
        total_text = search().text
        # The pager text looks like "共 100 页" — pull out the number.
        total = int(re.search(r'(\d+)', total_text).group(1))
        # Bug fix: the original never scraped page 1 — search() lands on
        # it but get_products() was only called from next_page().
        get_products()
        for page_number in range(2, total + 1):
            next_page(page_number)
    except Exception as exc:
        # Report the actual failure instead of an opaque 'error'.
        print('error:', exc)
    finally:
        # quit() shuts down the whole driver session; close() would only
        # close the window and leak the chromedriver process.
        brower.quit()


# Entry point when executed as a script (not on import).
if __name__ == '__main__':
    main()

Selenium 已不再支持 PhantomJS，此处改用 Chrome 的无头（headless）模式。

猜你喜欢

转载自blog.csdn.net/stanwuc/article/details/82115063