from selenium import webdriver
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq

browser = webdriver.Chrome()


def search():
    """Open jd.com, search for "佩奇", scrape page 1, and return the page count.

    Returns:
        int: total number of result pages reported by the bottom pager.

    Side effects:
        Drives the module-level ``browser`` and prints page-1 products via
        ``get_product_media()``.
    """
    try:
        browser.get('https://www.jd.com/')
        # `search_box` instead of `input`: avoid shadowing the builtin.
        search_box = WebDriverWait(browser, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#key'))
        )
        submit = WebDriverWait(browser, 10).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#search > div > div.form > button > i'))
        )
        search_box.send_keys("佩奇")
        submit.click()
        total_pages = WebDriverWait(browser, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > em:nth-child(1) > b'))
        )
        get_product_media()
        return int(total_pages.text)
    except TimeoutException:
        # BUGFIX: the original called search() here but discarded its result,
        # so after a timeout the function returned None and main() crashed on
        # `pages + 1`. Propagate the retry's return value.
        return search()


def search_page(number):
    """Jump to result page *number* via the bottom page-skip box and scrape it.

    Args:
        number: 1-based page index to navigate to.
    """
    try:
        page_box = WebDriverWait(browser, 20).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > input'))
        )
        submit = WebDriverWait(browser, 20).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > a'))
        )
        page_box.clear()
        page_box.send_keys(number)
        submit.click()
        get_product_media()
    except StaleElementReferenceException:
        # The pager is re-rendered by the page while we interact with it;
        # retry with a fresh element lookup.
        search_page(number)


def get_product_media():
    """Parse the current result page and print each product's image URL."""
    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, '#J_goodsList .gl-item .p-img'))
    )
    html = browser.page_source
    # BUGFIX: JD's markup carries an xmlns attribute, which makes lxml (and
    # therefore PyQuery) parse the document as XML so CSS selection finds
    # nothing. Forcing the HTML parser fixes this cleanly, without the
    # `html.replace('xmlns', 'another_attr')` string hack.
    doc = pq(html, parser='html')
    for item in doc('#J_goodsList .gl-i-wrap ').items():
        # BUGFIX: the URL lives on the nested <img>, not on the .p-img <div>;
        # JD lazy-loads images, so fall back to data-lazy-img when src is absent.
        img = item.find('.p-img a img')
        product = {
            'image': img.attr('src') or img.attr('data-lazy-img'),
        }
        print(product)


def main():
    pages = search()
    print(type(pages))
    # Page 1 was already scraped inside search(); walk the remaining pages.
    for i in range(2, pages + 1):
        search_page(i)


if __name__ == '__main__':
    main()
# 注意：运行时如果弹出的 Chrome 窗口不是全屏模式，底部翻页控件可能不在可视区域内，
# 点击会失败，翻页因此无法进行；可在启动后调用 browser.maximize_window() 规避。
# 另：之前一直解析不到正确的 src，直到看了 https://www.cnblogs.com/airnew/p/10101698.html，
# 把 html = browser.page_source.replace('xmlns', 'another_attr') 之后才能正确解析。
# 原因：页面带有 xmlns 属性时，PyQuery 底层的 lxml 会按 XML（而非 HTML）解析文档，
# CSS 选择器随之失效；replace('xmlns', 'another_attr') 把该属性改名，迫使 lxml 退回
# HTML 解析，于是能取到 src。等价且更直接的做法是 pq(html, parser='html')。