# Originally used the PhantomJS browser, but it raised deprecation warnings,
# so this was switched to headless Firefox; headless Chrome works as well.
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import re
from pyquery import PyQuery as pq
import pymysql
#from config import *
import logging
import sys
# Selenium + headless Firefox: configure the webdriver.
firefox_options = Options()
# set_headless() is deprecated; assign the `headless` attribute instead.
firefox_options.headless = True
# The `firefox_options=` keyword is deprecated in favor of `options=`.
browser = webdriver.Firefox(options=firefox_options)
# Explicit wait: up to 10 seconds for elements to appear.
wait = WebDriverWait(browser, 10)
# Connect to MySQL. Positional connect() arguments were removed in
# PyMySQL 1.0, so pass everything by keyword.
conn = pymysql.connect(host='localhost', user='root', password='123456',
                       database='taobao', charset='utf8')
cur = conn.cursor()
# Logging setup: ERROR-level messages go to stdout with a timestamped format.
logger = logging.getLogger('meishierr')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
#file_handler = logging.FileHandler("meishi.log")
#file_handler.setFormatter(formatter)
console_hander = logging.StreamHandler(sys.stdout)
console_hander.setFormatter(formatter)
logger.setLevel(logging.ERROR)
#logger.addHandler(file_handler)
logger.addHandler(console_hander)
def search(keywords):
    """Open taobao.com, search for `keywords`, scrape result page 1, and
    return the text of the result-count element (e.g. '共 100 页').

    Retries itself on TimeoutException.
    """
    print('正在搜索')
    # Visit the site, then locate the search box and button via CSS selectors.
    try:
        browser.get('https://www.taobao.com')
        # Renamed from `input` to avoid shadowing the builtin.
        search_box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#q")))
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,
                          '#J_TSearchForm > div.search-button > button')))
        search_box.send_keys(keywords)
        submit.click()
        total = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.total')))
        get_page()
        return total.text
    except TimeoutException:
        print('###')
        # BUG FIX: the original retried with `search()` (no argument), which
        # raised TypeError instead of actually retrying the search.
        return search(keywords)
# 找到页面需要输入页码的位置找到css选择器
def next_page(page_num):
    """Jump to result page `page_num` via the pager's page-number input,
    wait for that page to become active, then scrape it.

    Retries itself on TimeoutException.
    """
    print("正在翻页", page_num)
    try:
        page_input = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > input")))
        go_button = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))
        # Clear any previous page number before typing the new one.
        page_input.clear()
        page_input.send_keys(page_num)
        go_button.click()
        # Wait until the pager highlights the requested page number.
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),
            str(page_num)))
        get_page()
    except TimeoutException:
        next_page(page_num)
def get_page():
    """Wait for the item grid of the current result page to load, parse every
    listing with PyQuery, print it, and persist it to MySQL."""
    print('开始获取详情')
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
    # Hand the full rendered page source to PyQuery for scraping.
    doc = pq(browser.page_source)
    # One `.item` node per listing; pull out the fields we store.
    for item in doc('#mainsrp-itemlist .items .item').items():
        record = {
            'image': item.find('.pic .img').attr('src'),
            # NOTE: keys 'prince' etc. are kept as-is to match the DB columns
            # used by save_to_mysql().
            'prince': item.find('.price').text(),
            # drop the last 3 characters of the deal count (its text suffix)
            'deal': item.find('.deal-cnt').text()[:-3],
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text(),
        }
        print(record)
        save_to_mysql(record)
def save_to_mysql(prodyct):
    """Insert one product record into the `meishi` table and commit.

    `prodyct` is the dict built by get_page(); its keys map 1:1 onto the
    table columns. Errors are logged rather than raised so one bad row does
    not stop the whole crawl.
    """
    sql = 'insert into meishi(image,prince,deal,title,shop,location) values(%s,%s,%s,%s,%s,%s);'
    params = (prodyct['image'], prodyct['prince'], prodyct['deal'],
              prodyct['title'], prodyct['shop'], prodyct['location'])
    try:
        cur.execute(sql, params)
        conn.commit()
    except Exception as e:
        # Roll back so a failed insert doesn't leave the connection stuck in
        # an aborted transaction for all subsequent rows.
        conn.rollback()
        # BUG FIX: the original called logging.error(), which went to the
        # root logger and bypassed the module's configured `logger`.
        logger.error(e)
def main():
    """Crawl every Taobao result page for the keyword and store listings."""
    keywords = '美食'
    total = search(keywords)
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, a SyntaxError in the future).
    total = int(re.search(r'\d+', total).group(0))
    print(total)
    # Page 1 was already scraped by search(); fetch pages 2..total.
    try:
        for i in range(2, total + 1):
            next_page(i)
    finally:
        # BUG FIX: close() only closes the window and was skipped entirely if
        # a page raised; quit() also shuts down the geckodriver process.
        browser.quit()
if __name__ =="__main__":
main()
#logger.removeHandler(file_handler)
logger.removeHandler(console_hander)
cur.close()
conn.close()