"""Python crawler, Day 2: Selenium element locators (selectors)."""
import time

from selenium import webdriver  # WebDriver entry point
from selenium.webdriver.common.by import By  # locator strategies (Selenium 4 API)
from selenium.webdriver.common.keys import Keys  # keyboard key constants

driver = webdriver.Chrome()

try:
    # Implicit wait: must be configured before get(); every element lookup
    # will poll for up to 10 s before raising NoSuchElementException.
    driver.implicitly_wait(10)

    driver.get('https://www.baidu.com/')

    # Fixed pause after get() so the page settles (tutorial-style wait;
    # WebDriverWait would be the production alternative).
    time.sleep(5)

    # find_element(...)  -> first matching element
    # find_elements(...) -> list of all matching elements
    # NOTE: the find_element_by_* helper methods were removed in Selenium 4;
    # use find_element(By.<STRATEGY>, value) instead.

    # Auto-login on Baidu: start
    # 1. By.LINK_TEXT — locate a link by its full visible text
    login_link = driver.find_element(By.LINK_TEXT, '登录')
    login_link.click()  # click the "log in" link

    time.sleep(1)

    # 2. By.ID — locate by the element's id attribute
    user_login = driver.find_element(By.ID, '*****************')
    user_login.click()

    time.sleep(1)

    # 3. By.CLASS_NAME — locate by a single class name
    user = driver.find_element(By.CLASS_NAME, 'pass-text-input-userName')
    user.send_keys('*****')

    # 4. By.NAME — locate by the name attribute
    pwd = driver.find_element(By.NAME, 'password')
    pwd.send_keys('*****')

    submit = driver.find_element(By.ID, '*********')
    submit.click()
    # end

    # 5. By.PARTIAL_LINK_TEXT — locate a link by a substring of its text
    login_link = driver.find_element(By.PARTIAL_LINK_TEXT, '登录')
    login_link.click()

    # 6. By.CSS_SELECTOR — CSS selector syntax:
    #    ".name" matches a class, "#name" matches an id
    login2_link = driver.find_element(By.CSS_SELECTOR, '.*********')
    login2_link.click()

    # 7. By.TAG_NAME — find_elements returns every element with this tag
    div = driver.find_elements(By.TAG_NAME, 'div')
    print(div)

    time.sleep(20)

finally:
    # quit() (not close()) shuts down all windows AND the driver process,
    # so browser/OS resources are released even if an exception occurred.
    # close() would only close the current window and leak the process.
    driver.quit()

  

猜你喜欢

转载自www.cnblogs.com/Auraro997/p/11119939.html
今日推荐