Selenium: other operations, XPath usage, action chains, and captcha-solving platforms

selenium waits for element to load

# The script runs faster than the page loads ---> we go to grab a tag ---> the tag is not rendered yet ---> grabbing it directly raises an error

# Explicit wait: add a separate wait for the one element you are about to look up
# Implicit wait: a single line; every element lookup will then automatically wait until the element has loaded (or the timeout expires)
	browser.implicitly_wait(10)
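
An explicit wait targets one specific lookup. A minimal sketch, assuming Baidu's search button (id 'su') as the element we wait for:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('https://www.baidu.com/')
# wait up to 10 seconds for this one element; other lookups are unaffected
tag = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, 'su')))
print(tag.get_attribute('value'))
browser.quit()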

Selenium element operations

# Type into an input box, clear its content
	tag.send_keys('text to type')
    tag.clear()
# Click a button
	tag.click()
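
A small runnable sketch of these calls, assuming Baidu's search box (id 'kw') and search button (id 'su') as the targets:

from selenium import webdriver
from selenium.webdriver.common.by import By
import time

bro = webdriver.Chrome()
bro.get('https://www.baidu.com/')
bro.implicitly_wait(10)

tag = bro.find_element(By.ID, 'kw')    # the search input (assumed id)
tag.send_keys('selenium')              # type text into the input
time.sleep(1)
tag.clear()                            # clear it again
tag.send_keys('xpath')
bro.find_element(By.ID, 'su').click()  # click the search button (assumed id)
time.sleep(2)
bro.quit()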
    

Selenium executes js

#1  The page may define global JS variables; we can print them out directly
#2  Manipulate the page with JS

from selenium import webdriver
import time
bro = webdriver.Chrome()
bro.get('https://www.pearvideo.com/')
bro.implicitly_wait(10)

# bro.execute_script('alert(urlMap.loginUrl);')
# Get the current page's cookies
# bro.execute_script('alert(document.cookie)')
# bro.execute_script('alert(window.location)')


bro.execute_script('scrollTo(0,document.documentElement.scrollHeight)')
# Things execute_script can do:
# 	- get the current URL:  window.location
#     - open a new tab
#     - scroll the page --> bro.execute_script('scrollTo(0,document.documentElement.scrollHeight)')
#     - read cookies and globally defined variables
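
# Not in the original notes: execute_script can also hand a value back to Python
# by putting 'return' in front of the JS expression
title = bro.execute_script('return document.title')
current_url = bro.execute_script('return window.location.href')
print(title, current_url)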

time.sleep(5)
bro.close() # close the current tab
bro.quit()  # quit the whole browser

Selenium switching tabs

from selenium import webdriver
import time
bro = webdriver.Chrome()
bro.get('https://www.pearvideo.com/')
bro.implicitly_wait(10)
print(bro.window_handles)
# Open a new tab
bro.execute_script('window.open()')
# window_handles now lists every open tab
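# (Selenium 4 alternative: bro.switch_to.new_window('tab') opens a new tab and
#  switches to it in one call.)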

bro.switch_to.window(bro.window_handles[1]) # switch to the new tab
bro.get('http://www.taobao.com')

time.sleep(2)
bro.switch_to.window(bro.window_handles[0]) # switch back to the first tab
bro.get('http://www.baidu.com')

time.sleep(2)
bro.close() # close the current tab
bro.quit()  # quit the whole browser

Selenium forward and backward, exception handling

bro.back()
time.sleep(2)
bro.forward()
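
The heading also promises exception handling; a minimal sketch of the usual try/except/finally pattern (the URLs are just placeholders):

from selenium import webdriver
import time

bro = webdriver.Chrome()
bro.implicitly_wait(10)
try:
    bro.get('https://www.baidu.com/')
    bro.get('https://www.pearvideo.com/')
    bro.back()      # back to the first page
    time.sleep(2)
    bro.forward()   # forward again
except Exception as e:
    print(e)        # element not found, timeout, navigation error, ...
finally:
    bro.quit()      # always release the browser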

Selenium login to cnblogs

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import json

##### Step 1: log in and save the cookies
# bro = webdriver.Chrome()
# bro.get('https://www.cnblogs.com/')
# bro.implicitly_wait(5)
# bro.maximize_window()
# login_btn = bro.find_element(By.LINK_TEXT, '登录')
# login_btn.click()
# time.sleep(2)
# username = bro.find_element(By.CSS_SELECTOR, '#mat-input-0')
# password = bro.find_element(By.ID, 'mat-input-1')
# submit_button = bro.find_element(By.CSS_SELECTOR,
#                                  'body > app-root > app-sign-in-layout > div > div > app-sign-in > app-content-container > div > div > div > form > div > button > span.mat-button-wrapper')
# username.send_keys('[email protected]')
# password.send_keys('LiuQingzheng12#')
#
# submit_button.click()
#
# # A captcha may appear
# input('')  # solve the captcha by hand, then press Enter so the script continues
# time.sleep(2)  # logged in successfully; cookies are now set
# cookies = bro.get_cookies()
# print(cookies)
# with open('cnblogs.json', 'w', encoding='utf-8') as f:
#     json.dump(cookies, f)
#
# time.sleep(2)
# bro.close()


### Step 2: open the site again and reuse the saved cookies
bro = webdriver.Chrome()
bro.get('https://www.cnblogs.com/')
bro.implicitly_wait(5)
bro.maximize_window()
time.sleep(3)
# local cookies (in practice they could come from a cookie pool)
with open('./cnblogs.json','r',encoding='utf-8') as f:
    cookies=json.load(f)
for item in cookies:  # we saved a list of dicts; add_cookie takes one dict at a time
    bro.add_cookie(item)
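# add_cookie only accepts cookies for the domain that is currently loaded,
# which is why cnblogs.com is opened before the cookies are injected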


bro.refresh() # refresh the page; we are now logged in
time.sleep(5)
bro.close()

Chouti (抽屉) semi-automatic upvoting

import time
import json

import requests
## Log in with selenium; handle the captcha manually
from selenium import webdriver
from selenium.webdriver.common.by import By

# bro = webdriver.Chrome()
# bro.implicitly_wait(5)
# bro.get('https://dig.chouti.com/')
#
# btn_login = bro.find_element(By.ID, 'login_btn')
# # btn_login.click()  # clicking directly fails here, so click via JS instead
# bro.execute_script("arguments[0].click()", btn_login)
# time.sleep(2)
#
# phone = bro.find_element(By.NAME, 'phone')
# password = bro.find_element(By.NAME, 'password')
# phone.send_keys('18953675221')
# password.send_keys('lqz123')
# time.sleep(2)
# btn_login1 = bro.find_element(By.CSS_SELECTOR,
#                               'body > div.login-dialog.dialog.animated2.scaleIn > div > div.login-footer > div:nth-child(4) > button')
# btn_login1.click()
# # A captcha may pop up; handle it by hand
# input('done with the captcha?')  # press Enter so the script continues
# time.sleep(2)
# cookies = bro.get_cookies()
# print(cookies)
# with open('chouti.json', 'w', encoding='utf-8') as f:
#     json.dump(cookies, f)
#
# time.sleep(2)
# bro.close()


### Grab the hot news items (24h list) to upvote
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
}
# Read back the saved cookies
with open('./chouti.json', 'r', encoding='utf-8') as f:
    cookies = json.load(f)
# Convert the cookies saved by selenium into the dict format requests can use
real_cookie = {}
for item in cookies:
    real_cookie[item['name']] = item['value']
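# (Alternative: requests.utils.cookiejar_from_dict(real_cookie) builds a CookieJar,
#  but a plain dict also works for the cookies= argument used below.)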

print(real_cookie)

res = requests.get('https://dig.chouti.com/top/24hr?_=1689043464339', headers=header).json()
for item in res.get('data'):
    link_id = item.get('id')
    # Without cookies the vote is rejected; with them you could upvote every item on the page
    data = {
        'linkId': link_id
    }
    res = requests.post('https://dig.chouti.com/link/vote', headers=header, data=data, cookies=real_cookie)
    print(res.text)

XPath usage

# Two general ways to locate elements (tags) on a page:
    - CSS selectors
    - XPath: the XML Path Language, a language for addressing parts of an XML (or HTML) document


# XPath syntax
    div   selects div tags
    /     selects from the root
    //    selects matching nodes anywhere in the document, regardless of position
    .     selects the current node
    ..    selects the parent of the current node
    @     selects an attribute

    
    

doc = '''
<html>
 <head>
  <base href='http://example.com/' />
  <title>Example website</title>
 </head>
 <body>
  <div id='images'>
   <a href='image1.html' id='lqz'>Name: My image 1 <br /><img src='image1_thumb.jpg' /></a>
   <a href='image2.html'>Name: My image 2 <br /><img src='image2_thumb.jpg' /></a>
   <a href='image3.html'>Name: My image 3 <br /><img src='image3_thumb.jpg' /></a>
   <a href='image4.html'>Name: My image 4 <br /><img src='image4_thumb.jpg' /></a>
   <a href='image5.html' class='li li-item' name='items'>Name: My image 5 <br /><img src='image5_thumb.jpg' /></a>
   <a href='image6.html' name='items'><span><h5>test</h5></span>Name: My image 6 <br /><img src='image6_thumb.jpg' /></a>
  </div>
 </body>
</html>
'''
from lxml import etree

html = etree.HTML(doc)
# html=etree.parse('search.html',etree.HTMLParser())
# 1 All nodes
# a=html.xpath('//*')
# 2 A specific node (result is a list)
# a=html.xpath('//head')
# 3 Child nodes, descendant nodes
# a=html.xpath('//div/a')
# a=html.xpath('//body/a') # no results: a is not a direct child of body
# a=html.xpath('//body//a')
# 4 Parent node
# a=html.xpath('//body//a[@href="image1.html"]/..')
# a=html.xpath('//body//a[1]/..')
# this also works
# a=html.xpath('//body//a[1]/parent::*')
# a=html.xpath('//body//a[1]/parent::div')
# 5 Attribute matching
# a=html.xpath('//body//a[@href="image1.html"]')

# 6 Getting text     /text()
# a=html.xpath('//body//a[@href="image1.html"]/text()')

# 7 Getting attributes     @attribute-name
# a=html.xpath('//body//a/@href')
# # note: indexing starts at 1 (not 0)
# a=html.xpath('//body//a[1]/@href')

# 8 Matching an attribute with multiple values
#  the a tag has several classes, so an exact match fails; use contains
# a=html.xpath('//body//a[@class="li"]')
# a=html.xpath('//body//a[contains(@class,"li")]')
# a=html.xpath('//body//a[contains(@class,"li")]/text()')
# 9 Matching multiple attributes
# a=html.xpath('//body//a[contains(@class,"li") or @name="items"]')
# a=html.xpath('//body//a[contains(@class,"li") and @name="items"]/text()')
# a=html.xpath('//body//a[contains(@class,"li")]/text()')
# 10 Selecting by position
# a=html.xpath('//a[2]/text()')
# a=html.xpath('//a[2]/@href')
# take the last one
# a=html.xpath('//a[last()]/@href')
# a=html.xpath('//a[last()-1]/@href') # second to last
# position less than 3
# a = html.xpath('//a[position()<3]/@href')

# third from the end
# a=html.xpath('//a[last()-2]/@href')
# 11 Axis selection
# ancestor: ancestor nodes
# using * selects every ancestor node
# a=html.xpath('//a/ancestor::*')
# # get the div among the ancestors
# a=html.xpath('//a/ancestor::div')
# attribute: attribute values
# a=html.xpath('//a[1]/attribute::*')
# a=html.xpath('//a[1]/attribute::href')

# child: direct children
# a=html.xpath('//a[1]/child::*')
# descendant: all descendants
# a=html.xpath('//a[6]/descendant::*')
# following: every node after the current node
# a=html.xpath('//a[1]/following::*')
# a=html.xpath('//a[1]/following::*[1]/@href')
# following-sibling: sibling nodes after the current node
# a=html.xpath('//a[1]/following-sibling::*')
# a=html.xpath('//a[1]/following-sibling::a')
# a=html.xpath('//a[1]/following-sibling::*[2]')
# a=html.xpath('//a[1]/following-sibling::*[2]/@href')

# print(a)


'''
/
//
.
..
get text             /text()
get an attribute     /@attribute-name
filter by attribute  [@attribute-name=value]
class is special:
[contains(@class,"li")]
'''


# Ultimate shortcut: browser devtools can copy an element's XPath for you (right-click the node > Copy > Copy XPath)

action chain

# Simulate pressing and dragging the mouse, e.g. to pass slider captchas

# Two forms (a full runnable example follows below)
	- Form 1: one action chain, executed serially
        actions=ActionChains(bro) # get an action-chain object
        actions.drag_and_drop(sourse,target) # queue the action on the chain
        actions.perform() # run the queued actions
    - Form 2: a fresh action chain for every small move
    	ActionChains(bro).click_and_hold(sourse).perform()
    	distance=target.location['x']-sourse.location['x']
        track=0
        while track < distance:
            ActionChains(bro).move_by_offset(xoffset=2,yoffset=0).perform()
            track+=2

from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.support.wait import WebDriverWait  # wait for certain elements on the page to load
import time
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
driver.implicitly_wait(3)
driver.maximize_window()

try:
    driver.switch_to.frame('iframeResult')  # switch into the iframeResult iframe
    sourse = driver.find_element(By.ID, 'draggable')
    target = driver.find_element(By.ID, 'droppable')

    # Form 1: everything queued on one action chain and executed serially
    # actions = ActionChains(driver)  # get an action-chain object
    # actions.drag_and_drop(sourse, target)  # queue the drag-and-drop on the chain
    # actions.perform()

    # Form 2: a fresh action chain for every small move
    ActionChains(driver).click_and_hold(sourse).perform()  # press and hold the source element
    distance=target.location['x']-sourse.location['x']

    track = 0
    while track < distance:
        ActionChains(driver).move_by_offset(xoffset=2, yoffset=0).perform()
        track += 2
    ActionChains(driver).release().perform()
    time.sleep(10)

finally:
    driver.close()

Automatic login 12306

import time
from selenium.webdriver import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
# 12306 detects automated-testing software, so hide the automation flag
options = Options()
options.add_argument("--disable-blink-features=AutomationControlled")  # hide the "automation controlled" hint
bro = webdriver.Chrome(options=options)
bro.get('https://kyfw.12306.cn/otn/resources/login.html')
bro.implicitly_wait(5)
bro.maximize_window()
user_login = bro.find_element(By.CSS_SELECTOR,
                              '#toolbar_Div > div.login-panel > div.login-box > ul > li.login-hd-code.active > a')

user_login.click()
time.sleep(1)

username = bro.find_element(By.ID, 'J-userName')
password = bro.find_element(By.ID, 'J-password')
submit_btn = bro.find_element(By.ID, 'J-login')
username.send_keys('18953675221')
password.send_keys('')
time.sleep(3)
submit_btn.click()

time.sleep(5)

# find the slider
span = bro.find_element(By.ID, 'nc_1_n1z')
ActionChains(bro).click_and_hold(span).perform()
ActionChains(bro).move_by_offset(xoffset=300, yoffset=0).perform()
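# (A single 300px jump may still be flagged as a bot; moving in small steps,
#  as in the drag-and-drop demo above, tends to look more human.)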
ActionChains(bro).release().perform()
time.sleep(5)

bro.close()

Captcha-solving (coding) platforms

# Captchas we will run into:
    - simple digits and letters
    - harder ones: arithmetic questions, idiom puzzles
    - "select every bus in the picture"
    ...


# Third-party solution: a captcha-solving (coding) platform --- you send it the captcha image, it cracks it and returns the answer, for a fee
 

Automatic login to the captcha-solving platform

import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from chaojiying import ChaojiyingClient
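# (chaojiying.py is not shown here; it is presumably the Python demo client
#  downloadable from the Chaojiying site, saved next to this script.)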
from PIL import Image
bro = webdriver.Chrome()
bro.get('http://www.chaojiying.com/apiuser/login/')
bro.implicitly_wait(10)
bro.maximize_window()
try:
    username = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[1]/input')
    password = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[2]/input')
    code = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[3]/input')
    btn = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[4]/input')
    username.send_keys('306334678')
    password.send_keys('lqz123')

    # Grab the captcha image:
    # 1 screenshot the whole page
    bro.save_screenshot('main.png')
    # 2 use Pillow to crop the captcha out of the full screenshot -> code.png
    img = bro.find_element(By.XPATH, '/html/body/div[3]/div/div[3]/div[1]/form/div/img')
    location = img.location
    size = img.size
    print(location)
    print(size)
    # bounding box of the captcha inside the big screenshot
    img_tu = (int(location['x']), int(location['y']), int(location['x'] + size['width']), int(location['y'] + size['height']))
    # open the full screenshot
    img = Image.open('./main.png')
    # crop out the captcha
    fram = img.crop(img_tu)
    # save the cropped captcha image
    fram.save('code.png')
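    # (Simpler alternative in recent Selenium versions: the captcha WebElement's own
    #  .screenshot('code.png') method captures just that element, skipping the Pillow crop.)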


    chaojiying = ChaojiyingClient('3053345678', 'lqz123', '950575')  # user center >> software ID: generate your own and substitute it here
    im = open('code.png', 'rb').read()
    result = chaojiying.PostPic(im, 1902)  # 1902 is the captcha type; see the price list on the official site
    print(result)  # call PostPic only once and reuse the result (every call costs money)
    res_code = result['pic_str']
    code.send_keys(res_code)
    time.sleep(5)
    btn.click()
    time.sleep(10)
except Exception as e:
    print(e)

finally:
    bro.close()
    
    

Selenium crawls Jingdong product information


import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys  # keyboard key operations
from selenium.webdriver.chrome.options import Options



def get_goods(bro):
    # scroll the page down a bit so lazy-loaded items render
    bro.execute_script('scrollTo(0,5000)')
    goods = bro.find_elements(By.CLASS_NAME, 'gl-item')
    print(len(goods))
    for good in goods:
        try:
            price = good.find_element(By.CSS_SELECTOR, 'div.p-price i').text
            url = good.find_element(By.CSS_SELECTOR, 'div.p-img>a').get_attribute('href')
            commit = good.find_element(By.CSS_SELECTOR, 'div.p-commit a').text
            name = good.find_element(By.CSS_SELECTOR, 'div.p-name em').text
            img = good.find_element(By.CSS_SELECTOR, 'div.p-img img').get_attribute('src')
            if not img:
                img = 'https:' + good.find_element(By.CSS_SELECTOR, 'div.p-img img').get_attribute('data-lazy-img')
            print('''
            Product name: %s
            Product price: %s
            Product comments: %s
            Product image: %s
            Product link: %s
            ''' % (name, price, commit, img, url))
        except Exception as e:
            print(e)
            continue

    # find the "next page" (下一页) button and click it
    next = bro.find_element(By.PARTIAL_LINK_TEXT, '下一页')
    next.click()
    get_goods(bro)  # recurse to scrape the next page
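    # (There is no explicit stop condition: the recursion ends only when finding or
    #  clicking '下一页' eventually fails and the error reaches the caller's except block.)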




options = Options()
options.add_argument("--disable-blink-features=AutomationControlled")  # hide the "automation controlled" hint
bro = webdriver.Chrome(options=options)
bro.get('https://www.jd.com/')
bro.maximize_window()
bro.implicitly_wait(10)
try:
    search_input=bro.find_element(By.ID,'key')
    search_input.send_keys('mac pro')
    search_input.send_keys(Keys.ENTER)
    # search_input.send_keys(Keys.BACKSPACE)
    get_goods(bro)
except Exception as e:
    print(e)
finally:
    bro.close()

