day03 Selenium (continued) and hands-on practice (scraping JD product info), plus BeautifulSoup4

'''
Part 1: Remaining Selenium topics
- Click and clear
  click, clear
- ActionChains
  An action-chain object; the driver must be passed in.
  It can perform a series of predefined actions.
- Switching frames
  driver.switch_to.frame
- Executing JavaScript code
'''
- Click and clear
click clear
from selenium import webdriver  # web driver
from selenium.webdriver.common.by import By  # how to locate elements, e.g. By.ID, By.CSS_SELECTOR
from selenium.webdriver.common.keys import Keys  # keyboard key operations
from selenium.webdriver.support import expected_conditions as EC  # used together with WebDriverWait below
from selenium.webdriver.support.wait import WebDriverWait  # wait for certain elements on the page to load
import time

driver = webdriver.Chrome()

try:
    driver.implicitly_wait(10)
    driver.get('https://www.jd.com/')
    time.sleep(5)

    # Click and clear
    input_tag = driver.find_element_by_id('key')  # renamed to avoid shadowing the built-in input()
    input_tag.send_keys('围城')

    # Locate the search button by class name
    search = driver.find_element_by_class_name('button')
    search.click()  # click the search button
    time.sleep(3)

    input2 = driver.find_element_by_id('key')
    input2.clear()  # clear the input box

    time.sleep(1)

    input2.send_keys('墨菲定律')
    input2.send_keys(Keys.ENTER)
    time.sleep(10)
finally:
    # Close the browser and release OS resources
    driver.close()
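
WebDriverWait and expected_conditions are imported above but never used; a minimal sketch of an explicit wait on the same JD search box (the 10-second timeout is an arbitrary choice):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

driver = webdriver.Chrome()
try:
    driver.get('https://www.jd.com/')
    # Block for up to 10 seconds until the search box is present in the DOM;
    # raises TimeoutException if it never appears.
    wait = WebDriverWait(driver, 10)
    input_tag = wait.until(EC.presence_of_element_located((By.ID, 'key')))
    input_tag.send_keys('围城')
finally:
    driver.close()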

- ActionChains
An action-chain object; the driver must be passed in. It can perform a series of predefined actions.

from selenium import webdriver  # web driver
from selenium.webdriver import ActionChains
import time

driver = webdriver.Chrome()

try:
    driver.implicitly_wait(10)
    driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    time.sleep(5)

    driver.switch_to.frame('iframeResult')
    time.sleep(1)

    # Get an ActionChains object
    action = ActionChains(driver)

    # Source square, id: draggable
    source = driver.find_element_by_id('draggable')

    # Target square, id: droppable
    target = driver.find_element_by_id('droppable')

    # Method 1: jump in a single step
    # The source square moves instantly onto the target square.
    # A composed action must call perform() to actually run.
    # action.drag_and_drop(source, target).perform()
    # time.sleep(10)

    # Method 2: move bit by bit
    print(source.size)  # size
    print(source.tag_name)  # tag name
    print(source.text)  # text
    print(source.location['x'])  # x coordinate
    print(target.location['x'])

    # Compute the distance to slide
    distance = target.location['x'] - source.location['x']

    # Press and hold the source square
    ActionChains(driver).click_and_hold(source).perform()

    s = 0
    while s < distance:
        ActionChains(driver).move_by_offset(xoffset=2, yoffset=0).perform()
        s += 2
        time.sleep(0.1)

    # Release the source square
    ActionChains(driver).release().perform()
    time.sleep(10)
finally:
    # Close the browser and release OS resources
    driver.close()
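
Besides dragging, an ActionChains object can queue several actions and replay them with a single perform() call; a minimal sketch against JD's search box (id 'key', as used earlier):

from selenium import webdriver
from selenium.webdriver import ActionChains

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.jd.com/')
    box = driver.find_element_by_id('key')
    # Nothing happens until perform(); the chain then runs in order.
    ActionChains(driver).move_to_element(box).click(box).send_keys('围城').perform()
finally:
    driver.close()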

Executing JavaScript code
from selenium import webdriver  # web driver
import time

driver = webdriver.Chrome()

try:
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com')

    driver.execute_script(
        '''
        alert("hello from selenium")
        '''
    )

    time.sleep(10)

finally:
    # Close the browser and release OS resources
    driver.close()
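
execute_script can also hand values back to Python and drive the page directly; a minimal sketch on the same page:

from selenium import webdriver

driver = webdriver.Chrome()
try:
    driver.get('https://www.baidu.com')
    # A 'return' inside the script becomes the Python return value.
    print(driver.execute_script('return document.title'))
    # Scroll to the bottom of the page (handy for lazy-loaded content).
    driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
finally:
    driver.close()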
 
Browser back and forward
from selenium import webdriver  # web driver
import time

browser = webdriver.Chrome()
browser.implicitly_wait(10)
browser.get('https://www.baidu.com')
browser.get('https://www.jd.com')
browser.get('https://www.sina.com')

# Go back in the browsing history
browser.back()
time.sleep(10)

# Go forward
browser.forward()
time.sleep(3)
browser.close()
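
Alongside back() and forward(), a driver can refresh the page and switch between windows/tabs; a minimal sketch that opens a second tab via JS:

from selenium import webdriver
import time

browser = webdriver.Chrome()
try:
    browser.get('https://www.baidu.com')
    browser.execute_script("window.open('https://www.jd.com')")
    time.sleep(2)
    print(browser.window_handles)  # handles of all open windows/tabs
    browser.switch_to.window(browser.window_handles[1])
    print(browser.current_url)  # now on the JD tab
    browser.switch_to.window(browser.window_handles[0])
    browser.refresh()  # reload the original tab
finally:
    browser.quit()  # quit() closes every window; close() only the current one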

Scraping JD product info

from selenium import webdriver
from selenium.webdriver.common.by import By  # how to locate elements, e.g. By.ID, By.CSS_SELECTOR
from selenium.webdriver.common.keys import Keys  # keyboard key operations
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait  # wait for certain elements on the page to load
import time


def get_goods(driver):
    try:
        goods = driver.find_elements_by_class_name('gl-item')

        for good in goods:
            detail_url = good.find_element_by_tag_name('a').get_attribute('href')

            p_name = good.find_element_by_css_selector('.p-name em').text.replace('\n', '')
            price = good.find_element_by_css_selector('.p-price i').text
            p_commit = good.find_element_by_css_selector('.p-commit a').text

            msg = '''
            Product : %s
            Link : %s
            Price : %s
            Comments : %s
            ''' % (p_name, detail_url, price, p_commit)

            print(msg, end='\n\n')

            with open('jd.txt', 'a', encoding='utf-8') as f:
                f.write(msg)
                print('written to jd.txt')

        # '下一页' is the "next page" link text on JD's result page
        button = driver.find_element_by_partial_link_text('下一页')
        button.click()
        time.sleep(1)
        get_goods(driver)  # recurse into the next page
    except Exception:
        # Reaching the last page (no "next page" link) ends the crawl.
        pass


def spider(url, keyword):
    driver = webdriver.Chrome()
    driver.get(url)
    driver.implicitly_wait(3)  # use an implicit wait
    try:
        input_tag = driver.find_element_by_id('key')
        input_tag.send_keys(keyword)
        input_tag.send_keys(Keys.ENTER)
        get_goods(driver)
    finally:
        driver.close()


if __name__ == '__main__':
    spider('https://www.jd.com/', keyword='墨菲定律')
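
JD's result page lazy-loads the lower half of each page, so some .gl-item entries may be empty until the page has been scrolled; a minimal sketch of a scroll helper to call before get_goods (the step count and delay are arbitrary choices):

import time

def scroll_to_bottom(driver, steps=5):
    # Scroll down in several steps so lazy-loaded items render
    # before get_goods() reads them.
    for i in range(1, steps + 1):
        driver.execute_script(
            'window.scrollTo(0, document.body.scrollHeight * arguments[0] / arguments[1])',
            i, steps)
        time.sleep(0.5)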
Installing and using bs4
'''
Install the parser:
pip3 install lxml

Install the parsing library:
pip3 install bs4
'''
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>
<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
from bs4 import BeautifulSoup

# Python's built-in parser
# soup = BeautifulSoup(html_doc, 'html.parser')

# Call bs4 to get a soup object
soup = BeautifulSoup(html_doc, 'lxml')

# the bs4 object
print(soup)

# its type
print(type(soup))

# prettify: re-indent the document
html = soup.prettify()
print(html)

bs4 parsing and traversing the document tree

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>
<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc, 'lxml')
# print(soup)
# print(type(soup))
# Traversing the document tree
# 1. Direct attribute access  *****
print(soup.html)
print(type(soup.html))
print(soup.a)
print(soup.p)

# 2. Get a tag's name
print(soup.a.name)

# 3. Get a tag's attributes  *****
print(soup.a.attrs)  # all attributes of the a tag
print(soup.a.attrs['href'])

# 4. Get a tag's text content  *****
print(soup.p.text)  # $37

# 5. Nested selection
print(soup.html.body.p)

# 6. Child nodes and descendants
print(soup.p.children)  # returns an iterator
print(list(soup.p.children))  # [<b>$37</b>]

# 7. Parent and ancestor nodes
print(soup.b.parent)
print(soup.b.parents)  # generator of ancestors
print(list(soup.b.parents))

# 8. Sibling nodes
print(soup.a)
# Get the next sibling node
print(soup.a.next_sibling)

# Get all following siblings; returns a generator
print(soup.a.next_siblings)
print(list(soup.a.next_siblings))

# Get the previous sibling node
print(soup.a.previous_sibling)
# Get all preceding siblings; returns a generator
print(list(soup.a.previous_siblings))
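
Point 6 covers children, which yields direct children only; continuing with the same soup, the .descendants generator walks the whole subtree:

# .children stops at <b>; .descendants also yields the text node inside it.
print(list(soup.p.children))  # [<b>$37</b>]
print(list(soup.p.descendants))  # [<b>$37</b>, '$37']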

bs4: searching the document tree

'''
find: returns the first match
find_all: returns all matches

Searching by tag and by attribute:

name   tag-name matching
attrs  attribute matching
text   text matching

Tag filters:
- String filter
  exact string match
- Regex filter
  match with the re module
- List filter
  match any item in the list
- Bool filter
  True matches any value
- Method filter
  for queries that require some attributes and exclude others

Attribute shortcuts:
- class_
- id
'''
html_doc = """
<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<a href="http://example.com/elsie" class="sister" >Elsie</a><a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>and they lived at the bottom of a well.</p><p class="story">...</p>
"""
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc, 'lxml')

# name   tag name
# attrs  attribute matching
# text   text matching
# Searching the document with find and find_all

'''
String filter
'''
p = soup.find(name='p')
p_s = soup.find_all(name='p')

print(p)
print(p_s)

# name + attrs
p = soup.find(name='p', attrs={"id": "p"})
print(p)

# name + text
tag = soup.find(name='title', text="The Dormouse's story")
print(tag)

# name + attrs + text
tag = soup.find(name='a', attrs={"class": "sister"}, text="Elsie")
print(tag)

'''
- Regex filter
match with the re module
'''
import re
# name
# use the re module to match tags whose name contains 'a'
a = soup.find(name=re.compile('a'))
print(a)

a_s = soup.find_all(name=re.compile('a'))
print(a_s)


# attrs
a = soup.find(attrs={"id": re.compile('link')})
print(a)


# - List filter
# match any item in the list
print(soup.find(name=['a', 'p', 'html', re.compile('a')]))
print(soup.find_all(name=['a', 'p', 'html', re.compile('a')]))


# - Bool filter
# True matches any value
print(soup.find(name=True, attrs={"id": True}))

# - Method filter
# for queries that require some attributes and exclude others

def have_id_not_class(tag):
    # print(tag.name)
    if tag.name == 'p' and tag.has_attr("id") and not tag.has_attr("class"):
        return tag

# Pass the function object as name
print(soup.find_all(name=have_id_not_class))  # [] here: the only <p> with an id also has a class

# Extra shortcuts:
# id
a = soup.find(id='link2')
print(a)

# class (class is a Python keyword, hence the trailing underscore)
p = soup.find(class_='sister')
print(p)
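
Putting the filters together on the same html_doc, a minimal sketch collecting every sister link's href:

# Collect the href of each <a class="sister"> in document order.
for a in soup.find_all(name='a', attrs={"class": "sister"}):
    print(a.get('href'))
# http://example.com/elsie
# http://example.com/lacie
# http://example.com/tillie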





Reposted from www.cnblogs.com/cl007/p/11127733.html