[Python3 Web Scraping (7)] [Data Parsing] [xpath]

Previous post: [Python3 Web Scraping (6)] [Data Parsing] [Regular Expressions (2)]

++++++++++ Start ++++++++++++++++++

1. xpath

1.1 Mind map

(Mind map image from the original post, not reproduced here.)

1.2 A first taste of xpath

news_xpath2.py

import requests

# Install lxml, the parsing library used here for HTML and XML:
# pip install lxml
from lxml import etree


url = 'http://news.baidu.com/'
headers = {
    "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/70.0.3538.77 Safari/537.36 '
}
data = requests.get(url, headers=headers).content.decode()


# 1. Convert the page string into a parseable object
xpath_data = etree.HTML(data)


# xpath syntax: 1. step into a child node: /
#               2. skip levels (any descendant): //
#               3. pick an exact tag by attribute: //a[@attr="value"]
#               4. text wrapped by a tag: text()
#               5. attribute value: @href
#               xpath always returns a list
#               (each rule is demonstrated in the standalone sketch after this script)

# 2. Call the xpath method
# result = xpath_data.xpath('/html/head/title//text()')
# ['百度新闻——海量中文资讯平台']

# result = xpath_data.xpath('//a/text()')

# Target element on the page:
# <a href="https://3w.huanqiu.com/a/9eda3d/403anuA4I4v?agt=8" target="_blank"
#    mon="ct=1&amp;a=2&amp;c=top&amp;pn=18">亚美尼亚公布击毁阿塞拜疆坦克视频<span class="related-video-icon"></span></a>

# result = xpath_data.xpath('//a[@mon="ct=1&a=2&c=top&pn=18"]/text()')
# ['亚美尼亚公布击毁阿塞拜疆坦克视频']

result = xpath_data.xpath('//a[@mon="ct=1&a=2&c=top&pn=18"]/@href')
# ['https://3w.huanqiu.com/a/9eda3d/403anuA4I4v?agt=8']

# Print the number of matches
# print(len(result))

print(result)

# with open('02news.html', 'w') as f:
#     f.write(data)
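The five rules are easier to see in action. Below is a minimal standalone sketch against a made-up snippet (the snippet, the `snippet` variable, and the expected outputs are illustrative assumptions, not taken from the original page); note that every call returns a list:

from lxml import etree

# A tiny made-up document exercising the five rules above
snippet = etree.HTML('<div><p class="intro">hi <a href="/home">home</a></p></div>')

print(snippet.xpath('/html/body/div/p/a/text()'))    # 1. node by node with /      -> ['home']
print(snippet.xpath('//a/text()'))                   # 2. skip levels with //      -> ['home']
print(snippet.xpath('//p[@class="intro"]/text()'))   # 3. exact tag by attribute   -> ['hi ']
print(snippet.xpath('//p[@class="intro"]/a/@href'))  # 4+5. text() vs @attribute   -> ['/home']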

1.3 More practice with xpath

news_xpath3.py

from lxml import etree

html = """
    <html>
    <body>
    <ul>
     <li>1
         <a href="">子</a>
     </li>
     <li>2
        <a href="">子</a>
     </li>
     <li>3
        <a href="">子</a>
     </li>
     <li>4
         <a href="">子</a>
     </li>
     <li>5
        <a href="">子</a>
     </li>
     
 </ul>
 </body>
 </html>
"""
# 1. Convert to a parseable object
x_data = etree.HTML(html)

# 2. xpath indices start at 1, and [n] selects among sibling tags under the
#    same parent (see the short sketch after this script)
# result = x_data.xpath('//li[5]/text()')
# ['5\n        ', '\n     ']

result = x_data.xpath('/html/body/ul/li/a/text()')
# ['子', '子', '子', '子', '子']

print(result)
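As a follow-up on the 1-based indexing, the lines below can be appended to news_xpath3.py (they reuse x_data from above; last() is standard XPath and selects the final sibling):

# Indexing is 1-based: li[1] is the first <li>, li[last()] is the fifth
print(x_data.xpath('//li[1]/a/text()'))     # ['子']
print(x_data.xpath('//li[last()]/text()'))  # same text nodes as //li[5] above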

1.4 A parsing case study

btc.py

import requests
from lxml import etree
import json


class BtcSpider(object):
    def __init__(self):
        self.base_url = 'http://8btc.com/forum-61-'
        self.headers = {
            "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/70.0.3538.77 Safari/537.36 '
        }

        self.data_list = []

    # 1. Send the request
    def get_response(self, url):  # the url changes between pages, so it is passed in
        response = requests.get(url, headers=self.headers)
        # Is the page gbk or utf-8? Check the <meta charset="..."> tag in <head>.
        # If the page is served as gbk, it must also be decoded as gbk:
        # data = response.content.decode('gbk')
        # Here we keep the raw bytes and let lxml handle the encoding
        data = response.content
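        # Aside: if the charset is hard to spot, requests can guess it for you
        # (response.apparent_encoding is a heuristic, not a guarantee):
        # data = response.content.decode(response.apparent_encoding)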
        return data

    # 2. Parse the data
    def parse_data(self, data):
        # Use xpath to pull every post title and url from the current page
        # 1. Convert to a parseable object
        x_data = etree.HTML(data)

        # 2. Parse with an xpath path
        # Getting the path: 1. write it by hand; 2. right-click the element in
        # the browser and copy its xpath, then trim it (the commented form-based
        # path below is what the browser hands you)
        title_list = x_data.xpath('//a[@class="s xst"]/text()')
        # title_list = x_data.xpath('//form[@id="moderate"]/div/div[2]/div/a[@class="s xst"]/text()')
        url_list = x_data.xpath('//a[@class="s xst"]/@href')

        # Walk the titles and pair each with its url, merging the two lists
        # into one list of dicts (a zip() version is sketched below)
        for index, title in enumerate(title_list):
            news = {'name': title, 'url': url_list[index]}
            # print(index)
            # print(title)
            self.data_list.append(news)
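        # Equivalent pairing with zip() (a sketch; it assumes every title has a
        # matching href at the same position):
        # for title, url in zip(title_list, url_list):
        #     self.data_list.append({'name': title, 'url': url})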

    # 3. Save the data
    def save_data(self):

        # Serialize the list to a JSON string; ensure_ascii=False keeps the
        # Chinese titles readable in the file
        data_str = json.dumps(self.data_list, ensure_ascii=False)
        with open('05btc.json', 'w', encoding='utf-8') as f:
            f.write(data_str)

    # 4. Run the spider
    def run(self):

        for i in range(1, 5):  # i is the page number (pages 1 through 4)
            # 1. Build the full url for page i
            url = self.base_url + str(i) + '.html'
            # print(url)
            # 2. Send the request
            data = self.get_response(url)

            # 3. Parse
            self.parse_data(data)

        # 4. Save
        self.save_data()


BtcSpider().run()
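To sanity-check the result, here is a minimal read-back sketch (it assumes the run above has already written 05btc.json):

import json

with open('05btc.json', 'r', encoding='utf-8') as f:
    news_list = json.load(f)

# Print the first few scraped entries
for news in news_list[:3]:
    print(news['name'], '->', news['url'])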


++++++++++ End ++++++++++++++++++


Reposted from blog.csdn.net/qq_42893334/article/details/108838059